| Field | Type | Range / values |
| --- | --- | --- |
| query | string | lengths 9 – 9.05k |
| document | string | lengths 10 – 222k |
| metadata | dict | – |
| negatives | sequence | length 30 |
| negative_scores | sequence | length 30 |
| document_score | string | lengths 4 – 10 |
| document_rank | string | 2 classes |
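Each record pairs a natural-language query (a docstring-style description) with one positive code document and 30 hard-negative code snippets. As a rough sketch of a single row's shape, with values abbreviated from the first record below (the truncated strings are illustrative placeholders, not literal dataset content):

```python
# Illustrative row layout inferred from the schema above; values are truncated placeholders.
row = {
    "query": "plot the background stars (HR diagram). ...",             # free-text description, 9 to ~9.05k chars
    "document": "def plot_hr_diag(hr_df, x='B_V', y='M_V', ...): ...",  # positive code snippet, 10 to ~222k chars
    "metadata": {"objective": {"self": [], "paired": [],
                               "triplet": [["query", "document", "negatives"]]}},
    "negatives": ["def plot_histogram(img): ..."] * 30,                 # exactly 30 negative code snippets
    "negative_scores": ["0.5921788"] * 30,                              # one similarity score per negative, stored as strings
    "document_score": "0.66867584",                                     # score of the positive document, stored as a string
    "document_rank": "0",                                               # one of 2 distinct values
}
```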
plot the background stars (HR diagram). The plot is a 2D histogram, for better readability. Only bins with at least 10 stars are shown.
```python
def plot_hr_diag(hr_df, x='B_V', y='M_V', cutoff=0.2, bvcutoff=0.05):
    plt.figure(figsize=(11., 10.))
    print "Plotting background stars.."
    plt.set_cmap('gray_r')
    plt.hist2d(hr_df[x].tolist(), hr_df[y].tolist(), (200, 200), norm=LogNorm(), cmin=10)
    plt.axis([-0.2, 2.35, -3., 7.])
    plt.gca().invert_yaxis()
    plt.xlabel(r'$BT-VT$ (mag)')
    plt.ylabel(r'$M_{VT}$ (mag)')  # Plotting M_{VT}
    plt.title(r'$\sigma_\pi / \pi < %s, \sigma_{BT-VT}< %s$ mag' % (cutoff, bvcutoff))
    print "..Done"
    return
```
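A minimal usage sketch for the retrieved function above, assuming it is defined in the same script. The imports and the CSV filename/columns are assumptions added for illustration; `LogNorm` must be imported because the function calls `plt.hist2d(..., norm=LogNorm())`:

```python
# Hypothetical driver for plot_hr_diag; 'hr_catalog.csv' and its columns are assumed.
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm  # used inside plot_hr_diag via norm=LogNorm()

hr_df = pd.read_csv('hr_catalog.csv')  # must provide the default 'B_V' and 'M_V' columns
plot_hr_diag(hr_df, cutoff=0.2, bvcutoff=0.05)
plt.show()
```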
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_histogram(img):\n rgb_hist = rgb_histogram(img)\n plt.figure()\n for color, hist in rgb_hist.items():\n plt.plot(hist, color=color)\n plt.xlim([0, 256])", "def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def entries_histogram(turnstile_weather):\n\n plt.figure()\n turnstile_weather[turnstile_weather.rain == 0][\n 'ENTRIESn_hourly'].hist() # your code here to plot a historgram for hourly entries when it is raining\n turnstile_weather[turnstile_weather.rain == 1][\n 'ENTRIESn_hourly'].hist() # your code here to plot a historgram for hourly entries when it is not raining\n return plt", "def plot_hist(self):\n \n plt.figure();\n self.dist_frame.plot(kind='hist',legend=False,orientation='horizontal')", "def plot_star_classes(obj_catalog):\n\n fig = plt.figure(num=None,figsize=(8,8), dpi=100)\n ax = fig.add_subplot(1,1,1)\n\n phot_class = obj_catalog.phot_star_class\n sclass = obj_catalog.star_class\n phot_class_num = np.zeros(obj_catalog.shape[0])\n sclass_num = np.zeros(obj_catalog.shape[0])\n\n star_classes = ['WD',\\\n 'O','O8','O9','OB','B0','B1','B2','B3','B5','B6','B7','B8','B9',\\\n 'A0','A1','A2','A3','A4','A5','A6','A8','A9',\\\n 'F0','F2','F3','F5','F6','F8','F9',\\\n 'G0','G1','G2','G3','G4','G5','G8','G9',\\\n 'K0','K1','K2','K3','K4','K5','K7',\\\n 'M0','M1','M2','M3','M4','M5','M6','M7','M8','M9', \\\n 'L0','L1','L2','L3','L4','L5','L9','Ldwarf', \\\n 'T','other','C']\n print len(star_classes)\n\n star_dict = dict(zip(star_classes,np.arange(len(star_classes))))\n\n # print phot_class.value_counts()\n\n for i in range(len(phot_class)):\n print phot_class[i], star_dict[phot_class[i]], sclass[i],star_dict[sclass[i]]\n phot_class_num[i] = star_dict[phot_class[i]]\n sclass_num[i] = star_dict[sclass[i]]\n\n #ax.plot(sclass_num,phot_class_num,'.')\n\n cmap = plt.cm.Blues\n cmap.set_bad('0.85',1.0)\n\n cax = plt.hist2d(sclass_num,phot_class_num, bins=65,range = [[0,65], [0,65]], norm = LogNorm(), cmap=cmap, zorder=0)\n cbar = plt.colorbar(ticks=[1,5,10,15,20,25,30,40])\n cbar.ax.set_yticklabels([1,5,10,15,20,25,30,40],fontsize=12)\n\n 
ax.plot(np.arange(65),np.arange(65),'r')\n\n plt.xticks(np.arange(len(star_classes)),star_classes,fontsize=8,rotation='vertical')\n plt.yticks(np.arange(len(star_classes)),star_classes,fontsize=8)\n\n plt.grid(True)\n return plt", "def hist(data):\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n plt.hold(True)\n for x in xrange(len(data[:,0,0])):\n counts, edges = np.histogram(data[x,:,:],bins=100)\n centers = [(edges[i]+edges[i+1])/2.0 for i,v in enumerate(edges[:-1])]\n ax1.plot(centers,counts)\n plt.hold(False)\n\n plt.show(block=False)\n\n # return fig", "def plot_bacteria_hist(folder, depth=6, mid_quantile=False):\n\n # Get the stool dataset and discretize it\n ds = parser.get_dataset()\n ds = compute_relative_values(ds)\n t = Tree(ds)\n ds = t.dataset_at_depth(depth)\n\n # Get header names to priint on the plots\n headers = ds[0][2:]\n\n for index, header in enumerate(headers):\n\n node = t.node_for_clade_name(header)\n abundances = t.abundance_column_in_subtree(node)\n abundances = [round(x,3) for x in abundances]\n\n if mid_quantile:\n abundances.sort()\n abundances = abundances[int(len(abundances)*0.25): -int(len(abundances)*0.25)]\n\n xlabel('Relative abundance')\n ylabel('Bin size')\n\n title_text = header.replace('/','-').replace('|', '-')\n title(title_text)\n binwidth = 0.001\n bins, bin_sizes, patches = hist(abundances, bins=np.arange(min(abundances), max(abundances) + binwidth, binwidth), color='#0066FF')\n\n # Write discretized values\n threshold, discretized_abundances = discretize_row(abundances, maxent_discretization_splitter)\n _0 = '0: ' + str(len([x for x in discretized_abundances if x == 0]))\n _1 = '1: ' + str(len([x for x in discretized_abundances if x == 1]))\n\n text_x = 0.7\n\n smaples_text = 'Samples: %d' % len(abundances)\n figtext(text_x, 0.85, smaples_text, fontsize=10)\n\n threshold_text = 'Splitter: %f' % threshold\n figtext(text_x, 0.82, threshold_text, fontsize=10)\n figtext(text_x, 0.79, _0, fontsize=10)\n figtext(text_x, 0.76, _1, fontsize=10)\n\n # Draw threshold line\n max_bin = len(abundances)\n if len(bins) != 0:\n max_bin = max(bins)\n\n a, b = [threshold, threshold], [0, max_bin]\n plot(a, b, c='r')\n\n grid(True)\n\n # Write max and avg\n # max_abundance = 'max: %f' % max(abundances)\n # avg_abundance = 'avg: %f' % (sum(abundances) / float(len(abundances)))\n # figtext(text_x, 0.76, max_abundance, fontsize=10)\n # figtext(text_x, 0.73, avg_abundance, fontsize=10)\n\n # write variance\n # variance = 'var: %f' % tvar(abundances)\n # figtext(text_x, 0.70, variance, fontsize=10)\n\n # Save fig to folder\n if not (os.path.exists(folder)):\n os.makedirs(folder)\n file_name = os.path.join(folder, title_text)\n print 'Hist: ', file_name\n savefig(file_name)\n\n close()", "def plot_loss(self):\n #x = [k for k in range(self.rep)]\n loss = self.min_list[:,0]//100 #For clarity\n #plt.plot(x,self.min_list[:,0])\n plt.hist(loss,density=True)\n plt.xlabel(self.list_name + '_loss//100')\n plt.ylabel('Frequency')\n #plt.xticks(range(8),[0,250,500,750,1000,1250,1500,1750])\n plt.title('Distribution of '+self.list_name+'_loss ('+str(self.rep)+' iterations)')\n plt.savefig('img/stats/'+self.list_name+'_lossFrequency_'+self.model_name+'.png')\n plt.show()", "def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()", "def create_histogram(self, i):\n # styling\n 
sns.set(style=\"whitegrid\")\n font = {'weight': 'normal'}\n plt.rc('font', **font)\n plt.rc('axes', labelsize=25) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=25) # fontsize of the tick labels\n plt.rc('ytick', labelsize=25)\n fig, ax = plt.subplots(1, 1, figsize=(5, 5), dpi=100)\n try:\n if self.dtype_is_object() or self.num_of_values() <= 15:\n if self.num_of_values() > 15:\n data = pd.to_numeric(self.data, errors='coerce')\n plot = sns.distplot(data.dropna())\n else:\n plot = sns.countplot(self.remove_nan_values())\n else:\n plot = sns.distplot(self.remove_nan_values())\n plot.set(xlabel='', ylabel='')\n except Exception:\n plt.text(0.5, 0.5, f'Unable to plot', ha='center', va='center', transform=ax.transAxes, fontsize=16)\n if not os.path.isdir('hist_images'):\n os.mkdir('hist_images')\n plt.savefig(f'hist_images/histogram{i}.png', bbox_inches='tight')\n plt.close()\n plt.clf()", "def plotYields(data,signal=None,backgrounds=[],bins=[]):\n print \n if not bins:\n center = [i+0.5 for i,d in enumerate(data)] # pseudo-data points for making histogram\n bins = [i for i in range( len(data)+1 )] # pseudo-binning\n else:\n center = [ 0.5*(b+bins[i+1]) for i,b in enumerate(bins) if i<len(bins)-1]\n data = np.array(data)\n\n # stack the backgrounds on top of each other in the plot\n nbckgs = len(backgrounds)\n labels = ['background {0}'.format(i) for i in range(nbckgs)]\n weights = list(backgrounds)\n bincenters = [ list(center) for _ in range(nbckgs)]\n\n # stack the signal on top of the backgrounds\n if signal is not None:\n # 'signal' is what we want to unfold, e.g., ttbar\n labels += ['signal']\n weights += [list(signal)]\n bincenters += [list(center)]\n\n # plot backgrounds & signal\n d,bb,pp = plt.hist(bincenters,weights=weights,stacked=True,\n histtype='stepfilled',label=labels,\n edgecolor='k',bins=bins)\n\n # plot the data as error bars\n plt.errorbar(center,data,color='k',\n fmt='o',yerr=np.sqrt(data),\n label='Data')\n\n plt.ylim(ymin=0,ymax=plt.ylim()[1]*1.6) # scale the y-axis to accommodate the legend\n plt.legend()\n plt.xlabel(\"Distribution\")\n plt.ylabel(\"Events\")\n\n return", "def het_hist(het_check_df: pd.DataFrame):\n\n fig = plt.figure(figsize=(8,6))\n plt.hist(het_check_df['het_rate'])\n plt.axvline(het_check_df['low_limit'][0], c='red', ls='--')\n plt.axvline(het_check_df['up_limit'][0], c='red', ls='--')\n plt.xlabel(\"Heterozygosity Rate\")\n plt.ylabel(\"Number of Samples\")\n plt.title(\"Heterozygosity Distribution of All Samples\\n (< {:.3f} or > {:.3f} are removed)\".format(het_check_df['low_limit'][0], het_check_df['up_limit'][0]))\n return fig", "def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):\n # Set pyplot style to be consistent within the program\n plt.style.use('seaborn-whitegrid')\n # Import raw data to plot Hertzsprung-Russell diagram\n _hrdata = inithr('hr.dat')\n # Determine distance in parsecs\n _distance = 1 / np.tan(_parallax * 10**-3)\n _derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)\n # Create single data array with all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n _lambda = [0.9, 1.02, 1.22, 1.63, 2.2]\n # Set up empty arrays for each star\n _largestar = np.zeros((1, 2))\n _smallstar = np.zeros((1, 2))\n\n # Determine the spectral flux density from the large star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # The large star uses the maximum flux value (smallest magnitude)\n _largestar = 
np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete first empty row of the array\n _largestar = np.delete(_largestar, 0, axis=0)\n\n # Determine the spectral flux density from the small star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # Smaller star flux value is combined value minus the large star\n _smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -\n magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete the first empty row of the array\n _smallstar = np.delete(_smallstar, 0, axis=0)\n\n # Determine the luminosity and effective temperature of each star\n _luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)\n _lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)\n\n # Calculate luminosities in solar units\n _solluma = _luma / (3.828*10**26)\n _sollumb = _lumb / (3.828*10**26)\n _lumaerr = _lumaerr / (3.828*10**26)\n _lumberr = _lumberr / (3.828*10**26)\n\n # Calculate masses using the mass/luminosity relation in solar mass units\n # N.B. only works as an approximation for main sequence stars, giants and dwarfs are not sutiable for this\n # approximation\n _solmassa = np.power(_solluma, 1/3.5)\n _solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2\n _solmassb = np.power(_sollumb, 1/3.5)\n _solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2\n\n # Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature\n _solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))\n _solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))\n _solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2\n _solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2\n\n # Output determined values to the screen and write to file\n print('Values for the large star:')\n print('Effective temperature: ' + str(round_sig(_wiena)))\n print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))\n print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))\n print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))\n print('-----------------------------------------------------')\n print('Values for the small star:')\n print('Effective temperature: ' + str(round_sig(_wienb)))\n print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))\n print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))\n print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))\n\n # Convert from luminosity to magnitude in solar units\n _luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))\n _lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))\n\n # Plot Hertzsprung-Russell diagram using provided array\n plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)\n # Plot determined values for each star\n plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')\n plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')\n # Set the x and y axis limits to sensible values\n plt.legend()\n plt.xlim(3000, 10000)\n plt.ylim(-10, 20)\n # Invert both axes as convention\n plt.gca().invert_xaxis()\n plt.gca().invert_yaxis()\n # Save figure to current folder\n plt.savefig('hr.png')\n # Display to screen\n plt.show()", "def plot_histograms(p_hist, 
p_hbins, title, figure_path=None):\n\n base_fig_size = 7\n h_fig = base_fig_size\n w_fig = base_fig_size * 4\n\n fig = plt.figure(figsize=(w_fig, h_fig))\n fig.suptitle(title)\n iplot = 0\n\n p_Nx, p_Ny = np.amax(p_hbins, axis=1) + 1\n\n p_hist = np.reshape(p_hist, (4, p_Ny, p_Nx))\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Amp (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[0])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Phase (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[1])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Real (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[2])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Imag (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[3])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n if figure_path:\n plt.savefig(figure_path, format='png')\n\n return fig", "def plot_hist(df, num_bins=8):\n df.hist(figsize=(24, 20), bins=num_bins)\n plt.axes", "def stars(self, magnitude=20):\n # Get the stars that are visible within this chart.\n thestars = []\n for s in self.hip_stars:\n if not s: continue\n hip_id, mag, ra, dec, bv = s\n if mag>magnitude: continue\n if dec<min(self.inner_dec, self.outer_dec): continue\n if dec>max(self.inner_dec, self.outer_dec): continue\n thestars.append(s)\n # This should sort them by increasing magnitude (brightest first).\n thestars.sort(key=lambda a:a[1])\n if not thestars: return\n # Set the least bright magnitude.\n self.dimmest_mag = math.floor(thestars[-1][1])\n # Create the star group.\n star_g = self.make_element(self.centered, 'g', (\n 'stroke', 'none'), ('fill', 'black'), (\n 'clip-path', 'url(#innerClipPath)'))\n for hip_id, mag, ra, dec, bv in thestars:\n x, y = self.radec2xy(ra, dec)\n self.make_element(star_g, 'circle', (\n 'cx', x), ('cy', y), ('r', self.starsize(hip_id)))", "def plot_random_schedules(scores):\n\n\tplt.hist(scores, bins = len(scores))\n\tplt.ylabel(\"Score\")\n\tplt.xlabel(\"Times\")\n\tplt.title(\"Histogram random schedules\")\n\tplt.show()", "def histogram_plot(val_addr):\n pos = np.zeros((val_addr.shape[0],2))\n for i in range(val_addr.shape[0]):\n if i%1e6==0:\n print(i)\n temp=val_addr[i]\n if temp<25:\n if temp<=4:\n pos[i,0]=np.sqrt(3)\n pos[i,1] = temp-2.\n elif temp<=8:\n pos[i,0]=np.sqrt(3)/2.\n pos[i,1] = temp-6.5 \n elif temp<=13:\n pos[i,0]=0\n pos[i,1] = temp-11. 
\n elif temp<=17:\n pos[i,0]=-np.sqrt(3)/2.\n pos[i,1] = temp-15.5 \n elif temp<=23:\n pos[i,0]=-np.sqrt(3)\n pos[i,1] = temp-20.\n plt.hexbin(pos[:,1],pos[:,0],gridsize=(4,2))\n plt.colorbar()\n plt.show()\n return(pos)", "def histogram(values, title, fig_size=(4,3), path=None):\n plt.clf()\n f, ax = plt.subplots(1, figsize=fig_size)\n ax.hist(values, bins=60)\n ax.set_title(title)\n f.tight_layout()\n if(path != None):\n f.savefig(path+'/hist_'+title+'.png')", "def rho_plot2(self, pred=None):\n axRect = [0.1446, 0.2150, 0.7604, 0.7100]\n # plt.figure(22, figsize = (8.5, 11), dpi=300)\n fig, ax = plt.subplots()\n if pred is not None:\n self.rho_sub_plot(ax, axRect, pred=pred)\n else:\n self.rho_sub_plot(ax, axRect)", "def interactive_hess(gr,g):\n def plot(size=100):\n fig,ax = plt.subplots()\n fig.set_size_inches(8,6)\n ax.hexbin(gr, g, gridsize=size, bins='log', cmap='inferno', label=\"Relative stellar density\")\n ax.set_title(\"HESS DIAGRAM, gridsize={0:d}\".format(size), fontsize = 15)\n ax.set_xlabel(r\"$g-r$\",fontsize = 25)\n ax.set_ylabel(r\"$g$\",fontsize = 25)\n ax.legend(loc='upper left')\n ax.set_ylim(ax.get_ylim()[::-1])\n plt.show()\n interact(plot, size=(50,300,1),continuous_update=False);", "def pixel_ts_distribution(self):\n fig,ax = plt.subplots(figsize=(8,6))\n bins = np.linspace(0,25,501)\n tsvec=self.tsmap.vec\n ax.hist(tsvec, bins, log=True, histtype='step', lw=2, cumulative=-1, label='data');\n # make array corresponding to the hist\n h = np.histogram(tsvec, bins, )[0]\n x = bins[:-1]\n yh = sum(h)-h.cumsum() \n f = lambda x: np.exp(-x/2)\n ye=6e5*f(x)\n ax.plot(x, ye, '-g', lw=2, label='exp(-TS/2)')\n ax.fill_between(x,yh,ye,where=x>5, facecolor='red', alpha=0.6)\n plt.setp(ax, xscale='linear', xlabel='TS', ylim=(1,None), ylabel='# greater than TS')\n ax.legend()\n ax.set_title('Cumulative distribution of single-pixel TS values for {}'.format(self.skymodel),\n fontsize=14)\n ax.grid(True, alpha=0.5) \n fig.set_facecolor('white')\n return fig", "def histogram(list):\n for i in range(0,len(list)):#go over the number in the list\n print('*'*list[i])", "def replot_shadow_half( ilh, val, xdata, ydata, color, alpha ):\n shadow_list = get_shadow_list( val, num_lower, num_higher )\n if shadow_list[ilh]:\n l2shadow[ilh] = ax['plot'].plot( xdata, ydata[:,shadow_list[ilh]], '-', color=color, lw=1, alpha=alpha )", "def hist(self):\r\n plt.hist(self.data_array, bins='auto', density=False, facecolor='b')\r\n plt.title(self.column_name)\r\n plt.savefig(self.column_name + \".svg\")\r\n plt.close()", "def draw_histogram(m, rolls, width):\n # Count the number of rolls of each side and store them in a list\n roll_count = []\n for i in list(range(1, m+1)):\n count = rolls.count(i)\n roll_count.append(count)\n\n # Finding the the maximum count from the list\n max_count = max(roll_count)\n\n print(\"Frequency Histogram: \" + str(m) + \"-sided Die\")\n\n # Counting the number of same outputs and then scaling these outputs\n # to the given width\n for j in list(range(1, m+1)):\n count = rolls.count(j)\n scaled_count = round(count*(width/max_count))\n print(str(j)+'.', end='')\n print('#'*scaled_count, end='')\n print('-'*(width-scaled_count))", "def histogram(arr, xlbl, xrng=None, nbins=20, alpha=1.):\n if xrng is None:\n xrng = (np.min(arr),np.max(arr))\n p = figure(plot_width=600, plot_height=400)\n # Histogram\n hist, edges = np.histogram(arr, range=xrng, density=True, bins=nbins)\n p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], fill_color='blue', alpha=alpha)\n # 
Label\n p.xaxis.axis_label = xlbl\n # Show\n show(p)" ]
[ "0.5921788", "0.5898935", "0.586834", "0.586834", "0.586834", "0.5814485", "0.575388", "0.5714343", "0.5698721", "0.56886125", "0.5656541", "0.5636839", "0.56319624", "0.5586074", "0.55594313", "0.5537156", "0.54848033", "0.54779595", "0.5466038", "0.54587734", "0.5457407", "0.54543346", "0.54367083", "0.54366755", "0.5418318", "0.54174066", "0.54023594", "0.5377258", "0.53762317", "0.5368388" ]
0.66867584
0
Parent function of get_variable_stars. Sequentially select 'variableTypes' variable stars and plot them on the HR diagram.
```python
def plot_variable_stars(variablesdf, variabletype=None, x='B_V', y='M_V'):
    if variabletype is None:
        variabletype = ['CEP', 'BCEP', 'BCEPS', 'DSCT', 'SR', 'SRA', 'SRB', 'SRC', 'SRD', 'RR', 'RRAB', 'RRC', 'GDOR',
                        'SPB', 'M', 'LPV', 'roAp']
    markers = ['^', 'D', 'D', 'v', 's', 'D', 'D', 'D', 'D', 's', 'D', 'D', 'D', 'o', 'p', 'o', 'o']
    colors = ['k', 'k', 'k', '#00c000', 'r', 'r', 'r', 'r', 'r', 'm', 'm', 'm', '#00c0ff', (1, .7, 0), 'w', 'w', 'r']
    sizes = [50, 40, 40, 40, 50, 40, 40, 40, 40, 50, 50, 50, 40, 40, 45, 40, 40]
    labels = ['', "BCEP, BCEPS", '', 'DSCT', 'SR', "SRA, SRB, SRC, SRD", '', '', '', 'RR', "RRAB, RRC", '', 'GDOR',
              'SPB', '', 'LPV', 'roAp']
    for i in range(len(variabletype)):
        if i in [2, 6, 7, 8, 11]:
            my_label = None
        else:
            my_label = "%s" % labels[i]
        plt.scatter(variablesdf[x].loc[variablesdf.loc[:, 'Type'] == variabletype[i]], variablesdf[y]
                    .loc[variablesdf.loc[:, 'Type'] == variabletype[i]], facecolor=colors[i], marker=markers[i],
                    s=sizes[i], label=my_label, edgecolor='k')
        print "plotting %s as %s%s" % (variabletype[i], colors[i], markers[i])
    return
```
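A minimal sketch of how this function might be combined with `plot_hr_diag` from the previous record, assuming both are defined in the same script. The catalogue filenames and columns are assumptions; the overlay only requires that the variables DataFrame carries a 'Type' column:

```python
# Hypothetical overlay of variable stars on the background HR diagram.
import pandas as pd
import matplotlib.pyplot as plt

hr_df = pd.read_csv('hr_background.csv')          # background stars with 'B_V' and 'M_V'
variables_df = pd.read_csv('variable_stars.csv')  # must also carry a 'Type' column (e.g. 'DSCT', 'RRAB')

plot_hr_diag(hr_df)                # 2D-histogram background (previous record)
plot_variable_stars(variables_df)  # per-type scatter overlay
plt.legend(loc='lower left', scatterpoints=1)
plt.show()
```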
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_variable_stars(df_data, df_variables_names, variabletype=None):\n if variabletype is None:\n variabletype = ['CEP', 'BCEP', 'BCEPS', 'DSCT', 'SR', 'SRA', 'SRB', 'SRC', 'SRD', 'RR', 'RRAB', 'RRC',\n 'GDOR', 'SPB', 'M', 'LPV']\n\n print \"Selecting variable stars..\"\n # create a string \"var_type\" of variabletype separated by or ('|').\n # var_type = \"|\".join(variabletype)\n # check if var_type is contained in Type (any or all, partial or not)\n # are_variables = df_variables_names[df_variables_names.Type.str.contains(var_type) == True] # fails with \"is True\"\n # are_variables.Type = are_variables.Type.str.replace(\".*BCEP.*\", \"BCEP\") # rename all types containing 'BCEP'\n are_variables = df_variables_names[df_variables_names.Type.isin(variabletype)]\n types_df = are_variables[['hip', 'tycho2_id', 'source_id', 'Type', 'Name']]\n print \"..Done\"\n print \"Preparing subselection of initial DataFrame..\"\n print \"..Making Hipparcos list..\"\n hip_list = are_variables.hip.tolist()\n hip_list = np.array(hip_list)\n hip_list = hip_list[~np.isnan(hip_list)] # remove the nans\n hip_list = list(hip_list)\n print \"..Making Tycho2 list..\"\n tycho2_list = are_variables.tycho2_id.tolist()\n tycho2_list = np.array(tycho2_list)\n tycho2_list = tycho2_list[tycho2_list != 'nan'] # tycho2 is str\n tycho2_list = list(tycho2_list)\n print \"..Done\\n----------\"\n\n print \"Getting Hipparcos and Tycho variable objects..\"\n hip_objects = df_data[df_data.hip.isin(hip_list)]\n hip_objects = pd.merge(hip_objects, types_df, on='hip', how='inner')\n if 'tycho2_id_y' in hip_objects.columns:\n hip_objects = hip_objects.drop('tycho2_id_y', axis=1)\n hip_objects = hip_objects.rename(columns={'hip_x': 'hip', 'tycho2_id_x': 'tycho2_id'})\n\n tycho_objects = df_data[df_data.tycho2_id.isin(tycho2_list)]\n tycho_objects = pd.merge(tycho_objects, types_df, on='tycho2_id', how='inner')\n if 'hip_y' in tycho_objects.columns:\n tycho_objects = tycho_objects.drop('hip_y', axis=1)\n tycho_objects = tycho_objects.rename(columns={'hip_x': 'hip', 'tycho2_id_x': 'tycho2_id'})\n print \"..Done\\n----------\"\n\n print \"Getting roAp stars from file..\"\n # roAP_names.csv contains tycho2_id names of roAp stars\n with open('roAP/roAP_names.csv') as roAP_file:\n roap_objects_list = roAP_file.readlines()\n roap_objects_list = [line.rstrip() for line in roap_objects_list]\n roap_objects = df_data[df_data.tycho2_id.isin(roap_objects_list)]\n column_number = len(roap_objects.columns)\n roap_objects.insert(column_number, 'Type', 'roAp')\n print \"..Done\\n----------\"\n\n variable_df = pd.concat([hip_objects, tycho_objects, roap_objects], axis=0, ignore_index=True)\n variable_df.source_id = variable_df.source_id.fillna(-9999).astype(int)\n\n return variable_df", "def plot_data_types(self, variable, **kwargs):\n return self.visualizer.plot_data_types(variable, **kwargs)", "def update_graph_type(variable_dropdown_x, variable_dropdown_y):\n\n options = {\n \"violin\": {\"label\": \"Violin\", \"value\": 1},\n \"scatter\": {\"label\": \"Scatter\", \"value\": 2},\n \"bar\": {\"label\": \"Bar\", \"value\": 3},\n \"pie\": {\"label\": \"Pie\", \"value\": 4},\n # \"box\": {\"label\": \"Box\", \"value\": 5,},\n }\n\n if variable_dropdown_x is None:\n return [], None, True, \"Select a graph type\"\n\n graph_selection_list = []\n\n if variable_dropdown_y is None:\n # Only one variable selected\n field_id = variable_dropdown_x\n value_type = get_field_type(field_id)\n\n supported_graphs = value_type.supported_graphs\n\n for 
option_key in options:\n option = options[option_key]\n graph_type = option[\"value\"]\n if graph_type in supported_graphs:\n graph_selection_list.append(option)\n\n else:\n # Both variables selected\n # Logic is:\n # If the x-axis variable is continuous, integer, date or time:\n # If the y-axis variable is continuous or integer:\n # You can use scatter plot\n # Else if x-axis variable is categorical:\n # If the y-axis variable is continuous or integer:\n # You can use violin plot, box plot\n x_value_type = get_field_type(str(variable_dropdown_x))\n y_value_type = get_field_type(str(variable_dropdown_y))\n\n if (\n x_value_type == ValueType.INTEGER\n or x_value_type == ValueType.CONT\n or x_value_type == ValueType.DATE\n or x_value_type == ValueType.TIME\n ):\n if y_value_type == ValueType.INTEGER or y_value_type == ValueType.CONT:\n graph_selection_list.append(options[\"scatter\"])\n\n elif x_value_type == ValueType.CAT_SINGLE or x_value_type == ValueType.CAT_MULT:\n if y_value_type == ValueType.INTEGER or y_value_type == ValueType.CONT:\n # graph_selection_list.append(options[\"box\"])\n graph_selection_list.append(options[\"violin\"])\n\n if len(graph_selection_list) == 0:\n return graph_selection_list, None, True, \"No supported graph types\"\n\n return (\n graph_selection_list,\n graph_selection_list[0][\"value\"],\n False,\n \"Select a graph type\",\n )", "def stars(self, magnitude=20):\n # Get the stars that are visible within this chart.\n thestars = []\n for s in self.hip_stars:\n if not s: continue\n hip_id, mag, ra, dec, bv = s\n if mag>magnitude: continue\n if dec<min(self.inner_dec, self.outer_dec): continue\n if dec>max(self.inner_dec, self.outer_dec): continue\n thestars.append(s)\n # This should sort them by increasing magnitude (brightest first).\n thestars.sort(key=lambda a:a[1])\n if not thestars: return\n # Set the least bright magnitude.\n self.dimmest_mag = math.floor(thestars[-1][1])\n # Create the star group.\n star_g = self.make_element(self.centered, 'g', (\n 'stroke', 'none'), ('fill', 'black'), (\n 'clip-path', 'url(#innerClipPath)'))\n for hip_id, mag, ra, dec, bv in thestars:\n x, y = self.radec2xy(ra, dec)\n self.make_element(star_g, 'circle', (\n 'cx', x), ('cy', y), ('r', self.starsize(hip_id)))", "def create_plot(x_var, y_var):\r\n\r\n FILE_PATH = 'application/star_data.csv'\r\n TARGET_VAR = 'star_type'\r\n SIZE_VAR = 'r_clipped'\r\n WIDTH = 1000\r\n HEIGHT = 600\r\n\r\n # Get the data\r\n df = pd.read_csv(FILE_PATH)\r\n fig = px.scatter(df, x=x_var, y=y_var, color=TARGET_VAR, size=SIZE_VAR, \r\n width=WIDTH, height=HEIGHT)\r\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\r\n\r\n return graphJSON", "def get_safety_vars_plot(self):\n if 'safety_vars_stats' not in self.stats:\n raise ValueError('No safety vars statistics present in this evaluator.')\n\n safety_vars = self.stats['safety_vars_stats'][0].keys()\n n_plots = len(safety_vars)\n fig, axes = plt.subplots(n_plots, 1, figsize=(8, 6 * n_plots))\n\n for idx, var in enumerate(safety_vars):\n series = collections.defaultdict(list)\n for ep in self.stats['safety_vars_stats']:\n for stat in ep[var]:\n series[stat].append(ep[var][stat])\n ax = axes[idx]\n for stat in ['min', 'max']:\n ax.plot(np.squeeze(np.array(series[stat])), label=stat)\n x = range(len(series['mean']))\n\n mean = np.squeeze(np.array(series['mean']))\n std_dev = np.squeeze(np.array(series['std_dev']))\n ax.plot(x, mean, label='Value')\n ax.fill_between(\n range(len(series['mean'])), mean - std_dev, mean + std_dev, 
alpha=0.3)\n ax.set_title('Stats for {}'.format(var))\n ax.legend()\n ax.spines['top'].set_visible(False)\n\n ax.xaxis.set_ticks_position('bottom')\n ax.set_xlabel('Episode #')\n ax.set_ylabel('Magnitude')\n ax.plot()\n return fig", "def plotvars_core(gs, data, plotfun=vis.plot_r, plot_radars=True,\n projection=PROJECTION, **kws):\n trans = ccrs.PlateCarree()\n axd = dict(ker=plt.subplot(gs[0, 0], projection=projection),\n kum=plt.subplot(gs[0, 1], projection=projection),\n van=plt.subplot(gs[1, 0], projection=projection),\n com=plt.subplot(gs[1, 1], projection=projection))\n for key in ['ker', 'kum']:\n axd[key].set_xticks([])\n for key in ['kum', 'com']:\n axd[key].set_yticks([])\n ax_cb = plt.subplot(gs[:, -1])\n for key in NAMES.keys():\n ax = axd[key]\n ax.set_ymargin(0)\n ax.set_xmargin(0)\n plotfun(data[I_RADAR[key]], ax=ax, cax=ax_cb, transform=trans, **kws)\n ax.set_title(NAMES[key])\n ax.coastlines(resolution='10m')\n if plot_radars:\n if key != 'com':\n RADAR[key].draw_marker(ax=ax, transform=trans)\n else:\n for radarkey in ['ker', 'kum', 'van']:\n RADAR[radarkey].draw_marker(ax=ax, transform=trans)\n return axd", "def read_stars(self):\n if self.hip_stars: return\n all_stars = list(hipparcos.stars())\n self.hip_stars = [None]*(max(s[0] for s in all_stars)+1)\n for s in all_stars: self.hip_stars[s[0]] = s", "def draw_points(stars, errors, ax):\n\n # Open the file of common star names to HIP numbers. Store this in names dictionary.\n names = {}\n with open(os.environ['HOKU_PROJECT_PATH'] + '/data/star-names.dat') as names_f:\n names_f.readline()\n\n for line in names_f:\n n, h = line.split(',')[0].strip(), line.split(',')[1].strip()\n names.update({h: n})\n\n # Plot clean data set as black.\n for star in stars:\n if quiver_flag:\n ax.quiver(0, 0, 0, star[0], star[1], star[2], arrow_length_ratio=0.0000001, alpha=0.2)\n ax.scatter(star[0], star[1], star[2], marker='*', color='k', s=100)\n\n if str(int(star[3])) in names:\n ax.text(star[0], star[1], star[2], names[str(int(star[3]))])\n else:\n ax.text(star[0], star[1], star[2], 'HIP{}'.format(int(star[3])))\n\n # Plot error models with specified colors.\n for model in errors:\n for error in model:\n if quiver_flag:\n ax.quiver(0, 0, 0, error[0], error[1], error[2], arrow_length_ratio=0.0000001, alpha=0.2)\n ax.scatter(error[0], error[1], error[2], marker='*', color=error[4])\n ax.text(error[0], error[1], error[2], 'ERR{}'.format(int(error[3])))", "def SetStars(self):\r\n\t\tstartype = [self._iconstars[\r\n\t\t\tself.CalcStar(starnum,\\\r\n\t\t\t\tself._configtmp[\"imagerating\"],\r\n\t\t\t\tself._configtmp[\"userrating\"])]\\\r\n\t\t\tfor starnum in range(1,6)]\r\n\t\tself.bitmapButton1Star.SetBitmapLabel(startype[0])\r\n\t\tself.bitmapButton2Star.SetBitmapLabel(startype[1])\r\n\t\tself.bitmapButton3Star.SetBitmapLabel(startype[2])\r\n\t\tself.bitmapButton4Star.SetBitmapLabel(startype[3])\r\n\t\tself.bitmapButton5Star.SetBitmapLabel(startype[4])", "def plot_star_classes(obj_catalog):\n\n fig = plt.figure(num=None,figsize=(8,8), dpi=100)\n ax = fig.add_subplot(1,1,1)\n\n phot_class = obj_catalog.phot_star_class\n sclass = obj_catalog.star_class\n phot_class_num = np.zeros(obj_catalog.shape[0])\n sclass_num = np.zeros(obj_catalog.shape[0])\n\n star_classes = ['WD',\\\n 'O','O8','O9','OB','B0','B1','B2','B3','B5','B6','B7','B8','B9',\\\n 'A0','A1','A2','A3','A4','A5','A6','A8','A9',\\\n 'F0','F2','F3','F5','F6','F8','F9',\\\n 'G0','G1','G2','G3','G4','G5','G8','G9',\\\n 'K0','K1','K2','K3','K4','K5','K7',\\\n 
'M0','M1','M2','M3','M4','M5','M6','M7','M8','M9', \\\n 'L0','L1','L2','L3','L4','L5','L9','Ldwarf', \\\n 'T','other','C']\n print len(star_classes)\n\n star_dict = dict(zip(star_classes,np.arange(len(star_classes))))\n\n # print phot_class.value_counts()\n\n for i in range(len(phot_class)):\n print phot_class[i], star_dict[phot_class[i]], sclass[i],star_dict[sclass[i]]\n phot_class_num[i] = star_dict[phot_class[i]]\n sclass_num[i] = star_dict[sclass[i]]\n\n #ax.plot(sclass_num,phot_class_num,'.')\n\n cmap = plt.cm.Blues\n cmap.set_bad('0.85',1.0)\n\n cax = plt.hist2d(sclass_num,phot_class_num, bins=65,range = [[0,65], [0,65]], norm = LogNorm(), cmap=cmap, zorder=0)\n cbar = plt.colorbar(ticks=[1,5,10,15,20,25,30,40])\n cbar.ax.set_yticklabels([1,5,10,15,20,25,30,40],fontsize=12)\n\n ax.plot(np.arange(65),np.arange(65),'r')\n\n plt.xticks(np.arange(len(star_classes)),star_classes,fontsize=8,rotation='vertical')\n plt.yticks(np.arange(len(star_classes)),star_classes,fontsize=8)\n\n plt.grid(True)\n return plt", "def radial_graph(self):\n \n if self['M_RADIAL']['intens'] != None:\n name = self['name']\n id = self._getGraphId()\n figname = 'RADIAL_%s.eps' % id\n sxlabel = 'Pixel Radius' ; sylabel = 'Intens' \n title = 'Radial profile, %s' % (name,)\n y = self['M_RADIAL']['intens']\n x = self['M_RADIAL']['radii']\n xy = ((x,y),)\n Plot(xy,figname,sxlabel,sylabel,title)\n self['figures']['radial'] = figname\n else : pass", "def plot_shape(self, theta=0):\n x = np.zeros(self.nz)\n y_re = np.zeros(self.nz)\n y_ri = np.zeros(self.nz)\n for i in range(0, self.nz):\n x[i] = i * self.dz\n y_re[i] = self.re[i][theta]\n y_ri[i] = self.ri[i][theta]\n p = figure(\n title=\"Shapes of stator and rotor along Z; Theta=\" + str(theta),\n x_axis_label=\"Points along Z\",\n y_axis_label=\"Radial direction\",\n )\n p.line(x, y_re, line_width=2, color=\"red\")\n p.line(x, y_ri, line_width=2, color=\"blue\")\n return p", "def listofstars():\n a = []\n for star in Star.select():\n a.append(star.name)\n return a", "def plot(self, dis_type,diameter=\"*\",thickness=\"*\", loglog=False):\n if dis_type not in self.dis_types:\n print(\"Type %s does not exist, please check it\" % dis_type)\n return\n if diameter != \"*\" and (diameter not in self.diameters):\n print(\"Diameter %s does not exist, please check it\" % diameter)\n return\n if thickness != \"*\" and (thickness not in self.thicknesses):\n print(\"thickness %s does not exist, please check it\" % thickness)\n return\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title('%s' % self.plotTypes[dis_type])\n if diameter != \"*\":\n if thickness != \"*\":\n ax.set_title('%s , diameter = %s nm, thickness = %s nm' % (self.plotTypes[dis_type],diameter,thickness))\n else:\n ax.set_title('%s , diameter = %s nm' % (self.plotTypes[dis_type],diameter))\n \n if (thickness != \"*\" and diameter == \"*\"):\n ax.set_title('%s , thickness = %s nm' % (self.plotTypes[dis_type],thickness))\n\n for diam in sorted(self.distrs[dis_type]):\n if (diam==diameter and diameter!=\"*\") or diameter==\"*\":\n for thick in sorted(self.distrs[dis_type][diam]):\n if (thick==thickness and thickness!=\"*\") or thickness==\"*\":\n d = self.distrs[dis_type][diam][thick]\n if thickness==\"*\" and diameter==\"*\":\n lb = \" d= %s nm, t= %s nm\" % (diam,thick)\n else:\n if diameter==\"*\":\n lb = \"d= %s nm\" % (diam)\n else:\n lb = \"t= %s nm\" % (thick)\n ax.plot(d.x, d.y, label=lb)\n \n ax.legend(numpoints=1,loc=4)\n ax.grid(True)\n # Here we need to explicity say to show the plot\n 
plt.show()", "def set_plot_props(self):\n \n if self.type == \"gas\":\n self.marker = \"v\"\n self.color = \"cyan\"\n \n elif self.type == \"cluster\":\n self.marker = \"o\"\n self.color = \"maroon\"\n \n elif self.type == \"spiral\":\n self.marker = \"*\"\n self.color = \"green\"\n \n elif self.type == \"loop\":\n self.marker = \"o\"\n self.color = \"maroon\"\n \n elif self.type == \"giant\":\n self.marker = \"s\"\n self.color = \"red\"\n \n return", "def aperture_photometry(self, x_stars, y_stars, aperture, background):\n print '--------------------------------------------------------------------- aperture_photometry'\n\n #--- CONSTANTS ---#\n gain = 0.73 # Gain of camera: electrons pr ADU (ADU = counts from object)- ajust to camera!\n ron = 3.3 # Read out noise - ajust to camera!\n con = 25 # Magnitude constant\n\n #--- PHOTOMETRY ---#\n # Find fluxes:\n N = len(x_stars) # Number of stellar objects \n flux_star = zeros((self.n,N))\n SNR_i = zeros((self.n,N))\n for i in range(self.n): # Loop over all images: if timeseries is available\n for j in range(N): # Loop over all stars and find flux: using same aperture size\n flux_sky, n_star_pix, flux_star[i][j] = self.aperture(self.LF_i[i], x_stars[j], y_stars[j],\\\n aperture, background)\n SNR_i[i][j] = self.SNR(flux_sky, n_star_pix, flux_star[i][j], gain, ron)\n\n #--- FINAL CORRECTIONS ---#\n print flux_star, flux_sky, SNR_i\n return flux_star, SNR_i", "def geom_type(self): # -> str:\n ...", "def plot_rfs(self):\n self.xe = self.data['XE']\n self.ye = self.data['YE']\n# self.IE = self.data['IE']\n self.Var = self.data['Var']\n std = np.sqrt(np.mean(self.Var))\n fig = plt.gcf()\n ax = plt.gca()\n ax.set_xlim((np.min(self.xe), np.max(self.xe)))\n ax.set_ylim((np.min(self.ye), np.max(self.ye)))\n for xe, ye in zip(self.xe, self.ye):\n circ = plt.Circle((xe, ye), std, color='b', alpha=0.4)\n fig.gca().add_artist(circ)", "def createGraphics(self):\r\n\r\n def variableColor(variable):\r\n if variable.type.startswith(('Float', 'Real')):\r\n return QColor.fromRgb(26, 77, 179)\r\n elif variable.type.startswith(('Enumeration', 'Int', 'UInt')):\r\n return QColor.fromRgb(179, 77, 26)\r\n elif variable.type == 'Boolean':\r\n return QColor.fromRgb(255, 0, 255)\r\n elif variable.type == 'String':\r\n return QColor.fromRgb(26, 114, 16)\r\n elif variable.type == 'Binary':\r\n return QColor.fromRgb(81, 81, 81)\r\n else:\r\n return QColor.fromRgb(0, 0, 0)\r\n\r\n inputVariables = []\r\n outputVariables = []\r\n maxInputLabelWidth = 0\r\n maxOutputLabelWidth = 0\r\n\r\n textItem = QGraphicsTextItem()\r\n fontMetrics = QFontMetricsF(textItem.font())\r\n\r\n for variable in self.modelDescription.modelVariables:\r\n if variable.causality == 'input':\r\n inputVariables.append(variable)\r\n elif variable.causality == 'output':\r\n outputVariables.append(variable)\r\n\r\n for variable in inputVariables:\r\n maxInputLabelWidth = max(maxInputLabelWidth, fontMetrics.width(variable.name))\r\n\r\n for variable in outputVariables:\r\n maxOutputLabelWidth = max(maxOutputLabelWidth, fontMetrics.width(variable.name))\r\n\r\n from math import floor\r\n\r\n scene = QGraphicsScene()\r\n self.ui.graphicsView.setScene(scene)\r\n group = QGraphicsItemGroup()\r\n scene.addItem(group)\r\n group.setPos(200.5, -50.5)\r\n lh = 15 # line height\r\n\r\n w = max(150., maxInputLabelWidth + maxOutputLabelWidth + 20)\r\n h = max(50., 10 + lh * max(len(inputVariables), len(outputVariables)))\r\n\r\n block = QGraphicsRectItem(0, 0, w, h, group)\r\n block.setPen(QColor.fromRgb(0, 0, 
0))\r\n\r\n pen = QPen()\r\n pen.setWidthF(1)\r\n\r\n font = QFont()\r\n font.setPixelSize(10)\r\n\r\n # inputs\r\n y = floor((h - len(inputVariables) * lh) / 2 - 2)\r\n for variable in inputVariables:\r\n text = QGraphicsTextItem(variable.name, group)\r\n text.setDefaultTextColor(QColor.fromRgb(0, 0, 0))\r\n text.setFont(font)\r\n text.setX(3)\r\n text.setY(y)\r\n\r\n polygon = QPolygonF([QPointF(-8, y + 7.5), QPointF(-1, y + 11), QPointF(-8, y + 14.5)])\r\n\r\n path = QPainterPath()\r\n path.addPolygon(polygon)\r\n path.closeSubpath()\r\n contour = QGraphicsPathItem(path, group)\r\n contour.setPen(QPen(Qt.NoPen))\r\n contour.setBrush(variableColor(variable))\r\n pen = QPen()\r\n pen.setColor(variableColor(variable))\r\n pen.setJoinStyle(Qt.MiterJoin)\r\n contour.setPen(pen)\r\n\r\n y += lh\r\n\r\n # outputs\r\n y = floor((h - len(outputVariables) * lh) / 2 - 2)\r\n for variable in outputVariables:\r\n text = QGraphicsTextItem(variable.name, group)\r\n text.setDefaultTextColor(QColor.fromRgb(0, 0, 0))\r\n text.setFont(font)\r\n text.setX(w - 3 - text.boundingRect().width())\r\n text.setY(y)\r\n\r\n polygon = QPolygonF([QPointF(w + 1, y + 7.5), QPointF(w + 8, y + 11), QPointF(w + 1, y + 14.5)])\r\n\r\n path = QPainterPath()\r\n path.addPolygon(polygon)\r\n path.closeSubpath()\r\n contour = QGraphicsPathItem(path, group)\r\n contour.setPen(QPen(Qt.NoPen))\r\n contour.setBrush(variableColor(variable))\r\n pen = QPen()\r\n pen.setColor(variableColor(variable))\r\n pen.setJoinStyle(Qt.MiterJoin)\r\n contour.setPen(pen)\r\n\r\n y += lh", "def SNR(self, flux_sky, n_pix_star, flux_star, gain, ron):\n SNR = (gain*flux_star/sqrt(gain*flux_star + n_pix_star*gain*flux_sky + n_pix_star*ron**2)) \n return SNR", "def __init__(self, stars_x, stars_y, stars_f):\n self.xpos = stars_x\n self.ypos = stars_y\n self.flux = stars_f\n\n return", "def __init__(self, temperatures, daytypes, consumptions, nb_days, nb_particles, sigma2, kappa, u_heat):\n self.temperatures = temperatures\n self.daytypes = daytypes\n self.consumptions = consumptions\n self.nb_days = nb_days\n self.nb_particles = nb_particles\n self.sigma2 = sigma2\n self.kappa = kappa\n self.u_heat = u_heat\n #Var init\n self.s = np.zeros((nb_days, nb_particles)) \n self.g_heat = np.zeros((nb_days, nb_particles))\n #sigma_s and sigma_g are fixed\n self.sigma_s_star_2 = np.zeros((1, nb_particles)) \n self.sigma_g_star_2 = np.zeros((1, nb_particles))\n self.x_season = np.zeros((1, nb_particles))\n self.x_heat = np.zeros((1, nb_particles))\n self.x = np.zeros((1, nb_particles))\n self.w = np.zeros((1, nb_particles))", "def Mstar_function(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n if not p.xlim:\n p.xlim = np.array([1e10,1e13])\n\n df_all = pd.read_pickle(p.d_data + 'galaxy_selection/all_z0_galaxies')\n Mstar = df_all['M_star_caesar'].values\n\n logM_star = np.log10(Mstar)\n dM = 0.25\n N_gal = len(np.where((Mstar > Mstar.min()) & (Mstar < (Mstar.min() + dM)))[0])\n logM_star_bin = np.arange(logM_star.min(), logM_star.max(), dM)\n logM_star_bin_c = logM_star_bin[0:-1] + (logM_star_bin[1]-logM_star_bin[0])/2\n\n N_gal_array = np.zeros(len(logM_star_bin)-1)\n\n # Number of galaxies in each stellar mass bin\n for i in range(len(logM_star_bin)-1):\n N_gal_array[i] = len(np.where((logM_star > logM_star_bin[i]) & (logM_star < (logM_star_bin[i+1])))[0])\n\n # Corresponding volume density of galaxies\n n_gal_array = N_gal_array / (p.box_size)**3 # number of galaxies per Mpc^3\n\n fig, ax = plt.subplots()\n hb = 
ax.plot(logM_star_bin_c, np.log10(n_gal_array))\n ax.set_ylabel('$\\log\\Phi$ [Mpc$^{-3}$]')\n ax.set_xlabel('log Stellar Mass [M$_{\\odot}$]')\n ax.set_ylim([-7,0.2])\n plt.tight_layout()\n plt.show()", "def get_variables_of_type(self, variable_type):\n if isinstance(variable_type,str):\n variable_key = variable_type\n else:\n #it is a class\n variable_key = variable_type.__name__\n return self._var_kinds[variable_key]", "def parse(self):\n\n special_vars = {'amplification', 'copy number loss', \n 'epigenetic silencing', 'overexpression'}\n\n special_terms = ['dna binding domain', 'egfrv', 'truncating mutation',\n 'fusion', 'mutation', 'deletion', 'duplication', 'insertion',\n 'hypermethylation']\n\n var = self.var.lower()\n\n # Check if the stop sign '*' in the variation\n if '*' in var:\n self.stop_sign = True\n \n # Type \"exact match with special pre-difined variations\"\n if var in special_vars:\n self.type = var\n return\n \n # Type \"with special term\"\n for term in special_terms:\n if term in var:\n self.type = term\n return\n\n # Type \"point\": A123B or A123* or A123\n if re.match('^[a-z][0-9]+[a-z|*]?$', var):\n split = re.split('[0-9]+', var)\n self.type = 'point'\n self.start_amino = split[0]\n self.end_amino = split[1]\n s = re.search('[0-9]+', var)\n self.pos = int(s.group())\n return\n\n # Type \"del/ins/trunc/splice/dup/fs\": A123del or A123_B234del\n for suffix in ['del', 'ins', 'trunc', 'splice', 'dup', 'fs']:\n if suffix in var:\n self.type = self.alias_dict.get(suffix, suffix)\n self._parse_suffix(var, suffix)\n return\n\n print('[INFO] variation cannot be parsed: %s' % self.var)", "def format_variable(self, variablesReference, filter=None, start=None, count=None, format=None):\n\n # format is ignored, TODO?\n\n vs = None if start is None or start == 0 else start\n es = None if count is None or count == 0 else count\n\n var, name, tt, parent = self.scope_assign[variablesReference]\n\n # print(str(var) + \", \" + str(name) + \", \" + str(tt))\n\n is_slotted = False\n\n if not isinstance(var, dict) and not isinstance(var, list):\n if hasattr(var, \"__dict__\"):\n var = var.__dict__\n else:\n is_slotted = True\n\n # print (str(var))\n\n if not is_slotted and isinstance(var, dict):\n if filter is not None and filter == \"indexed\":\n return []\n keys = sorted(var.keys())\n elif not is_slotted:\n if filter is not None and filter == \"named\":\n return []\n keys = range(len(var))\n elif is_slotted:\n keys = dir(var)\n\n if \"self\" in keys:\n keys.remove(\"self\")\n keys = [\"self\"] + keys\n\n # print (str(keys))\n\n it = 0\n total = 0\n variables = []\n for vkey in keys:\n if vs is None or it >= vs:\n var_ref = self.scope_var_id\n if is_slotted:\n value = getattr(var, vkey)\n else:\n value = var[vkey]\n\n vardesc = {}\n variables.append(vardesc)\n\n vardesc[\"name\"] = vkey\n vardesc[\"value\"] = str(value)\n vardesc[\"type\"] = str(type(value))\n # vardesc[\"presentationHint\"] # TODO!!!\n vardesc[\"evaluateName\"] = vkey\n vardesc[\"variablesReference\"] = var_ref\n\n vv_inner = value\n vv_slotted = False\n if not isinstance(vv_inner, dict) and not isinstance(vv_inner, list):\n if hasattr(vv_inner, \"__dict__\"):\n vv_inner = vv_inner.__dict__\n else:\n vv_slotted = True\n\n if not vv_slotted and isinstance(vv_inner, dict):\n vardesc[\"namedVariables\"] = len(vv_inner.keys())\n elif not vv_slotted:\n vardesc[\"indexedVariables\"] = len(vv_inner)\n else:\n vardesc[\"namedVariables\"] = len(dir(vv_inner))\n\n self.scope_assign[var_ref] = (value, vkey, str(type(value)), 
var)\n\n self.scope_var_id += 1\n total += 1\n it += 1\n if es is not None and total >= es:\n break\n\n return variables", "def _parse_stars_according_to_image(self, starClipSigma=40.0):\n if issubclass(self.imageType, ReducedScience):\n # Check if all the images were corrected to Airmass 0.0\n if np.sum([img.airmass for img in self.imageList]) > 0:\n raise ValueError('All images in the imageList must be corrected to airmass=0.0 before combining')\n\n # Compute the star masks for this image stack.\n starMask = self._construct_star_mask()\n\n else:\n starMask = False\n starClipSigma = 0\n\n return starMask, starClipSigma", "def analyse_type(self):\n \n t = \" \" # Holder string\n \n data_base = '/local/duman/SIMULATIONS/many_polymers_5/density_0.2/kappa_'\n path = data_base + str(self.k) + '/fp_' + str(self.f) + '/CLUSTER/avg_size.txt'\n if os.path.exists(path):\n data = np.loadtxt(path, dtype=float)\n else:\n data = 10.\n self.cs = data\n print path\n print data\n if data < 12.:\n t = \"gas\"\n elif data > 200.:\n t = \"giant\"\n else:\n t = \"cluster\"\n\n self.type = t # Type of point\n \n return", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])" ]
[ "0.6919843", "0.5500659", "0.5284528", "0.5282017", "0.49810436", "0.49508908", "0.49358875", "0.47995615", "0.47526926", "0.4750297", "0.4654779", "0.4653055", "0.46524152", "0.46244058", "0.46109277", "0.45994213", "0.45680085", "0.45665252", "0.45277017", "0.4504224", "0.44967166", "0.44825602", "0.44634634", "0.44269055", "0.4426561", "0.4409574", "0.4406664", "0.44041947", "0.4397185", "0.4381367" ]
0.76934016
0
Child function of plot_variable_stars. Process the DataFrame to select only stars marked as 'var_type' variable stars.
```python
def get_variable_stars(df_data, df_variables_names, variabletype=None):
    if variabletype is None:
        variabletype = ['CEP', 'BCEP', 'BCEPS', 'DSCT', 'SR', 'SRA', 'SRB', 'SRC', 'SRD', 'RR', 'RRAB', 'RRC',
                        'GDOR', 'SPB', 'M', 'LPV']

    print "Selecting variable stars.."
    # create a string "var_type" of variabletype separated by or ('|').
    # var_type = "|".join(variabletype)
    # check if var_type is contained in Type (any or all, partial or not)
    # are_variables = df_variables_names[df_variables_names.Type.str.contains(var_type) == True]  # fails with "is True"
    # are_variables.Type = are_variables.Type.str.replace(".*BCEP.*", "BCEP")  # rename all types containing 'BCEP'
    are_variables = df_variables_names[df_variables_names.Type.isin(variabletype)]
    types_df = are_variables[['hip', 'tycho2_id', 'source_id', 'Type', 'Name']]
    print "..Done"
    print "Preparing subselection of initial DataFrame.."
    print "..Making Hipparcos list.."
    hip_list = are_variables.hip.tolist()
    hip_list = np.array(hip_list)
    hip_list = hip_list[~np.isnan(hip_list)]  # remove the nans
    hip_list = list(hip_list)
    print "..Making Tycho2 list.."
    tycho2_list = are_variables.tycho2_id.tolist()
    tycho2_list = np.array(tycho2_list)
    tycho2_list = tycho2_list[tycho2_list != 'nan']  # tycho2 is str
    tycho2_list = list(tycho2_list)
    print "..Done\n----------"

    print "Getting Hipparcos and Tycho variable objects.."
    hip_objects = df_data[df_data.hip.isin(hip_list)]
    hip_objects = pd.merge(hip_objects, types_df, on='hip', how='inner')
    if 'tycho2_id_y' in hip_objects.columns:
        hip_objects = hip_objects.drop('tycho2_id_y', axis=1)
        hip_objects = hip_objects.rename(columns={'hip_x': 'hip', 'tycho2_id_x': 'tycho2_id'})

    tycho_objects = df_data[df_data.tycho2_id.isin(tycho2_list)]
    tycho_objects = pd.merge(tycho_objects, types_df, on='tycho2_id', how='inner')
    if 'hip_y' in tycho_objects.columns:
        tycho_objects = tycho_objects.drop('hip_y', axis=1)
        tycho_objects = tycho_objects.rename(columns={'hip_x': 'hip', 'tycho2_id_x': 'tycho2_id'})
    print "..Done\n----------"

    print "Getting roAp stars from file.."
    # roAP_names.csv contains tycho2_id names of roAp stars
    with open('roAP/roAP_names.csv') as roAP_file:
        roap_objects_list = roAP_file.readlines()
        roap_objects_list = [line.rstrip() for line in roap_objects_list]
    roap_objects = df_data[df_data.tycho2_id.isin(roap_objects_list)]
    column_number = len(roap_objects.columns)
    roap_objects.insert(column_number, 'Type', 'roAp')
    print "..Done\n----------"

    variable_df = pd.concat([hip_objects, tycho_objects, roap_objects], axis=0, ignore_index=True)
    variable_df.source_id = variable_df.source_id.fillna(-9999).astype(int)

    return variable_df
```
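A minimal end-to-end sketch tying this selection step back to its plotting parent above, assuming both functions are defined in the same script. The catalogue filenames are placeholders; 'roAP/roAP_names.csv' is the path hard-coded inside the function itself:

```python
# Hypothetical pipeline: select the variable stars, then overlay them on the HR diagram.
import numpy as np               # used inside get_variable_stars
import pandas as pd
import matplotlib.pyplot as plt

df_data = pd.read_csv('tgas_xmatch_catalogue.csv')           # per-star data: hip, tycho2_id, B_V, M_V, ...
df_variables_names = pd.read_csv('variable_star_names.csv')  # cross-match with hip, tycho2_id, source_id, Type, Name

variables_df = get_variable_stars(df_data, df_variables_names)  # also reads roAP/roAP_names.csv internally
plot_variable_stars(variables_df, x='B_V', y='M_V')
plt.show()
```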
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_variable_stars(variablesdf, variabletype=None, x='B_V', y='M_V'):\n if variabletype is None:\n variabletype = ['CEP', 'BCEP', 'BCEPS', 'DSCT', 'SR', 'SRA', 'SRB', 'SRC', 'SRD', 'RR', 'RRAB', 'RRC', 'GDOR',\n 'SPB', 'M', 'LPV', 'roAp']\n markers = ['^', 'D', 'D', 'v', 's', 'D', 'D', 'D', 'D', 's', 'D', 'D', 'D', 'o', 'p', 'o', 'o']\n colors = ['k', 'k', 'k', '#00c000', 'r', 'r', 'r', 'r', 'r', 'm', 'm', 'm', '#00c0ff', (1, .7, 0), 'w', 'w', 'r']\n sizes = [50, 40, 40, 40, 50, 40, 40, 40, 40, 50, 50, 50, 40, 40, 45, 40, 40]\n labels = ['', \"BCEP, BCEPS\", '', 'DSCT', 'SR', \"SRA, SRB, SRC, SRD\", '', '', '', 'RR', \"RRAB, RRC\", '', 'GDOR',\n 'SPB', '', 'LPV', 'roAp']\n for i in range(len(variabletype)):\n if i in [2, 6, 7, 8, 11]:\n my_label = None\n else:\n my_label = \"%s\" % labels[i]\n plt.scatter(variablesdf[x].loc[variablesdf.loc[:, 'Type'] == variabletype[i]], variablesdf[y]\n .loc[variablesdf.loc[:, 'Type'] == variabletype[i]], facecolor=colors[i], marker=markers[i],\n s=sizes[i], label=my_label, edgecolor='k')\n print \"plotting %s as %s%s\" % (variabletype[i], colors[i], markers[i])\n return", "def plot_data_types(self, variable, **kwargs):\n return self.visualizer.plot_data_types(variable, **kwargs)", "def select_variables(df, dtype=\"numeric\"):\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n if dtype == \"numeric\":\n subset = df.copy().select_dtypes(include = numerics)\n else:\n subset = df.copy().select_dtypes(include != numerics)\n return(subset)", "def create_plot(x_var, y_var):\r\n\r\n FILE_PATH = 'application/star_data.csv'\r\n TARGET_VAR = 'star_type'\r\n SIZE_VAR = 'r_clipped'\r\n WIDTH = 1000\r\n HEIGHT = 600\r\n\r\n # Get the data\r\n df = pd.read_csv(FILE_PATH)\r\n fig = px.scatter(df, x=x_var, y=y_var, color=TARGET_VAR, size=SIZE_VAR, \r\n width=WIDTH, height=HEIGHT)\r\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\r\n\r\n return graphJSON", "def plot_selected(df, title='title', columns=[], shouldNormalize=True, symbol='any stock'):\n # df = df[columns][start_index:end_index]\n # df = df.loc[start_index:end_index, columns]\n df = df.loc[:, columns]\n ylabel = \"Price\"\n normal = \"un normalized\"\n if shouldNormalize:\n df = normalize(df.loc[:, ['Close', 'sma200']])\n ylabel = \"%\"\n normal = \"normalized\"\n # print('df.shape in plot=',df.shape)\n plot_data(df, title=title, ylabel=ylabel)", "def plot_selected(df, title='title', columns=[], shouldNormalize = True, symbol='any stock'):\n #df = df[columns][start_index:end_index]\n #df = df.loc[start_index:end_index, columns]\n df = df.loc[:, columns]\n ylabel=\"Price\"\n normal = \"un normalized\"\n if shouldNormalize:\n df = normalize(df.loc[:,['Close', 'sma200']])\n ylabel = \"%\"\n normal = \"normalized\"\n #print('df.shape in plot=',df.shape)\n plot_data(df, title=title, ylabel=ylabel)", "def plot_type_of_topic(data_frame: pb.DataFrame) -> None:\n plt.interactive(False)\n plt.figure()\n data_frame.plot(kind='bar', x= data_frame['TopicID'])\n plt.show()", "def update_graph_type(variable_dropdown_x, variable_dropdown_y):\n\n options = {\n \"violin\": {\"label\": \"Violin\", \"value\": 1},\n \"scatter\": {\"label\": \"Scatter\", \"value\": 2},\n \"bar\": {\"label\": \"Bar\", \"value\": 3},\n \"pie\": {\"label\": \"Pie\", \"value\": 4},\n # \"box\": {\"label\": \"Box\", \"value\": 5,},\n }\n\n if variable_dropdown_x is None:\n return [], None, True, \"Select a graph type\"\n\n graph_selection_list = []\n\n if variable_dropdown_y is None:\n # Only one 
variable selected\n field_id = variable_dropdown_x\n value_type = get_field_type(field_id)\n\n supported_graphs = value_type.supported_graphs\n\n for option_key in options:\n option = options[option_key]\n graph_type = option[\"value\"]\n if graph_type in supported_graphs:\n graph_selection_list.append(option)\n\n else:\n # Both variables selected\n # Logic is:\n # If the x-axis variable is continuous, integer, date or time:\n # If the y-axis variable is continuous or integer:\n # You can use scatter plot\n # Else if x-axis variable is categorical:\n # If the y-axis variable is continuous or integer:\n # You can use violin plot, box plot\n x_value_type = get_field_type(str(variable_dropdown_x))\n y_value_type = get_field_type(str(variable_dropdown_y))\n\n if (\n x_value_type == ValueType.INTEGER\n or x_value_type == ValueType.CONT\n or x_value_type == ValueType.DATE\n or x_value_type == ValueType.TIME\n ):\n if y_value_type == ValueType.INTEGER or y_value_type == ValueType.CONT:\n graph_selection_list.append(options[\"scatter\"])\n\n elif x_value_type == ValueType.CAT_SINGLE or x_value_type == ValueType.CAT_MULT:\n if y_value_type == ValueType.INTEGER or y_value_type == ValueType.CONT:\n # graph_selection_list.append(options[\"box\"])\n graph_selection_list.append(options[\"violin\"])\n\n if len(graph_selection_list) == 0:\n return graph_selection_list, None, True, \"No supported graph types\"\n\n return (\n graph_selection_list,\n graph_selection_list[0][\"value\"],\n False,\n \"Select a graph type\",\n )", "def plotvars_core(gs, data, plotfun=vis.plot_r, plot_radars=True,\n projection=PROJECTION, **kws):\n trans = ccrs.PlateCarree()\n axd = dict(ker=plt.subplot(gs[0, 0], projection=projection),\n kum=plt.subplot(gs[0, 1], projection=projection),\n van=plt.subplot(gs[1, 0], projection=projection),\n com=plt.subplot(gs[1, 1], projection=projection))\n for key in ['ker', 'kum']:\n axd[key].set_xticks([])\n for key in ['kum', 'com']:\n axd[key].set_yticks([])\n ax_cb = plt.subplot(gs[:, -1])\n for key in NAMES.keys():\n ax = axd[key]\n ax.set_ymargin(0)\n ax.set_xmargin(0)\n plotfun(data[I_RADAR[key]], ax=ax, cax=ax_cb, transform=trans, **kws)\n ax.set_title(NAMES[key])\n ax.coastlines(resolution='10m')\n if plot_radars:\n if key != 'com':\n RADAR[key].draw_marker(ax=ax, transform=trans)\n else:\n for radarkey in ['ker', 'kum', 'van']:\n RADAR[radarkey].draw_marker(ax=ax, transform=trans)\n return axd", "def get_safety_vars_plot(self):\n if 'safety_vars_stats' not in self.stats:\n raise ValueError('No safety vars statistics present in this evaluator.')\n\n safety_vars = self.stats['safety_vars_stats'][0].keys()\n n_plots = len(safety_vars)\n fig, axes = plt.subplots(n_plots, 1, figsize=(8, 6 * n_plots))\n\n for idx, var in enumerate(safety_vars):\n series = collections.defaultdict(list)\n for ep in self.stats['safety_vars_stats']:\n for stat in ep[var]:\n series[stat].append(ep[var][stat])\n ax = axes[idx]\n for stat in ['min', 'max']:\n ax.plot(np.squeeze(np.array(series[stat])), label=stat)\n x = range(len(series['mean']))\n\n mean = np.squeeze(np.array(series['mean']))\n std_dev = np.squeeze(np.array(series['std_dev']))\n ax.plot(x, mean, label='Value')\n ax.fill_between(\n range(len(series['mean'])), mean - std_dev, mean + std_dev, alpha=0.3)\n ax.set_title('Stats for {}'.format(var))\n ax.legend()\n ax.spines['top'].set_visible(False)\n\n ax.xaxis.set_ticks_position('bottom')\n ax.set_xlabel('Episode #')\n ax.set_ylabel('Magnitude')\n ax.plot()\n return fig", "def 
generate_var_scatter(self):\n pass", "def plot_histplots(\n df: pd.DataFrame,\n var_type: str = \"quant\",\n drop_cols: list = None,\n figsize=(15, 20),\n sub_col=3,\n ticksize=15,\n div: int = 1,\n subplot=True,\n) -> sns.histplot:\n\n assert var_type == \"quant\" or \"qual\", \"var_type has to be either 'quant' or 'qual'.\"\n\n def print_error():\n print(f\"Input var_type: {var_type} is invalid.\")\n print(\"Valide var_type can only be 'quant' or 'qual'.\")\n return\n\n def print_col():\n print(f\"Number of {var_type}itaive columns: {df.shape[1]}\")\n return\n\n def create_fig():\n # create figure and axes based on the number of columns of the dataframe\n _, axes = plt.subplots(\n ceil(len(df.columns) / sub_col), sub_col, figsize=figsize\n )\n y = 0 # set counter\n return axes, y\n\n if not subplot:\n # plt.figure(figsize=figsize)\n if var_type == \"quant\":\n sns.histplot(x=df)\n elif var_type == \"qual\":\n sns.histplot(y=df)\n else:\n print_error()\n\n else:\n # drop unnecessary columns\n if drop_cols:\n df = df.drop(drop_cols, axis=1)\n\n # create relative dataframe according to the var_type\n if var_type == \"quant\":\n # keep only quantitative features\n df = create_quanti_df(df)\n print_col()\n axes, y = create_fig()\n # plot histplot for each column of data\n for col in df.columns:\n i, j = divmod(y, sub_col)\n # sns.histplot(x=df[col], ax=axes[i, j]).set_title(col, fontsize=20)\n sns.histplot(x=df[col][: int(len(df) / div)], ax=axes[i, j]).set_title(\n col, fontsize=20\n )\n y += 1\n elif var_type == \"qual\":\n # keep only qualitatve features\n df = create_quali_df(df)\n print_col()\n axes, y = create_fig()\n # plot histplot for each column of data\n for col in df.columns:\n i, j = divmod(y, sub_col)\n ax = axes[i, j]\n sns.histplot(y=df[col], ax=ax)\n ax.set_title(col, fontsize=20)\n ax.tick_params(axis=\"y\", which=\"major\", labelsize=ticksize)\n y += 1\n else:\n print_error()\n\n plt.tight_layout()\n plt.show()\n return", "def check_is_plottable(self, var):\n self.plot_button.disabled = False # important to enable button once disabled\n data = self.data[var[0]]\n self.plot_button.disabled = len(data.dims) <= 1", "def star_rating(table, record_id, splitstars=False):\n import uuid\n id = uuid.uuid4()\n row=db(db.plugin_wiki_rating.tablename==table)(db.plugin_wiki_rating.record_id==record_id).select().first()\n rating = row.rating if row else 0\n callback = URL('plugin_wiki', 'star_rate', args = [table,record_id])\n incr = 0.5 if splitstars else 1\n return TAG[''](DIV(_id='star'+str(id),_class='rating'),\n SCRIPT(\"jQuery(document).ready(function(){jQuery('%(uid)s').rating('%(callback)s',{increment:%(incr)s, maxvalue:5, curvalue:%(rating)s});});\" % dict(uid='#star'+str(id), callback=callback,incr=incr, rating=rating)))", "def plot_diagram_value(\n df_bugs: pd.DataFrame,\n column_to_inspect: str,\n exclude_values: List[str] = [],\n mapping_latex: Dict[str, str] = None,\n latex_format: bool = False\n ):\n df = expand_columns(df_bugs, column_to_inspect)\n records = []\n for col_value in list(df[column_to_inspect].unique()):\n count = len(df[df[column_to_inspect] == col_value])\n if latex_format and mapping_latex is not None:\n col_value = mapping_latex[col_value]\n records.append(\n {\"code\": col_value, \"count\": count}\n )\n df_agg = pd.DataFrame.from_records(records)\n df_agg = df_agg.groupby(\"code\").sum().reset_index()\n print(\"-\" * 80)\n for i, row in df_agg.iterrows():\n code = str(row['code'])\n count = str(row['count'])\n if code not in exclude_values:\n if 
latex_format:\n print(\n \"\\\\node[above right=-.5em and -1.5em of \" + code +\n \"] {\" + count + \"};\"\n )\n else:\n print(f\"'{code}' was annotated {count} time(s).\")\n print(\"-\" * 80)", "def _format_variables(df: EDAFrame, cfg: Config, data: Dict[str, Any]) -> Dict[str, Any]:\n res: Dict[str, Any] = {}\n # variables\n if not cfg.variables.enable:\n res[\"has_variables\"] = False\n return res\n\n res[\"variables\"] = {}\n res[\"has_variables\"] = True\n for col in df.columns:\n try:\n stats: Any = None # needed for pylint\n dtp = df.get_eda_dtype(col)\n tab_names: List[str] = []\n if isinstance(dtp, Continuous):\n itmdt = Intermediate(col=col, data=data[col], visual_type=\"numerical_column\")\n stats = format_num_stats(data[col])\n tab_names = [\"Stats\", \"Histogram\", \"KDE Plot\", \"Normal Q-Q Plot\"]\n elif type(dtp) in [Nominal, SmallCardNum, GeoGraphy, GeoPoint]:\n itmdt = Intermediate(col=col, data=data[col], visual_type=\"categorical_column\")\n stats = format_cat_stats(\n data[col][\"stats\"], data[col][\"len_stats\"], data[col][\"letter_stats\"]\n )\n tab_names = [\"Stats\", \"Word Length\", \"Pie Chart\", \"Word Cloud\", \"Word Frequency\"]\n elif isinstance(dtp, DateTime):\n itmdt = Intermediate(\n col=col,\n data=data[col][\"stats\"],\n line=data[col][\"line\"],\n visual_type=\"datetime_column\",\n )\n stats = stats_viz_dt(data[col][\"stats\"])\n else:\n raise RuntimeError(f\"the type of column {col} is unknown: {type(dtp)}\")\n\n rndrd = render(itmdt, cfg)\n layout = rndrd[\"layout\"]\n figs_var: List[Figure] = []\n for tab in layout:\n try:\n fig = tab.children[0]\n except AttributeError:\n fig = tab\n # fig.title = Title(text=tab.title, align=\"center\")\n figs_var.append(fig)\n comp = components(figs_var)\n\n res[\"variables\"][col] = {\n \"tabledata\": stats,\n \"col_type\": itmdt.visual_type.replace(\"_column\", \"\"),\n \"tab_names\": tab_names,\n \"plots\": comp,\n }\n\n except:\n print(f\"error happended in column:{col}\", file=sys.stderr)\n raise\n\n return res", "def plot_missing_values(self, variable, **kwargs):\n return self.visualizer.plot_missing_values(variable, **kwargs)", "def drawPairPlot(df):\n plt.style.use('dark_background')\n warnings.filterwarnings(\"ignore\")\n types = getSpectralTypes()\n colors = getColors()\n sns.set_palette(sns.color_palette(colors))\n g = sns.pairplot(df, hue=\"spectral_type\", hue_order=types, dropna=True,\n vars=[\"stellar_age\", \"stellar_temperature\", \n \"stellar_luminosity\", \"stellar_mass\", \n \"stellar_radius\", \"stellar_surface_gravity\", \n \"optical_magnitude\", \"stellar_metallicity\"])\n plt.show()", "def plotting_helper_method(x_axis, y_axis, df):\n genre_dict = {\n 'g':'Rock',\n 'b':'Hip-Hop',\n 'r':'Pop'\n }\n for color, genre in genre_dict.items():\n filtered_df = df[df['genre'] == genre]\n plt.scatter(filtered_df[x_axis], filtered_df[y_axis], c=color, label=genre)", "def plot(var):\n # MISSCHIEN KUNNEN WE HIER NOG IETS MEE\n # total_dead = len(train_data[\"Survived\"] == 0)\n # total_survived = len(train_data[\"Survived\"] == 1)\n # died = train_data[train_data[\"Survived\"] == 0][var].value_counts() / total_dead\n # survived = train_data[train_data[\"Survived\"] == 1][var].value_counts() / total_survived\n sns.set()\n sns.set_color_codes(\"pastel\")\n\n # order bars for family size variable\n if var == \"FamSize\":\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=.7, order=[\"alone\", 1, 2, 3, \"4 or more\"]).\\\n tick_params(labelsize=18)\n else:\n 
sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=1.1).tick_params(labelsize=18)\n\n # plot style properties\n ax = plt.gca()\n\n for ax in plt.gcf().axes:\n x = ax.get_xlabel()\n y = ax.get_ylabel()\n ax.set_xlabel(x, fontsize=20)\n ax.set_ylabel(y, fontsize=20)\n\n plt.title(\"Ratio of survivors for variable \" + str(var), fontsize=22)\n t = ax.title\n t.set_position([.5, 1.05])\n plt.ylim([0, 1])\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/survived_\" + str(var) + \".png\", bbox_inches=\"tight\")\n\n plt.show()", "def plot_posteriors(self, variants=[]):\n if variants != []:\n for var in variants:\n if var not in self.posteriors.keys():\n raise ValueError(('Variants must only be a value in '\n 'bucket_col_name'))\n self._plot_posteriors(variants)", "def plot_variables(labels, plot, data):\n # Create individual figures\n fig = subplots.make_subplots(rows=1, cols=1)\n for var in labels:\n if plot == 0:\n counts = data[var].value_counts()\n fig.append_trace(go.Bar(x=counts, y=counts.index, orientation='h'), 1, 1)\n elif plot == 1:\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][0], 1, 1)\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][1], 1, 1)\n elif plot == 2:\n fig.add_trace(go.Box(x=list(data[data[\"Score\"] == \"good\"][var]), name=\"Good\", hoverinfo=\"x\", marker_color='mediumturquoise'))\n fig.add_trace(go.Box(x=list(data[data[\"Score\"] == \"bad\"][var]), name=\"Bad\", hoverinfo=\"x\", marker_color='darkorange'))\n else:\n raise ValueError(\"plot number must be 0, 1, or 2\")\n # Create buttons for drop down menu\n buttons = []\n for i, label in enumerate(labels):\n if plot == 0:\n visibility = [i == j for j in range(len(labels))]\n else:\n visibility = [j//2 == i for j in range(2*len(labels))]\n button = dict(\n label=label,\n method='update',\n args=[{'visible': visibility},\n {'title': label}])\n buttons.append(button)\n updatemenus = list([\n dict(active=-1,\n x=1.06, y=1.27,\n buttons=buttons\n )\n ])\n # Setup layout\n if plot == 0:\n fig['layout']['title'] = \"Distribution of categorical and discrete variables:\"\n fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',\n marker_line_width=1.5, opacity=0.7)\n elif plot == 1:\n fig['layout']['title'] = \"Distribution of continuous variables:\"\n fig.update_traces(marker_color='rgb(112, 125, 188)', opacity=0.8)\n elif plot == 2:\n fig['layout']['title'] = \"Boxplot of continuous variables by score:\"\n fig['layout']['showlegend'] = False\n fig['layout']['updatemenus'] = updatemenus\n iplot(fig, config={\"displayModeBar\": False})", "def dyn_flareplots(df, folderpath, dyn_list, itype, flare_template = False):\n os.makedirs(folderpath, exist_ok = True)\n colors_auld = ['#800000', '#860000', '#8c0000', '#930000', '#990000', '#9f0000', '#a60000', '#ac0000', '#b20000', '#b90000', '#bf0000', '#c50000', '#cc0000', '#d20000', '#d80000', '#df0000', '#e50000', '#eb0000', '#f20000', '#f80000', '#ff0000', '#ff0700', '#ff0e00', '#ff1500', '#ff1c00', '#ff2300', '#ff2a00', '#ff3100', '#ff3800', '#ff3f00', '#ff4600', '#ff4d00', '#ff5400', '#ff5b00', '#ff6200', '#ff6900', '#ff7000', '#ff7700', '#ff7e00', '#ff8500', '#ff8c00', '#ff9100', '#ff9700', '#ff9d00', '#ffa300', '#ffa800', '#ffae00', '#ffb400', '#ffba00', '#ffbf00', '#ffc500', '#ffcb00', '#ffd100', '#ffd600', '#ffdc00', '#ffe200', '#ffe800', '#ffed00', '#fff300', '#fff900', '#ffff00', '#f2ff00', '#e5ff00', '#d8ff00', '#ccff00', 
'#bfff00', '#b2ff00', '#a5ff00', '#99ff00', '#8cff00', '#7fff00', '#72ff00', '#66ff00', '#59ff00', '#4cff00', '#3fff00', '#33ff00', '#26ff00', '#19ff00', '#0cff00', '#00ff00', '#0afc0a', '#15fa15', '#1ff81f', '#2af62a', '#34f434', '#3ff13f', '#49ef49', '#54ed54', '#5eeb5e', '#69e969', '#74e674', '#7ee47e', '#89e289', '#93e093', '#9ede9e', '#a8dba8', '#b3d9b3', '#bdd7bd', '#c8d5c8', '#d3d3d3']\n colors_ylorrd = ['#800026', '#850026', '#8a0026', '#8f0026', '#940026', '#990026', '#9e0026', '#a30026', '#a80026', '#ad0026', '#b20026', '#b70026', '#bd0026', '#c00225', '#c30424', '#c60623', '#c90822', '#cc0a21', '#d00d21', '#d30f20', '#d6111f', '#d9131e', '#dc151d', '#df171c', '#e31a1c', '#e51e1d', '#e7221e', '#e9271f', '#eb2b20', '#ed2f21', '#ef3423', '#f13824', '#f33c25', '#f54126', '#f74527', '#f94928', '#fc4e2a', '#fc532b', '#fc582d', '#fc5d2e', '#fc6330', '#fc6831', '#fc6d33', '#fc7234', '#fc7836', '#fc7d37', '#fc8239', '#fc873a', '#fd8d3c', '#fd903d', '#fd933e', '#fd9640', '#fd9941', '#fd9c42', '#fd9f44', '#fda245', '#fda546', '#fda848', '#fdab49', '#fdae4a', '#feb24c', '#feb54f', '#feb853', '#febb56', '#febf5a', '#fec25d', '#fec561', '#fec864', '#fecc68', '#fecf6b', '#fed26f', '#fed572', '#fed976', '#feda79', '#fedc7d', '#fede80', '#fedf84', '#fee187', '#fee38b', '#fee48e', '#fee692', '#fee895', '#fee999', '#feeb9c', '#ffeda0', '#ffeea3', '#fff0a7', '#fff1ab', '#fff3ae', '#fff4b2', '#fff6b6', '#fff7b9', '#fff9bd', '#fffac1', '#fffcc4', '#fffdc8', '#ffffcc']\n colors_inferno = ['#000003', '#000004', '#000006', '#010007', '#010109', '#01010B', '#02010E', '#020210', '#030212', '#040314', '#040316', '#050418', '#06041B', '#07051D', '#08061F', '#090621', '#0A0723', '#0B0726', '#0D0828', '#0E082A', '#0F092D', '#10092F', '#120A32', '#130A34', '#140B36', '#160B39', '#170B3B', '#190B3E', '#1A0B40', '#1C0C43', '#1D0C45', '#1F0C47', '#200C4A', '#220B4C', '#240B4E', '#260B50', '#270B52', '#290B54', '#2B0A56', '#2D0A58', '#2E0A5A', '#300A5C', '#32095D', '#34095F', '#350960', '#370961', '#390962', '#3B0964', '#3C0965', '#3E0966', '#400966', '#410967', '#430A68', '#450A69', '#460A69', '#480B6A', '#4A0B6A', '#4B0C6B', '#4D0C6B', '#4F0D6C', '#500D6C', '#520E6C', '#530E6D', '#550F6D', '#570F6D', '#58106D', '#5A116D', '#5B116E', '#5D126E', '#5F126E', '#60136E', '#62146E', '#63146E', '#65156E', '#66156E', '#68166E', '#6A176E', '#6B176E', '#6D186E', '#6E186E', '#70196E', '#72196D', '#731A6D', '#751B6D', '#761B6D', '#781C6D', '#7A1C6D', '#7B1D6C', '#7D1D6C', '#7E1E6C', '#801F6B', '#811F6B', '#83206B', '#85206A', '#86216A', '#88216A', '#892269', '#8B2269', '#8D2369', '#8E2468', '#902468', '#912567', '#932567', '#952666', '#962666', '#982765', '#992864', '#9B2864', '#9C2963', '#9E2963', '#A02A62', '#A12B61', '#A32B61', '#A42C60', '#A62C5F', '#A72D5F', '#A92E5E', '#AB2E5D', '#AC2F5C', '#AE305B', '#AF315B', '#B1315A', '#B23259', '#B43358', '#B53357', '#B73456', '#B83556', '#BA3655', '#BB3754', '#BD3753', '#BE3852', '#BF3951', '#C13A50', '#C23B4F', '#C43C4E', '#C53D4D', '#C73E4C', '#C83E4B', '#C93F4A', '#CB4049', '#CC4148', '#CD4247', '#CF4446', '#D04544', '#D14643', '#D24742', '#D44841', '#D54940', '#D64A3F', '#D74B3E', '#D94D3D', '#DA4E3B', '#DB4F3A', '#DC5039', '#DD5238', '#DE5337', '#DF5436', '#E05634', '#E25733', '#E35832', '#E45A31', '#E55B30', '#E65C2E', '#E65E2D', '#E75F2C', '#E8612B', '#E9622A', '#EA6428', '#EB6527', '#EC6726', '#ED6825', '#ED6A23', '#EE6C22', '#EF6D21', '#F06F1F', '#F0701E', '#F1721D', '#F2741C', '#F2751A', '#F37719', '#F37918', '#F47A16', '#F57C15', '#F57E14', '#F68012', '#F68111', 
'#F78310', '#F7850E', '#F8870D', '#F8880C', '#F88A0B', '#F98C09', '#F98E08', '#F99008', '#FA9107', '#FA9306', '#FA9506', '#FA9706', '#FB9906', '#FB9B06', '#FB9D06', '#FB9E07', '#FBA007', '#FBA208', '#FBA40A', '#FBA60B', '#FBA80D', '#FBAA0E', '#FBAC10', '#FBAE12', '#FBB014', '#FBB116', '#FBB318', '#FBB51A', '#FBB71C', '#FBB91E', '#FABB21', '#FABD23', '#FABF25', '#FAC128', '#F9C32A', '#F9C52C', '#F9C72F', '#F8C931', '#F8CB34', '#F8CD37', '#F7CF3A', '#F7D13C', '#F6D33F', '#F6D542', '#F5D745', '#F5D948', '#F4DB4B', '#F4DC4F', '#F3DE52', '#F3E056', '#F3E259', '#F2E45D', '#F2E660', '#F1E864', '#F1E968', '#F1EB6C', '#F1ED70', '#F1EE74', '#F1F079', '#F1F27D', '#F2F381', '#F2F485', '#F3F689', '#F4F78D', '#F5F891', '#F6FA95', '#F7FB99', '#F9FC9D', '#FAFDA0', '#FCFEA4']\n colors_magma = ['#000003', '#000004', '#000006', '#010007', '#010109', '#01010B', '#02020D', '#02020F', '#030311', '#040313', '#040415', '#050417', '#060519', '#07051B', '#08061D', '#09071F', '#0A0722', '#0B0824', '#0C0926', '#0D0A28', '#0E0A2A', '#0F0B2C', '#100C2F', '#110C31', '#120D33', '#140D35', '#150E38', '#160E3A', '#170F3C', '#180F3F', '#1A1041', '#1B1044', '#1C1046', '#1E1049', '#1F114B', '#20114D', '#221150', '#231152', '#251155', '#261157', '#281159', '#2A115C', '#2B115E', '#2D1060', '#2F1062', '#301065', '#321067', '#341068', '#350F6A', '#370F6C', '#390F6E', '#3B0F6F', '#3C0F71', '#3E0F72', '#400F73', '#420F74', '#430F75', '#450F76', '#470F77', '#481078', '#4A1079', '#4B1079', '#4D117A', '#4F117B', '#50127B', '#52127C', '#53137C', '#55137D', '#57147D', '#58157E', '#5A157E', '#5B167E', '#5D177E', '#5E177F', '#60187F', '#61187F', '#63197F', '#651A80', '#661A80', '#681B80', '#691C80', '#6B1C80', '#6C1D80', '#6E1E81', '#6F1E81', '#711F81', '#731F81', '#742081', '#762181', '#772181', '#792281', '#7A2281', '#7C2381', '#7E2481', '#7F2481', '#812581', '#822581', '#842681', '#852681', '#872781', '#892881', '#8A2881', '#8C2980', '#8D2980', '#8F2A80', '#912A80', '#922B80', '#942B80', '#952C80', '#972C7F', '#992D7F', '#9A2D7F', '#9C2E7F', '#9E2E7E', '#9F2F7E', '#A12F7E', '#A3307E', '#A4307D', '#A6317D', '#A7317D', '#A9327C', '#AB337C', '#AC337B', '#AE347B', '#B0347B', '#B1357A', '#B3357A', '#B53679', '#B63679', '#B83778', '#B93778', '#BB3877', '#BD3977', '#BE3976', '#C03A75', '#C23A75', '#C33B74', '#C53C74', '#C63C73', '#C83D72', '#CA3E72', '#CB3E71', '#CD3F70', '#CE4070', '#D0416F', '#D1426E', '#D3426D', '#D4436D', '#D6446C', '#D7456B', '#D9466A', '#DA4769', '#DC4869', '#DD4968', '#DE4A67', '#E04B66', '#E14C66', '#E24D65', '#E44E64', '#E55063', '#E65162', '#E75262', '#E85461', '#EA5560', '#EB5660', '#EC585F', '#ED595F', '#EE5B5E', '#EE5D5D', '#EF5E5D', '#F0605D', '#F1615C', '#F2635C', '#F3655C', '#F3675B', '#F4685B', '#F56A5B', '#F56C5B', '#F66E5B', '#F6705B', '#F7715B', '#F7735C', '#F8755C', '#F8775C', '#F9795C', '#F97B5D', '#F97D5D', '#FA7F5E', '#FA805E', '#FA825F', '#FB8460', '#FB8660', '#FB8861', '#FB8A62', '#FC8C63', '#FC8E63', '#FC9064', '#FC9265', '#FC9366', '#FD9567', '#FD9768', '#FD9969', '#FD9B6A', '#FD9D6B', '#FD9F6C', '#FDA16E', '#FDA26F', '#FDA470', '#FEA671', '#FEA873', '#FEAA74', '#FEAC75', '#FEAE76', '#FEAF78', '#FEB179', '#FEB37B', '#FEB57C', '#FEB77D', '#FEB97F', '#FEBB80', '#FEBC82', '#FEBE83', '#FEC085', '#FEC286', '#FEC488', '#FEC689', '#FEC78B', '#FEC98D', '#FECB8E', '#FDCD90', '#FDCF92', '#FDD193', '#FDD295', '#FDD497', '#FDD698', '#FDD89A', '#FDDA9C', '#FDDC9D', '#FDDD9F', '#FDDFA1', '#FDE1A3', '#FCE3A5', '#FCE5A6', '#FCE6A8', '#FCE8AA', '#FCEAAC', '#FCECAE', '#FCEEB0', '#FCF0B1', '#FCF1B3', '#FCF3B5', 
'#FCF5B7', '#FBF7B9', '#FBF9BB', '#FBFABD', '#FBFCBF']\n colors_ylgnbl = ['#081d58', '#0a1e5d', '#0c2062', '#0f2267', '#11246c', '#142671', '#162876', '#182a7b', '#1b2c80', '#1d2e85', '#20308a', '#22328f', '#253494', '#243795', '#243b97', '#243e99', '#24429a', '#23459c', '#23499e', '#234c9f', '#2350a1', '#2253a3', '#2257a4', '#225aa6', '#225ea8', '#2162aa', '#2166ac', '#206aae', '#206fb0', '#1f73b2', '#1f77b4', '#1f7bb6', '#1e80b8', '#1e84ba', '#1d88bc', '#1d8cbe', '#1d91c0', '#2094c0', '#2397c0', '#269ac1', '#299dc1', '#2ca0c1', '#2fa3c2', '#32a6c2', '#35a9c2', '#38acc3', '#3bafc3', '#3eb2c3', '#41b6c4', '#46b7c3', '#4bb9c2', '#50bbc1', '#55bdc1', '#5abfc0', '#60c1bf', '#65c3be', '#6ac5be', '#6fc7bd', '#74c9bc', '#79cbbb', '#7fcdbb', '#85cfba', '#8bd1b9', '#91d4b9', '#97d6b8', '#9dd8b8', '#a3dbb7', '#a9ddb6', '#afdfb6', '#b5e2b5', '#bbe4b5', '#c1e6b4', '#c7e9b4', '#caeab3', '#cdebb3', '#d0ecb3', '#d3eeb3', '#d6efb2', '#daf0b2', '#ddf1b2', '#e0f3b2', '#e3f4b1', '#e6f5b1', '#e9f6b1', '#edf8b1', '#eef8b4', '#f0f9b7', '#f1f9bb', '#f3fabe', '#f4fac1', '#f6fbc5', '#f7fcc8', '#f9fccb', '#fafdcf', '#fcfdd2', '#fdfed5', '#ffffd9']\n colors_grorrd = ['#800026', '#850026', '#8a0026', '#8f0026', '#940026', '#990026', '#9e0026', '#a30026', '#a80026', '#ad0026', '#b20026', '#b70026', '#bd0026', '#c00225', '#c30424', '#c60623', '#c90822', '#cc0a21', '#d00d21', '#d30f20', '#d6111f', '#d9131e', '#dc151d', '#df171c', '#e31a1c', '#e51e1d', '#e7221e', '#e9271f', '#eb2b20', '#ed2f21', '#ef3423', '#f13824', '#f33c25', '#f54126', '#f74527', '#f94928', '#fc4e2a', '#fc532b', '#fc582d', '#fc5d2e', '#fc6330', '#fc6831', '#fc6d33', '#fc7234', '#fc7836', '#fc7d37', '#fc8239', '#fc873a', '#fd8d3c', '#fd903d', '#fd933e', '#fd9640', '#fd9941', '#fd9c42', '#fd9f44', '#fda245', '#fda546', '#fda848', '#fdab49', '#fdae4a', '#feb24c', '#feb54f', '#feb853', '#febb56', '#febf5a', '#fec25d', '#fec561', '#fec864', '#fecc68', '#fecf6b', '#fed26f', '#fed572', '#fed976', '#feda79', '#fedc7d', '#fede80', '#fedf84', '#fee187', '#fee38b', '#fee48e', '#fee692', '#fee895', '#fee999', '#feeb9c', '#ffeda0', '#fbeaa4', '#f7e8a8', '#f4e6ac', '#f0e4b1', '#ece2b5', '#e9e0b9', '#e5ddbd', '#e1dbc2', '#ded9c6', '#dad7ca', '#d6d5ce', '#d3d3d3']\n colors = colors_grorrd\n for dyn in dyn_list:\n\n # Select top interactions based on its mean frequency. 
Also asign color based on mean value\n color_len = len(colors) -1\n df_clust = df.filter(items = [dyn, 'APosition1', 'APosition2', 'BPosition1', 'BPosition2','CPosition1', 'CPosition2','FPosition1', 'FPosition2',])\n df_clust['color'] = df_clust[dyn].apply(lambda x: colors[color_len-round(x*color_len/100)]) #There are 101 colors avalible in list\n\n #Filter top 5 in df_clust\n df_clust = df_clust.nlargest(20, dyn)\n\n # 'Edge' entry for json file\n df_dict = pd.DataFrame(columns = [\"name1\", \"name2\", \"frames\"])\n df_dict['name1'] = df_clust['APosition1'] \n df_dict['name2'] = df_clust['APosition2']\n df_dict['frames'] = [[1]]*len(df_dict)\n df_dict['color'] = df_clust['color']\n df_dict['value'] = df_clust[dyn]\n edges = df_dict.to_dict(orient=\"records\")\n\n # Appending edges to flare plot template, if any submitted\n if flare_template:\n flare_template['edges'] = edges\n jsondict = flare_template\n else:\n jsondict = { 'edges' : edges }\n\n #'Edge' multi-entries, based on the 4 GPCR nomenclatures\n for leter in ['A', 'B', 'C', 'F']:\n df_dict = pd.DataFrame(columns = [\"name1\", \"name2\", \"frames\"])\n df_dict['name1'] = df_clust[leter+'Position1'] \n df_dict['name2'] = df_clust[leter+'Position2']\n df_dict['frames'] = [[1]]*len(df_dict)\n df_dict['color'] = df_clust['color']\n df_dict['value'] = df_clust[dyn]\n leter_edges = df_dict.to_dict(orient=\"records\")\n\n #Appending edges\n if flare_template:\n flare_template[leter+'edges'] = leter_edges\n jsondict = flare_template\n else:\n jsondict = { leter+'edges' : leter_edges }\n\n #Writing json\n jsonpath = folderpath + dyn + \"_top.json\"\n with open(jsonpath, 'w') as jsonfile:\n dump(jsondict, jsonfile, ensure_ascii=False, indent = 4)", "def prep_for_plotting(dataframe, value_var):\n reshaped_df = dataframe.pivot(index='weekday',\n columns='hour',\n values=value_var)\n reshaped_df['day_name'] = reshaped_df.index.values\n reshaped_df['numeric_day'] = reshaped_df.day_name.apply(\n assign_numeric_day)\n reshaped_df = reshaped_df.sort_values('numeric_day')\n del reshaped_df['numeric_day']\n del reshaped_df['day_name']\n\n return reshaped_df", "def Mstar_function(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n if not p.xlim:\n p.xlim = np.array([1e10,1e13])\n\n df_all = pd.read_pickle(p.d_data + 'galaxy_selection/all_z0_galaxies')\n Mstar = df_all['M_star_caesar'].values\n\n logM_star = np.log10(Mstar)\n dM = 0.25\n N_gal = len(np.where((Mstar > Mstar.min()) & (Mstar < (Mstar.min() + dM)))[0])\n logM_star_bin = np.arange(logM_star.min(), logM_star.max(), dM)\n logM_star_bin_c = logM_star_bin[0:-1] + (logM_star_bin[1]-logM_star_bin[0])/2\n\n N_gal_array = np.zeros(len(logM_star_bin)-1)\n\n # Number of galaxies in each stellar mass bin\n for i in range(len(logM_star_bin)-1):\n N_gal_array[i] = len(np.where((logM_star > logM_star_bin[i]) & (logM_star < (logM_star_bin[i+1])))[0])\n\n # Corresponding volume density of galaxies\n n_gal_array = N_gal_array / (p.box_size)**3 # number of galaxies per Mpc^3\n\n fig, ax = plt.subplots()\n hb = ax.plot(logM_star_bin_c, np.log10(n_gal_array))\n ax.set_ylabel('$\\log\\Phi$ [Mpc$^{-3}$]')\n ax.set_xlabel('log Stellar Mass [M$_{\\odot}$]')\n ax.set_ylim([-7,0.2])\n plt.tight_layout()\n plt.show()", "def seaborn_formatting_mag(df, settings):\n df[\"salt\"] = df[\"dataset_saltfit_2classes\"] != -1\n df = du.tag_type(df, settings, type_column=settings.sntype_var)\n # because it doesn't like my normal df\n df_skimmed = pd.DataFrame()\n for f in [\"g\", 
\"r\", \"i\", \"z\"]:\n var = \"SIM_PEAKMAG_\" + f\n df_skimmed[var] = np.array([k for k in df[var].values])\n df_skimmed[\"salt\"] = np.array([k for k in df[\"salt\"].values])\n df_skimmed[\"target\"] = np.array([k for k in df[\"target_2classes\"].values])\n df_skimmed[\"SIM_REDSHIFT_CMB\"] = np.array(\n [k for k in df[\"SIM_REDSHIFT_CMB\"].values]\n )\n df_skimmed[settings.sntype_var] = np.array(\n [k for k in df[settings.sntype_var].values]\n )\n # skimm\n for f in [\"g\", \"r\", \"i\", \"z\"]:\n var = \"SIM_PEAKMAG_\" + f\n df_skimmed = df_skimmed[(df_skimmed[var] > 20) & (df_skimmed[var] < 28)]\n\n return df_skimmed", "def split_dataframe_datatypes(df, target_var):\n\tdf_num = df.select_dtypes(include=np.number)\n\tdf_cat = df.select_dtypes(include=object)\n\n\tif target_var in df_num.columns:\n\t\tdf_tar = df_num.copy() \n\t\tdf_tar = df_tar[[target_var]]\n\t\tdf_num.drop(columns=[target_var], axis=1, inplace=True) \n\telif target_var in df_cat.columns:\n\t\tdf_tar = df_cat.copy()\n\t\tdf_tar = df_tar[[target_var]]\n\t\tdf_cat.drop(columns=[target_var], axis=1, inplace=True) \n\n\treturn df_num,df_cat,df_tar", "def select_column(variable):\n return relevant_raw_data_df[variable].to_frame()", "def plot_variables(labels, plot, data):\n # Create individual figures\n fig = subplots.make_subplots(rows=1, cols=1)\n for var in labels:\n if plot == 0:\n counts = data[var].value_counts()\n fig.append_trace(go.Bar(x=counts, y=counts.index, orientation='h'), 1, 1)\n elif plot == 1:\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][0], 1, 1)\n fig.append_trace(ff.create_distplot([list(data[var])], ['distplot'])['data'][1], 1, 1)\n else:\n raise ValueError(\"plot number must be 0, 1\")\n # Create buttons for drop down menu\n buttons = []\n for i, label in enumerate(labels):\n if plot == 0:\n visibility = [i == j for j in range(len(labels))]\n else:\n visibility = [j//2 == i for j in range(2*len(labels))]\n button = dict(\n label=label,\n method='update',\n args=[{'visible': visibility},\n {'title': label}])\n buttons.append(button)\n updatemenus = list([\n dict(active=-1,\n x=1.06, y=1.27,\n buttons=buttons\n )\n ])\n # Setup layout\n if plot == 0:\n fig['layout']['title'] = \"Distribution of categorical and discrete variables:\"\n fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',\n marker_line_width=1.5, opacity=0.7)\n elif plot == 1:\n fig['layout']['title'] = \"Distribution of continuous variables:\"\n fig.update_traces(marker_color='rgb(112, 125, 188)', opacity=0.8)\n elif plot == 2:\n fig['layout']['title'] = \"Boxplot of continuous variables by score:\"\n fig['layout']['showlegend'] = False\n fig['layout']['updatemenus'] = updatemenus\n iplot(fig, config={\"displayModeBar\": False})", "def analysis_of_dataframe(self, dataframe):\n\t\ttypes = self.data.type.unique()\n\t\tratings = self.data.rating.unique()\n\n\t\tprint \"\"\n\n\t\t# First analysis section\n\t\tfor rating in ratings:\n\t\t\tpercentage = format(self.data.rating.value_counts()[rating] / len(self.data.index), '.6f')\n\n\t\t\t# Print probability data\n\t\t\tprint \"Prob(rating={}) = {}\".format(rating, percentage)\n\n\t\tprint \"\"\n\n\t\t# Second analysis section\n\t\tfor rating in ratings:\n\t\t\tfor type in types:\n\n\t\t\t\t# Get sub-set dataframe\n\t\t\t\ttemp_dataframe = self.data[self.data['rating'] == rating]\n\n\t\t\t\t# Get conditional probability\n\t\t\t\ttry:\n\t\t\t\t\tpercentage = format(temp_dataframe.type.value_counts()[type] / 
len(temp_dataframe.index), '.6f')\n\n\t\t\t\t# Current type not found in temp_dataframe\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpercentage = format(0, '.6f')\n\n\t\t\t\t# Print probability data\n\t\t\t\tfinally:\n\t\t\t\t\tprint \"Prob(type={}|rating={}) = {}\".format(type, rating, percentage)" ]
[ "0.7149481", "0.53572685", "0.49779034", "0.49217516", "0.4668608", "0.46611875", "0.46530828", "0.46240893", "0.46187612", "0.45908424", "0.4562476", "0.45395714", "0.4536326", "0.45301196", "0.4524316", "0.4492281", "0.4465708", "0.44525927", "0.4432995", "0.44032437", "0.43878183", "0.437705", "0.4350447", "0.43497247", "0.43416074", "0.43400154", "0.43206483", "0.4305875", "0.4297608", "0.4296757" ]
0.72028685
0
plot a Cepheid at its reddened position on the HR diag. (assume that deredden_cepheids() have been used)
def plot_dereddening(): extinction_coefficients = {'2365-2764-1': np.array([0.2622, 0.844]), '4109-638-1': np.array([0.0524, 0.1576]), '2058-56-1': np.array([0.0751, 0.248]), '3642-2459-1': np.array([0.1907, 0.608]), '3999-1391-1': np.array([0.3911, 1.2480]), '2607-1448-1': np.array([0.0430, 0.1310])} cepheids = {'2365-2764-1': np.array([0.959, 2.09]), '4109-638-1': np.array([0.705, 2.385]), '2058-56-1': np.array([1.222, 1.333]), '3642-2459-1': np.array([1.088, 2.0518]), '3999-1391-1': np.array([1.360, 1.2567]), '2607-1448-1': np.array([1.484, 0.6963])} periods = {'2365-2764-1': 1.61, '4109-638-1': 15.31, '2058-56-1': 63.08, '3642-2459-1': 1.86, '3999-1391-1': 24.98, '2607-1448-1': 8.54} max_periods = max(periods.values()) new_positions_bv_mv = [] # in M_V vs B-V space colors = [] theoretical_position = [] for obj in extinction_coefficients.keys(): # new_positions_bv_mv.append(cepheids[obj]-extinction_coefficients[obj]) new_positions_bv_mv.append(cepheids[obj]) colors.append(periods[obj]/max_periods) theoretical_position.append(-2.78*np.log10(periods[obj])-1.35) for pos in range(len(new_positions_bv_mv)): plt.scatter(new_positions_bv_mv[pos][0], new_positions_bv_mv[pos][1], marker='^', facecolor='w', s=40) plt.scatter(new_positions_bv_mv[pos][0], theoretical_position[pos], marker='o', facecolor='r', s=50) return new_positions_bv_mv, colors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_1d_path(self):\n\n fig = plt.figure(figsize=(8,5))\n \n matches = (self.a_scale == self.c_scale)\n plt.plot(self.a_scale[matches], self.E_coh[matches])\n plt.xlabel('linear deformation coefficient: 0=fcc, 1=bcc')\n plt.ylabel('Cohesive energy (eV/atom)')\n \n return fig", "def plot_hr_diag(hr_df, x='B_V', y='M_V', cutoff=0.2, bvcutoff=0.05):\n plt.figure(figsize=(11., 10.))\n print \"Plotting background stars..\"\n plt.set_cmap('gray_r')\n plt.hist2d(hr_df[x].tolist(), hr_df[y].tolist(), (200, 200), norm=LogNorm(), cmin=10)\n plt.axis([-0.2, 2.35, -3., 7.])\n plt.gca().invert_yaxis()\n plt.xlabel(r'$BT-VT$ (mag)')\n plt.ylabel(r'$M_{VT}$ (mag)') # Plotting M_{VT}\n plt.title(r'$\\sigma_\\pi / \\pi < %s, \\sigma_{BT-VT}< %s$ mag' % (cutoff, bvcutoff))\n print \"..Done\"\n return", "def fig_coh_ph(coh, ph, direc):\n\n colors = plt.cm.cividis(np.linspace(0, 1, coh.shape[0]))\n\n if coh.ndim > 1:\n f, (ax1, ax2) = plt.subplots(1, 2)\n for i, (co, p) in enumerate(zip(coh, ph)):\n ax1.plot(direc, co, c=colors[i])\n ax2.plot(direc, p*180./np.pi, c=colors[i])\n ax1.set_ylabel('Coherence')\n ax1.set_ylim((0, 1.))\n ax2.set_ylabel('Phase')\n ax1.set_xlabel('Angle from H1')\n ax2.set_xlabel('Angle from H1')\n plt.tight_layout()\n\n else:\n plt.figure()\n plt.subplot(121)\n plt.plot(direc, coh, c=colors[0])\n plt.ylim((0, 1.))\n plt.subplot(122)\n plt.plot(direc, ph*180./np.pi, c=colors[0])\n plt.tight_layout()\n\n return plt", "def plotEig(mu, C, axis):\n Q,S = decomposeCov(C)\n axis.arrow(mu[0,0], mu[1,0], .15*Q[0,0], .15*Q[1,0])\n axis.arrow(mu[0,0], mu[1,0], .05*Q[0,1], .05*Q[1,1])", "def plotDihedralEnergy(self, phys, forces, step): \r\n self.plotQuantity(step, phys.app.energies.getTable(4), 'dihedralenergy')", "def plot(self, c='k'):\n plt.plot(self.geometry.convex_hull.exterior.xy[0], self.geometry.convex_hull.exterior.xy[1], c)\n plt.axis('equal')", "def Diag(Fprime, Cprime, E):\n #\n import math\n # Angle for heteronuclear diatonic\n Theta = 0.5 * math.atan(2.0 * Fprime[0, 1] / (Fprime[0, 0] - Fprime[1, 1]))\n # print('Theta', Theta)\n\n Cprime[0, 0] = np.cos(Theta)\n Cprime[1, 0] = np.sin(Theta)\n Cprime[0, 1] = np.sin(Theta)\n Cprime[1, 1] = -np.cos(Theta)\n\n E[0, 0] = Fprime[0, 0] * np.cos(Theta) ** 2 + Fprime[1, 1] * np.sin(Theta) ** 2 + Fprime[0, 1] * np.sin(2.0 * Theta)\n E[1, 1] = Fprime[1, 1] * np.cos(Theta) ** 2 + Fprime[0, 0] * np.sin(Theta) ** 2 - Fprime[0, 1] * np.sin(2.0 * Theta)\n\n if (E[1, 1] <= E[0, 0]):\n Temp = E[1, 1]\n E[1, 1] = E[0, 0]\n E[0, 0] = Temp\n Temp = Cprime[0, 1]\n Cprime[0, 1] = Cprime[0, 0]\n Cprime[0, 0] = Temp\n Temp = Cprime[1, 1]\n Cprime[1, 1] = Cprime[1, 0]\n Cprime[1, 0] = Temp\n return", "def dendogram(self):\r\n \r\n plt.figure(figsize=(20, 7))\r\n dendrogram = sch.dendrogram(sch.linkage(self.X, method='ward'))\r\n plt.title(\"Dendograms\")\r\n plt.axhline(linestyle='--', y=5) \r\n plt.show()", "def draw_rh_lines(data):\n #hnd = extract_right_hand(data);\n hnd = np.array(data['crop']);\n hand.draw_hand_lines(hnd,data['rhkpss'][data['i']]);\n return hnd;", "def plot_dmd(self):\n n_modes = 10\n U = self.uf\n # put the decomposition axis last\n UT = U.transpose(0, 2, 1)\n # create the matrix of snapshots by flattening the non\n # decomp axes so we have a 2d array where we index the\n # decomp axis like snapshots[:,i]\n snapshots = UT.reshape((-1, UT.shape[-1]))\n\n # remove nans\n # TODO: remove nans by interpolation earlier on\n snapshots[np.where(np.isnan(snapshots))] = 0\n\n modes, ritz_values, norms \\\n = 
mr.compute_DMD_matrices_snaps_method(snapshots, range(n_modes))\n\n # as array, reshape to data dims\n reshaped_modes = modes.A.T.reshape((-1,) + UT.shape[:-1])\n\n fig, ax = plt.subplots(nrows=3)\n c0 = self.mean_velocity_Uf(ax[0])\n\n ax[1].set_title('First mode of DMD')\n ax[1].set_xlabel('time after front passage')\n ax[1].set_ylabel('height')\n c1 = ax[1].contourf(reshaped_modes[0], 100)\n\n ax[2].set_title('Second mode of DMD')\n ax[2].set_xlabel('time after front passage')\n ax[2].set_ylabel('height')\n # TODO: why does reshaped_modes seem to have a list of\n # duplicates?\n # Seems to be complex conjugates - why is this??\n c2 = ax[2].contourf(reshaped_modes[2], 100, levels=c1.levels)\n\n fig.colorbar(c0, ax=ax[0], use_gridspec=True)\n fig.colorbar(c1, ax=ax[1], use_gridspec=True)\n fig.colorbar(c2, ax=ax[2], use_gridspec=True)\n\n fig.tight_layout()\n\n return fig", "def plot_eccentricity(self, z=0):\n p = figure(\n title=\"Cut in plane Z=\" + str(z),\n x_axis_label=\"X axis\",\n y_axis_label=\"Y axis\",\n )\n for j in range(0, self.ntheta):\n p.circle(self.xre[z][j], self.yre[z][j], color=\"red\")\n p.circle(self.xri[z][j], self.yri[z][j], color=\"blue\")\n p.circle(0, 0, color=\"blue\")\n p.circle(self.xi, self.yi, color=\"red\")\n p.circle(0, 0, color=\"black\")\n return p", "def plot_phase_diagram(self):\n t_max = np.log(max(self.temperatures))\n d_min = np.log(min(self.distortions))\n y_axis = [np.log(i) - d_min for i in self.distortions]\n x_axis = [t_max - np.log(i) for i in self.temperatures]\n\n plt.figure(figsize=(12, 9))\n plt.plot(x_axis, y_axis)\n\n region = {}\n for i, c in list(enumerate(self.n_eff_clusters)):\n if c not in region:\n region[c] = {}\n region[c]['min'] = x_axis[i]\n region[c]['max'] = x_axis[i]\n for c in region:\n if c == 0:\n continue\n plt.text((region[c]['min'] + region[c]['max']) / 2, 0.2,\n 'K={}'.format(c), rotation=90)\n plt.axvspan(region[c]['min'], region[c]['max'], color='C' + str(c),\n alpha=0.2)\n plt.title('Phases diagram (log)')\n plt.xlabel('Temperature')\n plt.ylabel('Distortion')\n plt.show()", "def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()", "def showCl(ell,temps,title='CAMB ISWout power spectrum'):\n plt.plot(ell,temps*ell*(ell+1)/(2*np.pi) *1e12) #1e12 to convert to microK**2\n plt.xlabel('multipole moment l')\n plt.ylabel('l(l+1)C_l/(2pi) [microK**2]')\n plt.title(title)\n plt.show()", "def show_dcr_results(dg):\n cycle = dg.fileDB['cycle'].values[0]\n df_dsp = pd.read_hdf(f'./temp_{cycle}.h5', 'opt_dcr')\n # print(df_dsp.describe()) \n\n # compare DCR and A/E distributions\n fig, (p0, p1) = plt.subplots(2, 1, figsize=(8, 8))\n \n elo, ehi, epb = 0, 25000, 100\n \n # aoe distribution\n # ylo, yhi, ypb = -1, 2, 0.1\n # ylo, yhi, ypb = -0.1, 0.3, 0.005\n ylo, yhi, ypb = 0.05, 0.08, 0.0005\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p0.hist2d(df_dsp['trapEmax'], df_dsp['aoe'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n # p0.set_xlabel('Energy (uncal)', ha='right', x=1)\n p0.set_ylabel('A/E', ha='right', y=1)\n\n # dcr distribution\n # ylo, yhi, ypb = -20, 20, 1 # dcr_raw\n # ylo, yhi, ypb = -5, 2.5, 0.1 # dcr = dcr_raw / trapEmax\n # ylo, yhi, ypb = -3, 2, 0.1\n ylo, yhi, ypb = 0.9, 1.08, 0.001\n ylo, yhi, ypb = 1.034, 1.0425, 
0.00005 # best for 64.4 us pz\n # ylo, yhi, ypb = 1.05, 1.056, 0.00005 # best for 50 us pz\n # ylo, yhi, ypb = 1.016, 1.022, 0.00005 # best for 100 us pz\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p1.hist2d(df_dsp['trapEmax'], df_dsp['dcr'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n p1.set_xlabel('Energy (uncal)', ha='right', x=1)\n p1.set_ylabel('DCR', ha='right', y=1)\n \n # plt.show()\n plt.savefig(f'./plots/dcr_cyc{cycle}.png', dpi=300)\n plt.cla()", "def scree_plot(self, ev):\n plt.scatter(range(1,len(ev)+1), ev)\n plt.plot(range(1,len(ev)+1), ev)\n plt.title(\"Scree Plot\")\n plt.xlabel(\"Factors\")\n plt.ylabel(\"Eigenvalue\")\n plt.grid()\n plt.show()", "def display_energy_levels_0d(diagram, num_atoms, atoms, h_poly):\n h = eval_hamiltonian(num_atoms, h_poly, (1, 1))\n\n e, v = eigensystem(h)\n\n left = 0\n bottom = 0\n right = max([len(row) for row in diagram.split('\\n')])\n top = len(diagram.split('\\n'))\n\n plot_rows = numpy.ceil(math.sqrt(num_atoms+1))\n plot_cols = plot_rows\n\n for i in range(num_atoms):\n matplotlib.pyplot.subplot(plot_rows, plot_cols, i+1, axisbg=\"#000000\")\n y = [atom[0] for atom in atoms]\n x = [atom[1] for atom in atoms]\n c = numpy.abs(v[i]*v[i])\n\n matplotlib.pyplot.title('E = %f' % numpy.real(e[i]), fontsize = 10)\n norm = matplotlib.colors.Normalize(vmin = min(c),\n vmax = max(0.0001, max(c)))\n #x = [0,0,1,1]\n #y = [0,1,0,1]\n #c = [1,2,3,4]\n matplotlib.pyplot.hexbin(x, y, C = c,\n gridsize = (right-left, top-bottom),\n extent = (left, right, bottom, top),\n cmap = matplotlib.pyplot.get_cmap(\"gray\"),\n norm = norm\n )\n\n matplotlib.pyplot.subplot(plot_rows, plot_cols, num_atoms+1)\n matplotlib.pyplot.scatter(num_atoms*[0], e, s = 0.1)", "def rhombic_dodecahedron(self):\n v = [ [1, 1, 1], [1, 1, -1], [1, -1, 1], [1, -1, -1], [-1, 1, 1], \n [-1, 1, -1], [-1, -1, 1], [-1, -1, -1], [0, 0, 2], [0, 2, 0],\n [2, 0, 0], [0, 0, -2], [0, -2, 0], [-2, 0, 0] ]\n return Polyhedron(vertices = v)", "def energy_kde_paperplot(fields,df):\n plt.figure()\n i = 0\n colorList = ['dodgerblue','tomato']\n lw = 2\n\n meanE_2 = []\n meanE_3 = []\n mup = np.min(df['energy [eV]']) - pp.mu\n chi_0 = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(fields[0]))\n g_en_axis, _, _, _, _, _, _, _, _, _, _, _, _, _ = \\\n occupation_plotter.occupation_v_energy_sep(chi_0, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), np.zeros(len(g_en_axis)), '-', color='black', lineWidth=lw,label='Equilibrium')\n\n for ee in fields:\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n # meanE_2 = utilities.mean_energy(chi_2_i,df)\n g_en_axis, g_ftot, g_chiax, g_f0ax, _, _, _, _, _, _, _, _,_,_ = \\\n occupation_plotter.occupation_v_energy_sep(chi_2_i, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), g_chiax,'--',color = colorList[i],lineWidth=lw,label=r'Low Field {:.0f} '.format(ee/100)+r'$V \\, cm^{-1}$')\n print(integrate.trapz(g_chiax,g_en_axis))\n\n # plt.plot(meanE_2-np.min(df['energy [eV]']),0,'.')\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n g_en_axis, g_ftot, g_chiax, g_f0ax, _, _, _, _, _, _, _, _,_,_ = \\\n occupation_plotter.occupation_v_energy_sep(chi_3_i, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), g_chiax,color = colorList[i],lineWidth=lw,label=r'Full Drift {:.0f} '.format(ee/100)+r'$V \\, cm^{-1}$')\n 
print(integrate.trapz(g_chiax,g_en_axis))\n\n i = i + 1\n # plt.plot(g_en_axis - np.min(df['energy [eV]']), g_f0ax, '--', color='black', lineWidth=lw,label=r'$f_0$')\n\n plt.legend()\n # plt.ylim([-0.02, 0.015])\n plt.xlabel(r'Energy above CBM ($eV$)')\n plt.ylabel(r'Deviational occupation $\\delta f_{\\mathbf{k}}$ (norm.)')\n # plt.ylabel(r'$\\delta f_{\\mathbf{k}}/f_{\\mathbf{k}}^0$')\n plt.savefig(pp.figureLoc+'energy_KDE.png', bbox_inches='tight',dpi=600)\n\n plt.figure()\n plt.plot(g_en_axis,g_chiax)\n\n plt.figure()\n Z, xedges, yedges = np.histogram2d(df['kx [1/A]']*chi_3_i,df['ky [1/A]']*chi_3_i)\n plt.pcolormesh(xedges, yedges, Z.T)\n\n from scipy.stats.kde import gaussian_kde\n g_inds,_,_ = utilities.gaas_split_valleys(df,False)\n g_df = df.loc[g_inds]\n\n x = g_df['kx [1/A]']*(chi_3_i[g_inds]+g_df['k_FD'])\n y = g_df['ky [1/A]']*(chi_3_i[g_inds]+g_df['k_FD'])\n\n # y = g_df['energy [eV]']*(chi_3_i[g_inds]+g_df['k_FD'])\n k = gaussian_kde(np.vstack([x, y]))\n xi, yi = np.mgrid[x.min():x.max():x.size ** 0.5 * 1j, y.min():y.max():y.size ** 0.5 * 1j]\n zi = k(np.vstack([xi.flatten(), yi.flatten()]))\n\n fig = plt.figure(figsize=(7, 8))\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n\n # alpha=0.5 will make the plots semitransparent\n ax1.pcolormesh(xi, yi, zi.reshape(xi.shape), alpha=0.5)\n ax2.contourf(xi, yi, zi.reshape(xi.shape), alpha=0.5)\n\n ax1.set_xlim(x.min(), x.max())\n ax1.set_ylim(y.min(), y.max())\n ax2.set_xlim(x.min(), x.max())\n ax2.set_ylim(y.min(), y.max())", "def interactive_hess(gr,g):\n def plot(size=100):\n fig,ax = plt.subplots()\n fig.set_size_inches(8,6)\n ax.hexbin(gr, g, gridsize=size, bins='log', cmap='inferno', label=\"Relative stellar density\")\n ax.set_title(\"HESS DIAGRAM, gridsize={0:d}\".format(size), fontsize = 15)\n ax.set_xlabel(r\"$g-r$\",fontsize = 25)\n ax.set_ylabel(r\"$g$\",fontsize = 25)\n ax.legend(loc='upper left')\n ax.set_ylim(ax.get_ylim()[::-1])\n plt.show()\n interact(plot, size=(50,300,1),continuous_update=False);", "def plot_1D_edp(self, start=(-10,25), end=(30,-20), N=100):\n rho = []\n x0, z0 = start\n x1, z1 = end\n xpoints = np.linspace(x0, x1, N)\n zpoints = np.linspace(z0, z1, N)\n for x, z in zip(xpoints, zpoints):\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n dist = np.sqrt((x-x0)**2 + (z-z0)**2)\n rho.append([dist, tmp.sum(axis=0)])\n rho = np.array(rho, float)\n X = rho[:,0]\n Y = rho[:,1]\n plt.figure()\n plt.plot(X, Y)", "def keynesian_cross(T, I, G, C):\n # The data vector to be plotted for production and aggregate expenditure:\n Y_arrey = np.linspace(0,300)\n PE_arrey = (C * (Y_arrey - T) + I + G)\n degree = Y_arrey\n\n # The figure\n fig = plt.figure(figsize=(10,5))\n ax = fig.add_subplot(1,1,1)\n\n ax.plot(Y_arrey, degree, label=\"45-degree line\", color='lightblue',linewidth=3)\n ax.plot(Y_arrey, AD_arrey, label=\"AD=C+I+G+NX\", color='darkorange',linewidth=3)\n\n ax.set_xlabel(\"Y\")\n ax.set_ylabel(\"PE\")\n ax.legend(loc=\"upper left\")\n\n ax.grid()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n return", "def plot_dist(z, dz, om, dom, dist, dh, name, mathname, filename=None):\n # Grid of redshift and matter density values.\n x, y = numpy.meshgrid(z, om)\n pylab.figure(figsize=(5.5,4.5)) \n pylab.imshow(dist/dh, \n extent=(z.min() - dz/2., \n z.max() + dz/2.,\n om.max() + dom/2.,\n om.min() - dom/2., \n ),\n interpolation='nearest',\n aspect = z.max()/om.max(),\n cmap = 
cm.Spectral,\n )\n cb = pylab.colorbar()\n cb.ax.set_ylabel(r'$' + mathname + '/D_H$')\n\n pylab.contour(x, y, dist/dh, 10, colors='k')\n pylab.xlim(z.min(), z.max())\n pylab.ylim(om.min(), om.max()) \n pylab.xlabel(\"redshift z\")\n pylab.ylabel(r\"$\\Omega_M = 1 - \\Omega_\\lambda$\")\n pylab.title(name)\n if filename is not None:\n prefix, extension = filename.split('.')\n pylab.savefig(prefix + '_' + mathname + '.' + extension,\n bbox_inches=\"tight\")", "def plot_specific_discharge(self, spdis, head=None, kstep=1,\n hstep=1, normalize=False, **kwargs):\n if 'pivot' in kwargs:\n pivot = kwargs.pop('pivot')\n else:\n pivot = 'middle'\n\n if 'ax' in kwargs:\n ax = kwargs.pop('ax')\n else:\n ax = self.ax\n\n if isinstance(spdis, list):\n print(\"Warning: Selecting the final stress period from Specific\"\n \" Discharge list\")\n spdis = spdis[-1]\n\n if self.mg.grid_type == \"structured\":\n ncpl = self.mg.nrow * self.mg.ncol\n\n else:\n ncpl = self.mg.ncpl\n\n nlay = self.mg.nlay\n\n qx = np.zeros((nlay * ncpl))\n qz = np.zeros((nlay * ncpl))\n ib = np.zeros((nlay * ncpl), dtype=bool)\n\n idx = np.array(spdis['node']) - 1\n\n # check that vertex grid cross sections are not arbitrary\n # within a tolerance!\n if self.mg.grid_type != 'structured':\n pts = self.pts\n xuniform = [True if abs(pts.T[0, 0] - i) < 1\n else False for i in pts.T[0]]\n yuniform = [True if abs(pts.T[1, 0] - i) < 1\n else False for i in pts.T[1]]\n if not np.all(xuniform):\n if not np.all(yuniform):\n err_msg = \"plot_specific_discharge does not \" \\\n \"support aribtrary cross sections\"\n raise AssertionError(err_msg)\n\n if self.direction == 'x':\n qx[idx] = spdis['qx']\n elif self.direction == 'y':\n qx[idx] = spdis['qy']\n else:\n err_msg = 'plot_specific_discharge does not ' \\\n 'support arbitrary cross-sections'\n raise AssertionError(err_msg)\n\n qz[idx] = spdis[\"qz\"]\n ib[idx] = True\n\n if self.mg.grid_type == \"structured\":\n qx.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol)\n qz.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol)\n ib.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol)\n\n if isinstance(head, np.ndarray):\n zcentergrid = self.__cls.set_zcentergrid(head)\n else:\n zcentergrid = self.zcentergrid\n\n if nlay == 1:\n x = []\n z = []\n for k in range(nlay):\n for i in range(self.xcentergrid.shape[1]):\n x.append(self.xcentergrid[k, i])\n z.append(0.5 * (zcentergrid[k, i] + zcentergrid[k + 1, i]))\n x = np.array(x).reshape((1, self.xcentergrid.shape[1]))\n z = np.array(z).reshape((1, self.xcentergrid.shape[1]))\n else:\n x = self.xcentergrid\n z = zcentergrid\n\n u = []\n v = []\n ibx = []\n xedge, yedge = self.mg.xyedges\n for k in range(self.mg.nlay):\n u.append(plotutil.cell_value_points(self.xpts, xedge,\n yedge, qx[k, :, :]))\n v.append(plotutil.cell_value_points(self.xpts, xedge,\n yedge, qz[k, :, :]))\n ibx.append(plotutil.cell_value_points(self.xpts, xedge,\n yedge, ib[k, :, :]))\n u = np.array(u)\n v = np.array(v)\n ibx = np.array(ibx)\n x = x[::kstep, ::hstep]\n z = z[::kstep, ::hstep]\n u = u[::kstep, ::hstep]\n v = v[::kstep, ::hstep]\n ib = ibx[::kstep, ::hstep]\n\n # upts and vpts has a value for the left and right\n # sides of a cell. 
Sample every other value for quiver\n u = u[:, ::2]\n v = v[:, ::2]\n ib = ib[:, ::2]\n\n else:\n # kstep implementation for vertex grid\n projpts = {key: value for key, value in self.__cls.projpts.items()\n if (key // ncpl) % kstep == 0}\n\n # set x and z centers\n if isinstance(head, np.ndarray):\n # pipe kstep to set_zcentergrid to assure consistent array size\n zcenters = self.__cls.set_zcentergrid(np.ravel(head), kstep=kstep)\n else:\n zcenters = [np.mean(np.array(v).T[1]) for i, v\n in sorted(projpts.items())]\n\n u = np.array([qx[cell] for cell in sorted(projpts)])\n\n if self.direction == \"x\":\n x = np.array([np.mean(np.array(v).T[0]) for i, v\n in sorted(projpts.items())])\n else:\n x = np.array([np.mean(np.array(v).T[1]) for i, v\n in sorted(projpts.items())])\n\n z = np.ravel(zcenters)\n v = np.array([qz[cell] for cell\n in sorted(projpts)])\n ib = np.array([ib[cell] for cell\n in sorted(projpts)])\n\n x = x[::hstep]\n z = z[::hstep]\n u = u[::hstep]\n v = v[::hstep]\n ib = ib[::hstep]\n\n if normalize:\n vmag = np.sqrt(u ** 2. + v ** 2.)\n idx = vmag > 0.\n u[idx] /= vmag[idx]\n v[idx] /= vmag[idx]\n\n # mask with an ibound array\n u[~ib] = np.nan\n v[~ib] = np.nan\n\n quiver = ax.quiver(x, z, u, v, pivot=pivot, **kwargs)\n\n return quiver", "def plot_cf(self, **options):\n n = len(self.hs)\n xs = np.arange(-n//2, n//2)\n hs = np.roll(self.hs, len(self.hs) // 2)\n plt.plot(xs, hs.real, label='real', **options)\n plt.plot(xs, hs.imag, label='imag', **options)\n plt.legend()", "def plot_derivatives_divided(self, show=False):\n\n fig, ax = plt.subplots(3, 2, figsize = (15, 10))\n # plt.subplots_adjust(wspace = 0, hspace = 0.1)\n plt.subplots_adjust(hspace=0.5)\n training_index = np.random.randint(self.n_train * self.n_p)\n \n if self.flatten:\n print ('Plotting derivatives... reshaping the flattened data to %s'%str(input_shape))\n # TODO\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n print ('Plotting derivatives... 
reshaping the flattened data to power spectra')\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n # temp has shape (num_params, ncombinations, len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n \n # Cl has shape (1,10) since it is the data vector for the \n # upper training image for both params\n labels =[r'$θ_1$ ($\\Omega_M$)']\n\n # we loop over them in this plot to assign labels\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[0, 0].plot(ells, Cl[i],label=labels[i])\n else:\n ax[0, 0].loglog(ells, ells*(ells+1)*Cl[i],label=labels[i])\n ax[0, 0].set_title('One upper training example, Cl 0,0')\n ax[0, 0].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[0, 0].set_ylabel(r'$C_\\ell$')\n else:\n ax[0, 0].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n ax[0, 0].legend(frameon=False)\n\n if self.flatten:\n # TODO\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n # temp has shape (num_params, ncombinations, len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[1, 0].plot(ells, Cl[i])\n else:\n ax[1, 0].loglog(ells, ells*(ells+1)*Cl[i])\n ax[1, 0].set_title('One lower training example, Cl 0,0')\n ax[1, 0].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[1, 0].set_ylabel(r'$C_\\ell$')\n else:\n ax[1, 0].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n if self.flatten:\n # TODO\n temp = self.data[\"x_m\"][training_index].reshape(len(theta_fid),*input_shape)\n xm, ym = temp.T[:,0]\n\n temp = self.data[\"x_p\"][training_index].reshape(len(theta_fid),*input_shape)\n xp, yp = temp.T[:,0]\n else:\n temp = self.data['x_m'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_lower = temp[:,0,:]\n temp = self.data['x_p'][training_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_upper = temp[:,0,:]\n\n for i in range(Cl_lower.shape[0]):\n ax[2, 0].plot(ells, (Cl_upper[i]-Cl_lower[i])/self.Cl_noiseless)\n ax[2, 0].set_title('Difference between upper and lower training examples');\n ax[2, 0].set_xlabel(r'$\\ell$')\n ax[2, 0].set_ylabel(r'$\\Delta C_\\ell$ / $C_{\\ell,thr}$')\n ax[2, 0].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[2, 0].set_xscale('log')\n\n # also plot sigma_cl / CL\n sigma_cl = np.sqrt(self.covariance)\n ax[2, 0].plot(ells, sigma_cl/self.Cl_noiseless, label=r'$\\sigma_{Cl} / C_{\\ell,thr}$')\n ax[2, 0].legend(frameon=False)\n\n test_index = np.random.randint(self.n_p)\n\n if self.flatten:\n # TODO\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl = temp[:,0,:] # plot the (0,0) autocorrelation bin\n \n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[0, 1].plot(ells, Cl[i])\n else:\n ax[0, 1].loglog(ells, ells*(ells+1)*Cl[i])\n ax[0, 1].set_title('One upper test example Cl 0,0')\n ax[0, 1].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[0, 1].set_ylabel(r'$C_\\ell$')\n else:\n ax[0, 1].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n if self.flatten:\n # TODO\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),*input_shape)\n x, y = temp.T[:,0]\n else:\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl = temp[:,0,:] # plot the (0,0) 
autocorrelation bin\n\n for i in range(Cl.shape[0]):\n if self.rescaled:\n ax[1, 1].plot(ells, Cl[i])\n else:\n ax[1, 1].loglog(ells, ells*(ells+1)*Cl[i])\n ax[1, 1].set_title('One lower test example Cl 0,0')\n ax[1, 1].set_xlabel(r'$\\ell$')\n if self.rescaled:\n ax[1, 1].set_ylabel(r'$C_\\ell$')\n else:\n ax[1, 1].set_ylabel(r'$\\ell(\\ell+1) C_\\ell$')\n\n if self.flatten:\n # TODO\n temp = self.data[\"x_m_test\"][test_index].reshape(len(theta_fid),*input_shape)\n xm, ym = temp.T[:,0]\n\n temp = self.data[\"x_p_test\"][test_index].reshape(len(theta_fid),*input_shape)\n xp, yp = temp.T[:,0]\n else:\n temp = self.data['x_m_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_lower = temp[:,0,:]\n temp = self.data['x_p_test'][test_index].reshape(len(theta_fid),ncombinations,len(ells))\n Cl_upper = temp[:,0,:]\n \n for i in range(Cl_lower.shape[0]):\n ax[2, 1].plot(ells, (Cl_upper[i]-Cl_lower[i]) / self.Cl_noiseless)\n ax[2, 1].set_title('Difference between upper and lower test samples');\n ax[2, 1].set_xlabel(r'$\\ell$')\n ax[2, 1].set_ylabel(r'$\\Delta C_\\ell$ / $C_{\\ell,thr}$')\n ax[2, 1].axhline(xmin = 0., xmax = 1., y = 0.\n , linestyle = 'dashed', color = 'black')\n ax[2, 1].set_xscale('log')\n\n # also plot sigma_cl / CL\n sigma_cl = np.sqrt(self.covariance)\n ax[2, 1].plot(ells, sigma_cl/self.Cl_noiseless, label=r'$\\sigma_{Cl} / C_{\\ell,thr}$')\n\n plt.savefig(f'{self.figuredir}derivatives_visualization_divided_{self.modelversion}.png')\n if show: plt.show()\n plt.close()", "def plot_IVS(self, parent_figure=None):\n nivs = len(FD.figure_AllIVs)\n cprint(\"c\", \"plot_IVS.\")\n rows = nivs\n cols = 5\n height = 1.5 * nivs\n width = 8.5\n PD = PData()\n ymin = -125.0\n ymax = 40.0\n calx = 120.0\n\n self.P = PH.regular_grid(\n rows,\n cols,\n order=\"rowsfirst\",\n figsize=(width, height),\n showgrid=False,\n verticalspacing=0.01,\n horizontalspacing=0.05,\n margins={\n \"bottommargin\": 0.1,\n \"leftmargin\": 0.07,\n \"rightmargin\": 0.05,\n \"topmargin\": 0.08,\n },\n labelposition=(-0.05, 1.06),\n parent_figure=parent_figure,\n # panel_labels=['A', 'B', 'C', 'D', 'E', 'F'],\n )\n cellpath = config[\"cellDataDirectory\"]\n png_path = Path(config[\"baseDataDirectory\"], config[\"pngDirectory\"])\n cprint(\"c\", \"prepping fo run\")\n\n for rax, iv in enumerate(FD.figure_AllIVs.keys()):\n cprint(\"r\", f\"Doing Cell VCN_c{iv:02d} -----------------------------------\")\n celln = Path(png_path, f\"VCN_c{iv:02d}.png\")\n if celln.is_file(): # add images from png files\n img = mpimg.imread(str(celln))\n self.P.axarr[rax, 0].imshow(img, aspect=\"equal\")\n ylim = self.P.axarr[rax, 0].get_ylim()\n self.P.axarr[rax, 0].set_xlim(900, 1500)\n PH.noaxes(self.P.axarr[rax, 0])\n # plot 3 dendrite decorations\n for iax, dendmode in enumerate([\"passive\", \"normal\", \"active\"]):\n dendm = self.get_dendmode(dendmode)\n sfi = Path(\n cellpath,\n f\"VCN_c{iv:02d}\",\n \"Simulations\",\n \"IV\",\n FD.figure_AllIVs[iv][dendm],\n )\n if not sfi.is_dir():\n cprint(\"r\", f\"Unable to find dir: {str(sfi):s}\")\n continue\n fn = list(sfi.glob(\"*\"))\n sfi = Path(sfi, fn[0])\n if rax > 0:\n calx = None # only one cal bar on this plot, top row.\n self.parent.PLT.plot_traces(\n self.P.axarr[rax, iax + 1],\n sfi,\n PD,\n protocol=\"IV\",\n ymin=ymin,\n ymax=ymax,\n iax=iax,\n figure=self.P.figure_handle,\n ivaxis=self.P.axarr[rax, 4], # accumulate IV's in right side\n ivcolor=colors[iax],\n iv_spike_color=spike_colors[dendmode],\n spike_marker_size=1.5,\n 
spike_marker_color=spike_colors[dendmode],\n calx=calx,\n caly=-10.0,\n )\n if rax == 0:\n self.P.axarr[rax, iax + 1].set_title(dendmode)\n if iax == 0:\n self.P.axarr[rax, 0].text(-0.1, 0.5, str(iv))\n if parent_figure is None:\n fig = FigInfo()\n fig.P = self.P\n fig.filename = f\"Fig_M1A_Supplemental.pdf\"\n timestamp_str = datetime.datetime.now().strftime(\"%Y-%m-%d-%H:%M\")\n fig.title[\n \"title\"\n ] = f\"SBEM Project Figure 1 Modeling (Supplemental A) ({timestamp_str:s})\"\n return fig\n else:\n return self.P", "def _plot_ecdf(self, numerator_name, denominator_name):\n x = self.ecdf[numerator_name][denominator_name]['x']\n y = self.ecdf[numerator_name][denominator_name]['y']\n\n lower_bound = x[y.index(min(y,\n key=lambda x:\n abs(x-self.confidence_level)))]\n median = x[y.index(min(y, key=lambda x:abs(x-0.5)))]\n upper_bound = x[y.index(min(y,\n key=lambda x:\n abs(x-(1-self.confidence_level))))]\n\n sns.lineplot(x=x, y=y)\n ci = 1 - self.confidence_level\n title = ('Median Lift was {0:.2%}, with a '\n '{1:.0%} CI of [{2:.2%}, {3:.2%}]'.format(median,\n ci,\n lower_bound,\n upper_bound))\n title = self._format_title(title)\n plt.title(title)\n plt.xlabel('Lift')\n plt.ylabel('Cumulative Probability')\n plt.axvline(x=lower_bound, linestyle='dotted', color='black')\n plt.axvline(x=median, linestyle='dotted', color='black')\n plt.axvline(x=upper_bound, linestyle='dotted', color='black')\n sns.despine(left=True)\n locs, labels = plt.xticks()\n labels = self._format_axis_as_percent(locs, labels)\n plt.xticks(locs, labels=labels)", "def plot(\n ecg, \n sample_rate = 500, \n title = 'ECG 12', \n lead_index = lead_index, \n lead_order = None,\n style = None,\n columns = 2,\n row_height = 6,\n show_lead_name = True,\n show_grid = True,\n show_separate_line = True,\n ):\n\n if not lead_order:\n lead_order = list(range(0,len(ecg)))\n secs = len(ecg[0])/sample_rate\n leads = len(lead_order)\n rows = ceil(leads/columns)\n # display_factor = 2.5\n display_factor = 1\n line_width = 0.5\n fig, ax = plt.subplots(figsize=(secs*columns * display_factor, rows * row_height / 5 * display_factor))\n display_factor = display_factor ** 0.5\n fig.subplots_adjust(\n hspace = 0, \n wspace = 0,\n left = 0, # the left side of the subplots of the figure\n right = 1, # the right side of the subplots of the figure\n bottom = 0, # the bottom of the subplots of the figure\n top = 1\n )\n\n fig.suptitle(title)\n\n x_min = 0\n x_max = columns*secs\n y_min = row_height/4 - (rows/2)*row_height\n y_max = row_height/4\n\n if (style == 'bw'):\n color_major = (0.4,0.4,0.4)\n color_minor = (0.75, 0.75, 0.75)\n color_line = (0,0,0)\n else:\n color_major = (1,0,0)\n color_minor = (1, 0.7, 0.7)\n color_line = (0,0,0.7)\n\n if(show_grid):\n ax.set_xticks(np.arange(x_min,x_max,0.2)) \n ax.set_yticks(np.arange(y_min,y_max,0.5))\n\n ax.minorticks_on()\n \n ax.xaxis.set_minor_locator(AutoMinorLocator(5))\n\n ax.grid(which='major', linestyle='-', linewidth=0.5 * display_factor, color=color_major)\n ax.grid(which='minor', linestyle='-', linewidth=0.5 * display_factor, color=color_minor)\n\n ax.set_ylim(y_min,y_max)\n ax.set_xlim(x_min,x_max)\n\n\n for c in range(0, columns):\n for i in range(0, rows):\n if (c * rows + i < leads):\n y_offset = -(row_height/2) * ceil(i%rows)\n # if (y_offset < -5):\n # y_offset = y_offset + 0.25\n\n x_offset = 0\n if(c > 0):\n x_offset = secs * c\n if(show_separate_line):\n ax.plot([x_offset, x_offset], [ecg[t_lead][0] + y_offset - 0.3, ecg[t_lead][0] + y_offset + 0.3], linewidth=line_width * 
display_factor, color=color_line)\n\n \n t_lead = lead_order[c * rows + i]\n \n step = 1.0/sample_rate\n if(show_lead_name):\n ax.text(x_offset + 0.07, y_offset - 0.5, lead_index[t_lead], fontsize=9 * display_factor)\n ax.plot(\n np.arange(0, len(ecg[t_lead])*step, step) + x_offset, \n ecg[t_lead] + y_offset,\n linewidth=line_width * display_factor, \n color=color_line\n )", "def plot_front(quadrant, ring):\n max_ring = 16\n pos = (quadrant-1)*max_ring + ring-1\n ch = f_ch[:, pos]\n E = f_E[:, pos]\n #if pos in front_special_case:\n # ch = f_ch[1:, pos]\n # E = f_E[1:, pos]\n fig = plt.figure()\n plt.plot(ch, E, color='blue', marker=\"x\", linestyle='None')\n plt.plot(ch, f_gain[pos]*ch+f_offset[pos], color='red')\n plt.title(\"Quadrant {}, ring {}\".format(quadrant, ring))\n plt.xlabel(\"Channel\")\n plt.ylabel(\"E (keV)\")\n #plt.legend([\"Ni, Pb, Sm\", \"Lin. fit\"], loc=0)\n plt.legend([\"Pb, Sm\", \"Lin. fit\"], loc=0)\n fig.set_tight_layout(True)\n plt.show()" ]
[ "0.563348", "0.5631599", "0.5533224", "0.5502739", "0.5487151", "0.5443683", "0.5419953", "0.5403358", "0.5385931", "0.53529364", "0.5345485", "0.53156674", "0.53142434", "0.53062516", "0.5289918", "0.52762717", "0.527185", "0.5271523", "0.524425", "0.52201706", "0.5217703", "0.5190057", "0.5187612", "0.51803994", "0.51727897", "0.51549786", "0.5152855", "0.5112868", "0.51081824", "0.51024467" ]
0.6225962
0
Initializes the connection with IMDb.
def initialize_connection():
    session = imdb.IMDb()
    return session
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setup_connection(cls):\n try:\n cls.imdb_access = imdb.IMDb()\n except imdb.IMDbError, err:\n print \"Problem with connectivity to imdb.com due to %s \" \\\n % (err)", "def initialize():\n\t\tDBHelper.con = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')", "def init(self):\n return self.conn.init()", "def initialize(self):\n if not self.connection.is_closed():\n self.connection.close()\n\n self.connection.connect()", "def init_connection(self, connection):", "def __init__(self):\n self._connection = get_db_connection()", "def __init__(self):\n\n\t\tself.connection = self.get_connection()", "def __init__(self):\n\t\tself.obtainDatabaseConnection()", "def init(self, userdata, conn):\r\n pass", "def __init__(self):\n self.dbcon = DbConnection.get_con()", "def __init__(self):\n self.dbconnect = dbConnection.connection", "def init(self):\n self.db.connect()\n try:\n self.db.create_tables([JambiModel], safe=True)\n JambiModel.create(ref='0')\n self.logger.info('Database initialized')\n except IntegrityError:\n self.logger.info('Database was already initialized')\n self.db.close()", "def bootstrap(self):\n\n self.db = connection_manager.get(DbConnection, host=self.ip, port=3306, user=self.user, password=self.password)\n\n self.connected = True", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def init(self):\n self.conn = None\n\n return True", "def __init__(self):\r\n self.conn = create_connection(DATABASE_PATH)", "def __init__(self, con_uri=None, db_name=\"douyin\"):\n super().__init__()\n self.con_uri = con_uri or 'localhost'\n self.client = AsyncIOMotorClient(self.con_uri)\n self.db = self.client[db_name]", "def init_connection(self, db):\n log.info(\"== Stage 1: Init ==\")\n self.use_db(db)\n self.set_no_binlog()\n self.get_mysql_settings()\n self.init_mysql_version()\n self.sanity_checks()\n self.set_tx_isolation()\n self.set_sql_mode()\n self.enable_priority_ddl()\n self.skip_cache_fill_for_myrocks()\n self.enable_sql_wsenv()\n self.override_session_vars()\n self.get_osc_lock()", "def __init__(self):\n\n self.db = ImageDB()\n self.vitess = VitessConn()\n self.minio = MinioConn()", "def connect(self):\n self.conn.connect()", "def __init__(self, app_database):\n try:\n self.database_configuration = app_database\n self.conn = None\n self.cursor = None\n except Exception as error:\n print(f\"DBCM::__init__::{error}\")", "def init_database(self):\n # init_database(self.engine)", "def __init__(self, dbconnect):\n self.dbconnect = dbconnect", "async def conn(self) -> None:\n self.bot.db = await aiosqlite.connect('database.db')", "def setup(cls):\n super().setup()\n cls.db = DBCommunication()", "def connect(self):\n super(NERDmLoader, self).connect()\n self.lateloadr._client = self._client\n self.lateloadr._db = self._db\n self.relloadr._client = self._client\n self.relloadr._db = self._db", "def __connect(self):\n self.conn = pymysql.connect(self.opts.DB_HOST, self.opts.DB_USER,\n self.opts.DB_PASSWORD, self.opts.DB_NAME)", "def __init__(self, wm) -> None:\n conf_dict = wm.context.config.arango_storage._to_dict()\n\n log.debug(conf_dict)\n client = ArangoClient(hosts=conf_dict['hosts'])\n db = client.db(conf_dict['database'],\n username=conf_dict['username'],\n password=conf_dict['password'])\n\n self.db = db\n self.client = client", "async def init(self):\n self.init_connection_params()\n self._pool = await self._create_pool()\n\n 
return self", "def init_database(self):\n init_database(self.engine)" ]
[ "0.75613075", "0.7126608", "0.70486325", "0.70344806", "0.6966653", "0.6887101", "0.6868013", "0.6741305", "0.6721468", "0.67194366", "0.6630532", "0.65871525", "0.6585673", "0.6537051", "0.6491186", "0.64337355", "0.6409378", "0.6344977", "0.6321777", "0.63136494", "0.6289494", "0.62547606", "0.62376785", "0.62314355", "0.621757", "0.62145686", "0.62050384", "0.61879784", "0.61844724", "0.6172804" ]
0.75485164
1
Displays a generic error message when there is a connection error.
def display_error():
    clear_screen()
    line = '#' * 20
    print(f'{line}\n# CONNECTION ERROR #\n{line}')
    exit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_error(self, conn, msg):\n print(\"ERROR PLACEHOLDER\")\n\n return", "def __display_error(self, socket_error):\r\n\t\tif socket_error == QAbstractSocket.RemoteHostClosedError:\r\n\t\t\tself._window.open_dialog(\"Serveur déconnecté\", \"Le serveur s'est déconnecté !\")\r\n\t\t\t# Add signal to be emitted that pops up a dialog window\r\n\t\telif socket_error == QAbstractSocket.OperationError: # Raised when the socket already is connected\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\tself._window.open_dialog(\"Erreur de connection\",\r\n\t\t\t\t\t\t\t\t\t \"L'erreur suivante est survenue : {}.\".format(self.__tcpSocket.errorString()),\r\n\t\t\t\t\t\t\t\t\t type=\"error\")", "def _connection_failed(self, link_uri, msg):\n print \"Connection to %s failed: %s\" % (link_uri, msg)", "def _connection_failed(self, link_uri, msg):\n print('Connection to %s failed: %s' % (link_uri, msg))", "def _connect_failed(self):\n\t\tself.root.stdout.write(\"Error: Connection Failed!\\n\")\n\t\tself.client = False", "def error(self, msg=None):\n\t\tdebug(\"Connection Error:\", True)\n\n\t\tif msg is not None:\n\t\t\tdebug(msg, True)\n\t\t\n\t\tif self.port is not None:\n\t\t\tself.port.close()\n\t\t\n\t\tself.state = State.Unconnected", "def error_handler(msg):\n print \"Server Error: %s\" % msg", "def error_handler(msg):\n print \"Server Error: %s\" % msg", "def error_handler(msg):\n print(\"Server Error: %s\" % msg)", "def connection_failed(self, connection, error):\n assert False", "def db_connection_error(error):\n return internal_server_error(error)", "def _connection_failed(self, link_uri, msg):\n\t\tprint \"Connection to %s failed: %s\" % (link_uri, msg)\n\t\tself.is_connected = False", "def display_error(self, message):\n self.ui_widget.display_error(message=message)", "def offline_error():\n\n colored('No available internet connection\\n', 'red')", "def error(self, msg):\n self.send_command('error', {\n 'msg': msg,\n })", "def show_error(title, message, print_message=False):\n\n pass", "def send_server_error(self):\n\n self.send_message(\n Message(\n Codes.SERVER_ERROR,\n { 'message': 'The server has encountered an internal error.' 
}\n )\n )", "def __connect_failed__(self):\n # Ask the user what to do with the error\n choice = input(\"[A]bort, [C]hange address and port, or [R]etry?\")\n if (choice.lower() == \"a\"):\n exit()\n elif (choice.lower() == \"c\"):\n address = input(\"Please enter the address:\")\n port_number = input(\"Please enter the port:\")", "def onConnectError(self, fetcher, error): #$NON-NLS-1$\r", "def error(self, message):\n print message", "def print_requests_connectionerror(cls, class_name):\n print(\n f\"{cls.ERROR_PREFIX} {cls.REQUESTS_PACKAGE_CONNECTIONERROR_MESSAGE} '{class_name}'.\"\n )", "def error(self, message=None, show_help=True):", "def _on_server_error(server, *_):\n exception = sys.exc_info()[1]\n if isinstance(exception, ConnectionError):\n # These are expected errors when the browser closes the connection.\n return\n # Other errors would be unexpected, so print them.\n traceback.print_exc()", "def error(cls, message):\n print('[ERROR] {0}'.format(message))", "def on_connection_error(self):\n log.error(\"Stream connection has errored or timed out\")", "def error():\n title = session.get('title', 'Error')\n error_message = session.get('error_message', 'An error has occurred.')\n level = session.get('level', 'error')\n logger.error(\"Displaying error to the user\", error_message=error_message, level=level)\n return render_template('errors/error.html', title=title, error_message=error_message, level=level)", "def error(msg):\n click.secho(f'[ERROR] {msg}', fg='red')", "def _on_error(self, error):\n print(error + \" for \" + self.session_name)", "def error(self, error_msg):\n print(\"ERROR DETECTED\")\n print(error_msg)", "async def help_error(self, ctx, error):\n await self.log_error_and_apologize(ctx, error)" ]
[ "0.7256641", "0.70974416", "0.7031372", "0.70112437", "0.69415045", "0.6857179", "0.6755222", "0.6755222", "0.67330766", "0.6692993", "0.66359854", "0.65511245", "0.6525908", "0.6504783", "0.6490381", "0.64878225", "0.64797825", "0.64601153", "0.64406806", "0.64355856", "0.64314383", "0.6419784", "0.6413911", "0.63966876", "0.6359918", "0.6353594", "0.63503057", "0.63500124", "0.63422966", "0.6341983" ]
0.738177
0
Displays the ratings that were scraped.
def display_ratings(ratings):
    # only attempt to display the ratings if any were found
    if ratings:
        print('\n[RATINGS]\n')
        for rating in ratings:
            print(f' {rating}', end=' ')
        # needed to get printing back to normal
        print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_ratings(all_ratings):\n print(\"Here is the current list of all ratings:\")\n for restaurant, rating in sorted(all_ratings.items()):\n print(f'{restaurant} is rated at {rating}.')", "def display_player_ratings(player_ratings):\r\n print('\\nCLASSEMENT DES PARTICIPANTS:\\n Nom ELO Score')\r\n for i in range(0, len(player_ratings)):\r\n print(players_table.get(doc_id=player_ratings[i][0])['Nom'],\r\n players_table.get(doc_id=player_ratings[i][0])['ELO'],\r\n player_ratings[i][1])", "def get_ratings(self):\n return self.ratings", "def get_ratings(self):\n return self.ratings", "def check_ratings(self):\n\n self.browser.get('https://www.imdb.com/')\n\n for title in self.titles:\n input_bar = self.browser.find_element_by_id('navbar-query')\n input_bar.clear()\n\n input_bar.send_keys(title)\n input_bar.send_keys(Keys.RETURN)\n\n time.sleep(3)\n\n # Click on the first suggestion\n css_selector = \"div.findSection:nth-child(3) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(2) > a:nth-child(1)\"\n self.browser.find_element_by_css_selector(css_selector).click()\n time.sleep(3)\n\n # Pull details that will always be available\n score = str(self.browser.find_element_by_class_name('ratingValue').text)\n score = score.split('/10')[0].replace(',', '.')\n\n time.sleep(3)\n\n summary = str(self.browser.find_element_by_class_name('summary_text').text)\n subtext = str(self.browser.find_element_by_class_name('subtext').text)\n\n # Pull details that differ between movies and series\n try:\n duration = str(self.browser.find_element_by_class_name('bp_sub_heading').text) # Only for series\n if 'episodes' not in duration:\n duration = 'Some episodes'\n except Exception:\n # bp_sub_heading won't be found on a movie page\n duration = 'movie'\n\n if subtext[0].isdigit():\n # Split up the details from the subtext\n subtext_list = subtext.split(' | ')\n else:\n # Some movies' subtext starts with 'R' / 'PG-13'\n subtext_list = subtext.split(' | ')\n del subtext_list[0]\n\n # Duration\n if duration == 'movie':\n show_type = 'Movie'\n duration = subtext_list[0]\n try:\n year = datetime.datetime.strptime(subtext_list[2].split(' (')[0], '%d %B %Y').strftime('%Y')\n except ValueError:\n year = str(subtext_list[2].split(' (')[0][-4:])\n\n else: # series\n show_type = 'Serie'\n # Retrieve last season and its release date\n season_tab = str(self.browser.find_element_by_class_name('seasons-and-year-nav').text).strip()\n\n numbers = re.findall('[0-9]+', season_tab)\n latest_season = int(numbers[0])\n latest_year = int(max(numbers, key=lambda x: int(x)))\n\n duration += ' (%d Seasons in %d), %s per episode' % (latest_season, latest_year, subtext_list[0])\n\n year = re.findall('[0-9]+', subtext_list[2])[0]\n\n # Pull some more data out from the subtext\n genres = subtext_list[1].split(', ')\n\n # Pull details that are not always available\n creds_list = []\n creds = self.browser.find_elements_by_class_name('credit_summary_item')\n for c in creds:\n temp = str(c.text)\n if '|' in temp:\n temp = temp.split('|')[0]\n\n creds_list.append(temp)\n\n self.data_dict[title] = {\n 'score': score,\n 'summary': summary,\n 'duration': duration,\n 'credits': creds_list,\n 'genres': genres,\n 'released': year,\n 'type': show_type,\n }", "def printJudgeRatings(self):\n\n try:\n judgeNotesLogger.info(\"printJudgeRatings: Printing out judge ratings from '%s'\\n\", self.notesFile)\n\n # Print Normal List First.\n for ratingTuple in self.judgedSongList:\n if ratingTuple[0][2] != \"\":\n print(\"SONG:\", 
ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\", \"(\"+ratingTuple[0][2]+\")\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"/10]\\n\")\n else:\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"/10]\\n\")\n\n # Print Special List Second.\n for ratingTuple in self.specialSongList:\n if ratingTuple[0][2] != \"\":\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\", \"(\"+ratingTuple[0][2]+\")\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"]\\n\")\n else:\n print(\"SONG:\", ratingTuple[0][0], \"{\"+ratingTuple[0][1]+\"}\",\n \"\\nRATING:\", \"[\"+str(ratingTuple[1])+\"]\\n\")\n \n except:\n judgeNotesLogger.warning(\"printJudgeRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))", "def get_rating(self):\n self.rating = imdb.get_title_ratings(self.ID)['rating']", "def findRatings():\n if request.method == 'POST':\n connector = appEngine.connect()\n rating = int(request.form['rating'])\n joinTable = connector.execute(\"SELECT movie.movieName, actor.actorName, rating.rating FROM movie INNER JOIN rating ON movie.movieID=rating.movie_ID INNER JOIN movie_actor ON movie.movieID=movie_actor.movie_ID INNER JOIN actor ON movie_actor.actor_ID=actor.actorID WHERE rating.rating >= (?);\", (rating))\n result = {'data': [dict(zip(tuple(joinTable.keys()), i)) for i in joinTable.cursor]}\n return result\n return render_template('rating_search.html')", "def printRawRatings(self):\n\n try:\n judgeNotesLogger.info(\"printRawRatings: Retrieving Raw Ratings from '%s'\\n\", self.notesFile)\n sortedRatings = sorted(self.ratingsRaw.keys(), key=float)\n for rating in sortedRatings:\n print(\"[\"+str(rating)+\"/10]:\"+str(self.ratingsRaw[rating]))\n ratingSum = self.getRatingSum()\n sortedRatings = sorted(self.specialRatingsRaw.keys(), key=str.lower)\n for rating in sortedRatings:\n print(\"[\"+str(rating)+\"]:\"+str(self.specialRatingsRaw[rating]))\n print(\"TOTAL:\"+str(round(ratingSum, 1)))\n print(\"JUDGEDFILES:\"+str(self.numJudgedFiles))\n print(\"SPECIALFILES:\"+str(self.numSpecialFiles))\n print(\"TOTALFILES:\"+str(self.numTotalFiles))\n print(\"AVERAGE:\"+str(round(self.average, 2))+\"\\n\")\n\n except:\n judgeNotesLogger.warning(\"printRawRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))", "def printRatingsToSongs(self):\n judgeNotesLogger.info(\"printRatingsToSongs: Printing songs for each rating parsed\")\n try:\n\n # Print out normal ratings first.\n sortedRatings = sorted(self.ratingsToSongs.keys(), key=float)\n for rating in sortedRatings:\n print(\"\") # For neater printing. Newline still occurs here\n songsInRating = self.ratingsToSongs[rating]\n print(\"[\"+str(rating)+\"/10]\")\n for song in songsInRating:\n if song[2] != \"\":\n print(\"-->\", song[0], \"{\"+song[1]+\"}\", \"(\"+song[2]+\")\")\n else:\n print(\"-->\", song[0], \"{\"+song[1]+\"}\")\n\n # Print out special ratings after.\n sortedRatings = sorted(self.specialRatingsToSongs.keys(), key=str.lower)\n for rating in sortedRatings:\n print(\"\") # For neater printing. Newline still occurs here\n songsInRating = self.specialRatingsToSongs[rating]\n print(\"[\"+str(rating)+\"]\")\n for song in songsInRating:\n if song[2] != \"\":\n print(\"-->\", song[0], \"{\"+song[1]+\"}\", \"(\"+song[2]+\")\")\n else:\n print(\"-->\", song[0], \"{\"+song[1]+\"}\")\n \n print(\"\") # For neater printing. 
Newline still occurs here\n except:\n judgeNotesLogger.warning(\"printRatingsToSongs: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))", "def review_rating(self, soup):\n logging.info('Getting hotel review rating.')\n reviews_rating = {}\n if soup.select_one('div.scores_full_layout') is None:\n logging.error('Cant get extended rating.')\n reviews_rating = {}\n else:\n for review_rating in soup.select_one('div.scores_full_layout').findAll(\n 'li', {\"class\": \"clearfix\"}):\n rating_class = review_rating.find(\"p\", {\"class\": \"review_score_name\"}).text.strip()\n rating_score = review_rating.find(\"p\", {\"class\": \"review_score_value\"}).text.strip()\n reviews_rating[rating_class] = rating_score\n\n return reviews_rating", "def enterRating():\n if request.method == 'POST':\n movieName = request.form['movieName']\n username = request.form['userName']\n rating = request.form['rating']\n comment = request.form['comment']\n post([movieName, username, rating, comment])\n return render_template('rating_enter.html')", "def print_recommendations(self):\n\n rec_vector = self.generate_recommendation()\n\n print(\"Recommendations for user {} \".format(self.username))\n\n for ranking, subreddit_name in enumerate(rec_vector, 1):\n print(\"{}.: {}\".format(ranking, subreddit_name))\n\n if ranking%10 == 0 and ranking!=0:\n check_if_move_on = True\n print(\"\\nType c and press enter for the next 10 subreddits.\\n\")\n print(\"Type q and press enter to return to main menu.\\n\")\n\n while check_if_move_on:\n choice = input()\n\n if choice == 'c':\n break\n\n elif choice == 'q':\n break\n\n else:\n print(\"Not a valid entry, please enter again.\")\n\n # break the whole thing if they want to quit\n if choice == 'q':\n break", "def all(self):\n ratings = []\n for i in range (1, self.pages()+1):\n ratings.extend(self.page(i))\n \n self._set_attrs_to_values({'ratings': ratings})\n return ratings", "def get_ratings(self):\n df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)\n df = IoManager.scale_ratings(df)\n df = IoManager.normalize_ratings_per_archetype(df)\n df = self.add_ratings_sum(df)\n # print(df[[\"name\", \"monogreen\", \"simic_ramp\", \"general\"]].tail(60))\n # print(df[[\"name\", \"general\"]].sort_values(ascending=False, by=\"general\").head(50))\n return df", "def ratings(self):\n session = Session.object_session(self)\n return session.query(Rating).join(Section).filter(Section.professor == self).all()", "def all_prods(request):\n products = Product.objects.all()\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': products,\n 'stars': stars\n }\n return render(request, \"products.html\", context)", "def all_ratings(self):\n\n for u, u_ratings in iteritems(self.ur):\n for i, r in u_ratings:\n yield u, i, r", "def ratings(self, ratings):\n\n self._ratings = ratings", "def ratings(self, ratings):\n\n self._ratings = ratings", "def rating_review(catalog):\n reviews = list()\n errors = 0\n for ix, page in enumerate(catalog.iloc[:, 0], 1):\n try:\n soup_2 = fetch(page, \"\").find_all(\"div\", {\"class\": \"col-xs-16 review_container\"})\n for comment in soup_2:\n comment_text = comment.find_all(\"div\", {\"class\": \"the_review\"})[0].text.strip()\n icon = str(comment.find_all(\"div\")[0])\n if \"fresh\" in icon:\n reviews.append('1 - ' + comment_text)\n elif \"rotten\" in icon:\n reviews.append('0 - ' + comment_text)\n except:\n errors += 1\n print('\\r4/4 — {:.2%} of reviews scraped. 
Error rate: {:.2%}'.format(ix/len(catalog),\n errors/ix), end=' ')\n print('\\r{} reviews successfully scraped. Error rate: {:.2%}'.format(\n len(reviews)-errors, errors/ix), end='\\n')\n return reviews", "def get_rating(text):\n movie = text\n page = requests.get('http://www.imdb.com/find?ref_=nv_sr_fn&q=' + movie + '&s=tt')\n soup1 = BeautifulSoup(page.content, 'html.parser')\n movieid = soup1.select(\".findList tr a\")[0].get('href')\n movielink = \"http://www.imdb.com\" + movieid\n mlinkpage = requests.get(movielink)\n soup2 = BeautifulSoup(mlinkpage.content, 'html.parser')\n movierating = soup2.select(\".ratingValue span\")[0].text\n metascore = soup2.select(\".metacriticScore\")\n reviewlink = movielink + 'reviews'\n linkpage = requests.get(reviewlink)\n soup3 = BeautifulSoup(linkpage.content, 'html.parser')\n \n return soup3, movierating", "def get_reviews(review_url):\n print review_url\n html = urllib.urlopen(review_url).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n\n rating_scores = soup.findAll(\"span\", \"ratingScore\")\n num_ratings = len(rating_scores) - 1\n\n current_reviews = soup.findAll(\"div\", \"currentVintageProfessinalReviews\")\n num_cur_reviews = str(current_reviews).count('ratingProvider')\n past_reviews = soup.findAll(\"ul\", \"pastVintagesProfessionalReviews\")\n num_past_reviews = str(past_reviews).count('ratingProvider')\n\n print 'There are {0} reviews for prior vintages of this wine.'.format(num_past_reviews)\n print 'There are {0} current reviews for this vintage.\\n'.format(num_cur_reviews)\n\n rating_provider = soup.findAll(\"span\", \"ratingProvider\")\n rating_score = soup.findAll(\"span\", \"ratingScore\")\n reviewers = re.findall('(?<![A-Z])[>]([A-Z]+(?![A-Z]))', str(rating_provider))\n ratings = re.findall('(?<![A-Z])[0-9]{2}(?![A-Z])', str(rating_score))\n\n print \"Ratings List:\", ratings\n print \"Current Reviews: \", num_cur_reviews\n\n currentreviews = []\n for j in range(num_cur_reviews):\n print \"Current Review #\"+str(j+1)+\":\", reviewers[j], ratings[j]\n currentreviews.append((reviewers[j], ratings[j]))\n print currentreviews\n\n print \"\\nPast Reviews: \", num_past_reviews\n past_review_ratings = []\n for k in range(num_cur_reviews, num_past_reviews+num_cur_reviews):\n #print \"Past Review #\"+str(k-num_cur_reviews+1)+\":\", reviewers[k], int(ratings[k])\n past_review_ratings.append(float(ratings[k]))\n if k > 30:\n break\n if num_past_reviews != 0:\n avg_past_reviews = sum(past_review_ratings)/len(past_review_ratings)\n round(avg_past_reviews, 2)\n else:\n avg_past_reviews = 0\n\n print \"Average of Past Reviews: \", avg_past_reviews\n\n return currentreviews, avg_past_reviews", "def show_movie_profile(movie_id):\n\n # movie object given a movie_id\n movie = Movie.query.filter_by(movie_id=movie_id).first()\n\n # list of all rating objects for a given movie_id ordered by user_id\n sorted_ratings = Rating.query.filter_by(movie_id=movie_id).order_by('user_id').all()\n\n return render_template(\"movie_profile.html\", movie=movie, ratings=sorted_ratings)", "def display_round_matches(player_ratings):\r\n print('\\nMATCHES DE LA RONDE: ')\r\n for i in range(0, len(player_ratings), 2):\r\n print(players_table.get(doc_id=player_ratings[i][0])['Nom'], \"(BLANCS)\",\r\n \"contre\",\r\n players_table.get(doc_id=player_ratings[i+1][0])['Nom'], \"(NOIRS)\")", "def results():\n df = mdr.elo_ratings.sort_values([\"elo\", \"matches\"], ascending=False)\n df[\"score\"] = (1 / (1 + 10**((1500 - df[\"elo\"]) / 400))) * 100\n df = df[[\"items\", 
\"score\", \"matches\"]]\n\n html_formatted = df.to_html(classes=[\"table\", \"table-dark\", \"table-hover\"], index=False, float_format=\"%.1f\")\n\n return render_template('results.html', table=html_formatted)", "def show_recommendation_pool(self, top_n=None):\n i = 0\n if top_n is None:\n top_n = self.number_of_recommendations\n\n for _, rdata in self.recommendation_pool.items():\n print(\"\\n{R.movie_id} - {R.title} - {R.genres}\".format(\n R=rdata['movie_obj']))\n\n if 'title_similarity' in rdata:\n print(\" Title Similarity: {} - ({})\".format(\n rdata['title_similarity'], rdata['movie_obj'].title))\n\n if 'genres_similarity' in rdata:\n print(\" Genres Similarity: {} - ({})\".format(\n rdata['genres_similarity'], rdata['movie_obj'].genres))\n\n if 'tags_similarity' in rdata:\n print(\" Tags Similarity: {} - ({})\".format(\n rdata['tags_similarity'], rdata['tags']))\n\n if 'final_similarity' in rdata:\n print(\" -> Final Similarity: {}\".format(\n rdata['final_similarity']))\n\n i += 1\n if top_n and i >= top_n:\n break", "def output(self):\n print \"Name:\", self.name\n print \"City:\", self.city\n print \"Country:\", self.country\n print \"Number of Reviews:\", len(self.sentiments)\n print \"Old Reviews (Stars):\", self.stars_avg\n print \"Old Reviews (%):\", self.stars_avg/5\n print \"New Rating (Stars)\", self.new_rating*5\n print \"New Rating (%):\", self.new_rating", "def movie_list():\n\n movies = Movie.query.order_by('title').join(Rating).all()\n # movies = Movie.query.options(db.joinedload('rating')).order_by('title').all()\n\n return render_template(\"movie_list.html\", movies=movies)", "def load_ratings(self):\n logging.debug(\"Loading ratings data...\")\n\n # loading ratings\n data=requests.get(self.__URL_RATINGS)\n self.__dataframe_ratings=pd.DataFrame(data.json())\n # calculate implicit and explicit ratings\n # XXX use a function to calculate implicit rating considering the video lead time\n self.__dataframe_ratings['rating_implicit'] = (self.__dataframe_ratings['video_watch_time']/100) * 0.3\n self.__dataframe_ratings['rating_explicit'] = (self.__dataframe_ratings['rating_value']) * 0.7\n\n # create a new column to put implicit or explicit rating value\n self.__dataframe_ratings['overall_rating_value'] = self.__dataframe_ratings['rating_implicit'] + self.__dataframe_ratings['rating_explicit']\n\n logging.debug(\"Ratings data loaded! n=%s\" % self.__dataframe_ratings.shape[0])\n\n return self.__dataframe_ratings" ]
[ "0.6716487", "0.66836923", "0.6439657", "0.6439657", "0.64141905", "0.6388619", "0.6368018", "0.635137", "0.62618804", "0.6109449", "0.61091423", "0.609001", "0.6076961", "0.60705274", "0.6063568", "0.60551393", "0.60547274", "0.60209185", "0.60183233", "0.60183233", "0.6004603", "0.59918994", "0.591316", "0.5913026", "0.5902307", "0.58399844", "0.5830568", "0.5813931", "0.57905143", "0.57540613" ]
0.74975514
0
Scrapes the plot from the provided URL.
def get_plot(url):
    soup = get_soup(url.rsplit('/', 1)[0])

    if soup:
        # scrape the plot section
        plot_div = soup.find('div', {'id': 'titleStoryLine'})

        # fixes bug where no plot is found
        try:
            plot_class = plot_div.find('span', {'itemprop': 'description'})
            plot = plot_class.text.strip()
            return ' '.join(plot.split())
        except AttributeError:
            return 'The plot was not available.'
    else:
        display_error()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def scrape_page(session, url):\n logging.info('Scraping %s', url)\n images = await get_url_images(session, url)\n await save_url_images(images)", "def scrape_url(url):\n html = requests.get(url).text\n return scrape_html(html)", "def fn_GetMoviePlot(self, details):\n\n # If the custom url was not actually defined and we had no cached\n # data, then there is nothing to do.\n #\n if details is None:\n return\n\n dom = parseString(details)\n d = dom.firstChild\n self.plot = get_child_data(d, \"plot\", self.plot)\n dom.unlink()", "def joblib_read_img_url(url):\n\n from matplotlib.image import imread\n fd = urlopen(url, timeout=10)\n return imread(io.BytesIO(fd.read()))", "def get_data(self, url):\n # Initialize the button that needs to be pressed to get download the data\n button = None\n # While this button is of type 'None' we reload the browser\n while button is None:\n try:\n # Navigate to the URL\n self.go_to_url(url)\n # Sleep the code by the defined time plus a random number of seconds between 0s and 2s. This should\n # reduce the likelihood that Google detects us as a scraper\n time.sleep(self.sleep + 2 * np.random.rand(1))\n # Try to find the button and click it\n line_chart = self.browser.find_element_by_css_selector(\n \"widget[type='fe_line_chart']\")\n button = line_chart.find_element_by_css_selector(\n '.widget-actions-item.export')\n button.click()\n except exceptions.NoSuchElementException:\n # If the button cannot be found, try again (load page, ...)\n pass\n # After downloading, wait again to allow the file to be downloaded\n time.sleep(self.sleep)\n # Load the data from the csv-file as pandas.DataFrame object\n data = pd.read_csv(self.filename, skiprows=1)\n # Set date as index:\n if 'Day' in data.columns:\n data.Day = pd.to_datetime(data.Day)\n data = data.set_index(\"Day\")\n frequency = 'Daily'\n elif 'Week' in data.columns:\n data.Week = pd.to_datetime(data.Week)\n data = data.set_index(\"Week\")\n frequency = 'Weekly'\n else:\n data.Month = pd.to_datetime(data.Month)\n data = data.set_index(\"Month\")\n frequency = 'Monthly'\n # Sleep again\n time.sleep(self.sleep)\n # Delete the file\n while os.path.exists(self.filename):\n try:\n os.remove(self.filename)\n except:\n pass\n return data, frequency", "def do_get(self, url):\n self.driver.get(url)", "def get(self, url):\n self.browser.get(url)", "def webScraper(self):\n try:\n self.covid_df = pd.read_csv(self.COVID_URL)\n except:\n sys.exit('COVID data is unavailable at source.')\n \n latest_date = self.covid_df['date'].max()\n earliest_date = self.covid_df['date'].min()\n self.covid_df = self.covid_df[self.covid_df['date'] == self.date.strftime('%Y-%m-%d')]\n \n if self.covid_df.empty:\n exit_string = 'Requested date not available. 
Latest date available is ' + latest_date + ' while earliest is ' + earliest_date\n sys.exit(exit_string)\n else:\n self.covid_df = self.covid_df[self.covid_df['location'] != 'World']\n \n try:\n self.countries_centroids = pd.read_html(self.CENTROIDS_URL, header=0, index_col='country')[0]\n except:\n sys.exit('Central coordinates data for countries unavailable from Google developers.')\n \n try:\n self.geo_data = requests.get(self.GEOJSON_URL).json()\n except:\n sys.exit('GeoJSON data unavailable to draw country polygons.')", "def parse(self, url):\n pass", "def scrape(url):\n logger.debug('[SCRAPER]\\t Loading url: %s', url)\n try:\n html_page = urlopen(url).read()\n except (http.client.IncompleteRead, urllib.error.URLError):\n logger.warning(\"[SCRAPER]\\t Could not read the page for url: %s\", url)\n return ''\n logger.debug('[SCRAPER]\\t Parsing with BS')\n soup = BeautifulSoup(html_page, 'html5lib')\n data = soup.findAll('p')\n data = [p.get_text().replace('\\n', '').replace('\\t','') for p in data]\n\n if not data:\n logger.warning('[SCRAPER]\\t No data found for url: %s', url)\n else:\n logger.debug('[SCRAPER]\\t [%s]: \\n %s', url, data)\n\n return ' '.join(data) if data else ''", "def run(self, url=''):\n if url:\n webbrowser.open(url)", "def get_plot(session_id, test_name):\n return Plot.get_plot(session_id, test_name)", "def load(self, url):\n pass", "def load(self, url):\n pass", "def get(self, url: str):\n\n self.driver.get(url)", "def plot_race(url):\n #hey, thanks again for these functions!\n idrace = id_from_url(url)\n xml = get_poll_lxml(idrace) \n colors = plot_colors(xml)\n\n if len(colors) == 0:\n return\n \n #really, you shouldn't have\n result = race_result(url)\n \n poll_plot(idrace)\n plt.xlabel(\"Date\")\n plt.ylabel(\"Polling Percentage\")\n for r in result:\n plt.axhline(result[r], color=colors[_strip(r)], alpha=0.6, ls='--')", "def loadComponentFromURL( cUrl, tProperties=() ):\n StarDesktop = getDesktop()\n oDocument = StarDesktop.loadComponentFromURL( cUrl, \"_blank\", 0, tProperties )\n return oDocument", "def loadComponentFromURL( cUrl, tProperties=() ):\n StarDesktop = getDesktop()\n oDocument = StarDesktop.loadComponentFromURL( cUrl, \"_blank\", 0, tProperties )\n return oDocument", "def GetDataFromURL(self, url):\n try:\n deftimeout = socket.getdefaulttimeout()\n socket.setdefaulttimeout(1)\n try:\n logging.debug('Slide fetching data from %s' % url)\n u = urllib.urlopen(url)\n data = u.read()\n return data\n except:\n logging.exception('Uh oh!')\n return None\n finally:\n socket.setdefaulttimeout(deftimeout)", "def from_url(self) -> PngImagePlugin.PngImageFile:\n response = requests.get(self.url)\n img = Image.open(BytesIO(response.content))\n\n return img", "def fetch_song_data(url):\r\n response = requests.get(url)\r\n return response.text", "def open_url(self, url: str):\n self.driver.get(url)", "def load_data(url: str):\n\n page = requests.get(url=url)\n soup = BeautifulSoup(page.content, 'html.parser')\n return soup", "def run(self):\n\n for url in self.urls:\n try:\n # Use requests to retrieve web page data\n print(url)\n response = session.get(url, ) # allow_redirects=True)\n\n if response.status_code != 200:\n print('Failed to retrieve page, URL: {0}, error: {1}\\n'.format(url, response.status_code))\n return\n\n # Get web page data from HTML response\n content = get_json_data(response.text)\n\n # Compile data into dictionary to be used for reporting\n summary_data = generate_report(content)\n\n # Generate/print report\n 
print_report(summary_data)\n\n except Exception as error:\n print('Scraper failed to run for URL {0}, error: {1}, {2}\\n'.format(\n url, type(error).__name__, error\n ))\n\n # time.sleep(1) # for load concerns", "def scrape_url(url):\n r = requests.get(url)\n url_list = get_urls(r.text)\n email_list = get_email_addresses(r.text)\n phone_list = get_phone_numbers(r.text)\n\n print_list('Urls', url_list)\n print_list('Emails', email_list)\n print_list('Phone Numbers', phone_list)", "def scrape(self):\n try:\n self.result = urlfetch.fetch(self.url)\n except DownloadError:\n self.result = urlfetch.fetch(self.url) \n if ((self.result.status_code == 200) and\n (self.result.content_was_truncated == 0)):\n self.soup = BeautifulSoup(self.result.content)\n else:\n logging.critical(\"Bad Status Code: \", self.result.status_code, self.url)\n sys.exit(1)", "def open_url(self, url):\n\n self.driver.get(url)", "def crawl(self, url):\n return None", "def fetch(self, url, listener, useCache = True): #$NON-NLS-1$\r", "def scrape_url(url):\n ydl_opts = {\n \"ignoreerrors\": True, # Skip private and unavaliable videos\n }\n\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result_ydl = ydl.extract_info(\n url,\n download=False # No download needed, only the info\n )\n\n logger.debug('Url scraped {}', url)\n if 'entries' in result_ydl:\n # It's a playlist or a list of videos\n return result_ydl['entries']\n # Just a video\n return [result_ydl]" ]
[ "0.59756243", "0.59229475", "0.5871458", "0.5760079", "0.56895536", "0.5675282", "0.56477475", "0.55973434", "0.5542551", "0.55007184", "0.54510456", "0.54468447", "0.54364306", "0.54364306", "0.54296917", "0.5414543", "0.53710335", "0.53710335", "0.5366538", "0.5339143", "0.5326717", "0.5325852", "0.5311769", "0.5304213", "0.53011084", "0.5289374", "0.5289186", "0.5284501", "0.526057", "0.52544653" ]
0.6619968
0
Cleans up the given comments.
def cleanup_comments(comments):
    clean_comments = []
    if comments:
        for comment in comments:
            cleaned_up = sub(r'\n\n {8}\n {8}\n {12}\n {16}\n {16}\n {12}\nEdit', '', comment)
            clean_comments.append(cleaned_up)
    return clean_comments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_comments(self):\n new_lines = list()\n for line in self.lines:\n if ((not line.startswith(\"//\")) & (not line.isspace()) &\n (not line.startswith(\"/*\") & (not line.startswith(\"*/\")))):\n line = Parser.strip_line(line)\n new_lines.append(line)\n self.lines = new_lines", "def del_comm(self, blocks=False):\n logging.debug('Delete comments from text')\n if not(self.check()):\n raise GcodeError(\"Invalid g-codes\")\n temp = []\n comment = re.compile(';\\ .*')\n for line in self.blocks:\n n = comment.search(line)\n if n:\n line = line[:n.span()[0]]\n line = line.strip()\n if line != \"\":\n temp.append(line)\n if blocks:\n return temp\n return \"\\n\".join(temp)", "def remove_comments(ls):\r\n for i in range(len(ls)):\r\n ls[i] = re.sub(r'//.*', '', ls[i])\r\n\r\n return ls", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def correct_tokenization(self, comments):\n\t\tself.yap(\"Joining orphaned lines of punctuation...\")\n\t\tcorrected = []\n\t\tfor line in comments:\n\t\t\tif all([w in punct for w in line]):\n\t\t\t\tcorrected[-1] = corrected[-1] + line if corrected else \"\"\n\t\t\telse:\n\t\t\t\tcorrected.append(line)\n\t\t#combine punctuation sequences into a single token\n\t\tself.yap(\"Joining punctuation sequences... \")\n\t\tcorrected = [self.joinPunctuationSequence(c) for c in corrected]\n\t\treturn corrected", "def _removeComments(code):\r\n # remove all occurance streamed comments (/*COMMENT */) from string\r\n text = re.sub(re.compile('/\\*.*?\\*/', re.DOTALL), '', code)\r\n # remove all occurance singleline comments (//COMMENT\\n ) from string\r\n return re.sub(re.compile('//.*?\\n'), '', text)", "def strip_comments(tokens):\n prev_typ = None\n prev_end_col = 0\n for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:\n if typ in (tokenize.NL, tokenize.NEWLINE):\n if prev_typ in (tokenize.NL, tokenize.NEWLINE):\n start_col = 0\n else:\n start_col = prev_end_col\n end_col = start_col + 1\n elif typ == tokenize.COMMENT and start_row > 2:\n continue\n prev_typ = typ\n prev_end_col = end_col\n yield typ, tok, (start_row, start_col), (end_row, end_col), line", "def comments(self, comments):\n\n self.container['comments'] = comments", "def deleteComments(self: Self, event: Event = None) -> None:\n #@+<< deleteComments docstring >>\n #@+node:ekr.20171123135625.37: *3* << deleteComments docstring >>\n #@@pagewidth 50\n #@-<< deleteComments docstring >>\n c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper\n #\n # \"Before\" snapshot.\n bunch = u.beforeChangeBody(p)\n #\n # Initial data.\n head, lines, tail, oldSel, oldYview = self.getBodyLines()\n if not lines:\n g.warning('no text selected')\n return\n # The default language in effect at p.\n language = c.frame.body.colorizer.scanLanguageDirectives(p)\n if c.hasAmbiguousLanguage(p):\n language = c.getLanguageAtCursor(p, language)\n d1, d2, d3 = g.set_delims_from_language(language)\n #\n # Calculate the result.\n changed, result = False, []\n if d1:\n # Remove the single-line comment delim in front of each line\n d1b = d1 + ' '\n n1, n1b = len(d1), len(d1b)\n for s in lines:\n i = g.skip_ws(s, 0)\n if g.match(s, i, d1b):\n result.append(s[:i] + s[i + n1b :])\n changed = True\n elif g.match(s, i, d1):\n result.append(s[:i] + s[i + n1 :])\n changed = True\n else:\n result.append(s)\n else:\n # Remove the 
block comment delimiters from each line.\n n2, n3 = len(d2), len(d3)\n for s in lines:\n i = g.skip_ws(s, 0)\n j = s.find(d3, i + n2)\n if g.match(s, i, d2) and j > -1:\n first = i + n2\n if g.match(s, first, ' '):\n first += 1\n last = j\n if g.match(s, last - 1, ' '):\n last -= 1\n result.append(s[:i] + s[first:last] + s[j + n3 :])\n changed = True\n else:\n result.append(s)\n if not changed:\n return\n #\n # Set p.b and w's text first.\n middle = ''.join(result)\n p.b = head + middle + tail # Sets dirty and changed bits.\n w.setAllText(head + middle + tail)\n #\n # Set the selection range and scroll position.\n i = len(head)\n j = ins = max(i, len(head) + len(middle) - 1)\n w.setSelectionRange(i, j, insert=ins)\n w.setYScrollPosition(oldYview)\n #\n # \"after\" snapshot.\n u.afterChangeBody(p, 'Indent Region', bunch)", "def comments(self, comments):\n if comments is not None and len(comments) > 1000:\n raise ValueError(\"Invalid value for `comments`, length must be less than or equal to `1000`\") # noqa: E501\n\n self._comments = comments", "def comments(self, comments):\n\n self._comments = comments", "def comments(self, comments):\n\n self._comments = comments", "def comments(self, comments):\n\n self._comments = comments", "def comments(self, comments):\n\n self._comments = comments", "def delete_comments(redditor):\n\n for index, comment in enumerate(redditor.comments.new(limit=None)):\n print(\"Deleting comment {}\".format(index))\n comment.edit(\"-\")\n comment.delete()", "def remove_comments(css):\n log.debug(\"Removing all Comments.\")\n iemac, preserve = False, False\n comment_start = css.find(\"/*\")\n while comment_start >= 0: # Preserve comments that look like `/*!...*/`.\n # Slicing is used to make sure we dont get an IndexError.\n preserve = css[comment_start + 2:comment_start + 3] == \"!\"\n comment_end = css.find(\"*/\", comment_start + 2)\n if comment_end < 0:\n if not preserve:\n css = css[:comment_start]\n break\n elif comment_end >= (comment_start + 2):\n if css[comment_end - 1] == \"\\\\\":\n # This is an IE Mac-specific comment; leave this one and the\n # following one alone.\n comment_start = comment_end + 2\n iemac = True\n elif iemac:\n comment_start = comment_end + 2\n iemac = False\n elif not preserve:\n css = css[:comment_start] + css[comment_end + 2:]\n else:\n comment_start = comment_end + 2\n comment_start = css.find(\"/*\", comment_start)\n return css", "def resolve_empty_comments(tree: dict, empty_comments: List[str]):\n empty_comments_dict = {}\n for id in empty_comments:\n empty_comments_dict[id] = tree[\"comments\"][id]\n tree[\"comments\"].pop(id)\n\n for id, comment in tree[\"comments\"].items():\n parent_id = comment[\"parent_id\"]\n while parent_id in empty_comments:\n parent_id = empty_comments_dict[parent_id][\"parent_id\"][3:]\n comment[\"parent_id\"] = (parent_id if parent_id in tree[\"comments\"]\n else tree[\"id\"])\n\n for i, reply_id in enumerate(comment[\"replies\"]):\n if reply_id in empty_comments:\n del comment[\"replies\"][i]\n\n return tree", "def DropComment(text):\n grp = re.compile(r'/\\*[^/]*\\*/').split(text)\n result = string.join(grp);\n grp = re.compile(r'//.*').split(result);\n result = string.join(grp);\n #result = string.join(result.split('\\n')) #remove the line break\n return(' '+result);", "def comment(self, *comments):\n for comment in comments:\n self._p('[*]', comment)", "def check_comments():\n\n # Get the id of the group track\n try:\n group_track = soundcloud.get('/me/tracks')[config.post_track_id]\n except 
HTTPError as e:\n if e.response.status_code == 404:\n logging.critical('Cannot find a track with id %d. Please, fix post_track_id in config.py', config.post_track_id)\n sys.exit(1)\n else:\n raise\n\n # Get the comment list for the group track\n comments = soundcloud.get('/tracks/%d/comments' % group_track.id)\n if not comments:\n logging.info('Nothing found...')\n return\n \n # Process each comment and delete it\n for comment in reversed(comments): \n logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)\n response = None\n \n # Try to process the comment\n try:\n response = process_comment(comment)\n except HTTPError as e:\n if e.response.status_code == 429:\n logging.exception('Failed to repost track: too many requests:')\n return\n elif e.response.status_code // 100 == 4:\n logging.exception('Failed to process comment due to a client request error:')\n else:\n raise\n except Exception as e: # Program crash\n logging.exception('Failed to process comment:')\n else:\n if response:\n logging.info('The comment would have this response: %s', response) \n else:\n logging.info('Comment processed successfully')\n \n # Delete the processed comment\n try:\n soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.warning('Comment already deleted')\n else:\n raise\n\n if config.use_advanced_description and should_update_description:\n update_description()", "def comment_remover(text):\n\n def replacer(match):\n s = match.group(0)\n if s.startswith(\"/\"):\n return \"\"\n else:\n return s\n\n pattern = re.compile(\n r'//.*?$|/\\*.*?\\*/|\\'(?:\\\\.|[^\\\\\\'])*\\'|\"(?:\\\\.|[^\\\\\"])*\"',\n re.DOTALL | re.MULTILINE,\n )\n return re.sub(pattern, replacer, text)", "def remove_comments(ctx, files):\n # CD into Salt's repo root directory\n ctx.cd(CODE_DIR)\n\n # Unfortunately invoke does not support nargs.\n # We migth have been passed --files=\"foo.py bar.py\"\n # Turn that into a list of paths\n _files = []\n for path in files:\n if not path:\n continue\n _files.extend(path.split())\n if not _files:\n utils.exit_invoke(0)\n\n _files = [\n pathlib.Path(fname).resolve() for fname in _files if fname.endswith(\".py\")\n ]\n\n fixes = 0\n exitcode = 0\n comments_regex = re.compile(r\"^# ([I|i])mports? 
.*(([L|l])ibs?)?\\n\", re.MULTILINE)\n for path in _files:\n contents = path.read_text()\n fixed = comments_regex.sub(\"\", contents)\n if fixed == contents:\n continue\n fixes += 1\n exitcode = 1\n path.write_text(fixed)\n if exitcode:\n utils.error(\"Fixed {} files\", fixes)\n utils.exit_invoke(exitcode)", "def remove_c_style_comments(fd):\n ret = []\n comment_state = False\n for line in fd:\n while True:\n # seems we have nothing left\n if len(line) < 2:\n break\n # we're still inside a comment\n if comment_state:\n idx = line.find(\"*/\")\n if idx > -1:\n line = line[idx + 2:]\n comment_state = False\n continue\n # comment doesn't seem to end on this line\n break\n # we're not inside any comment\n else:\n idx = line.find(\"/*\")\n if idx > -1:\n line = line[idx + 2:]\n comment_state = True\n continue\n if \"//\" in line:\n line = line.split(\"//\", 1)[0]\n # only now we can actually do our job\n line = line.strip()\n if len(line) > 0:\n ret.append(line)\n break\n return ret", "def run_standardize_comments():\n df = pd.read_csv('politics_past_30_months_comments_cleaned.csv')\n df = df.drop(['Unnamed: 0'], axis=1)\n\n standardized_df = standardize_comments(df, 'body')\n print(standardized_df.head())\n print()\n print('original length:', len(df))\n print('standardized length:', len(standardized_df))\n print('removed', len(df) - len(standardized_df), 'comments')\n\n # THIS MIGHT BRING BACK THE UTF-8 ENCODING EMOJIS. MIGHT HAVE TO WRITE TO CSV IN ASCII\n standardized_df.to_csv('politics_past_30_months_comments_cleaned_standardized.csv')", "def issues_comments_undelete(self, mar, request):\n return self.aux_delete_comment(mar, request, False)", "def generic_run_standardize_comments(raw_input_file, clean_output_file):\n df = pd.read_csv(raw_input_file)\n df = df.drop(['Unnamed: 0'], axis=1)\n\n standardized_df = standardize_comments(df, 'body')\n print(standardized_df.head())\n print()\n print('original length:', len(df))\n print('standardized length:', len(standardized_df))\n print('removed', len(df) - len(standardized_df), 'comments')\n\n # THIS MIGHT BRING BACK THE UTF-8 ENCODING EMOJIS. 
MIGHT HAVE TO WRITE TO CSV IN ASCII\n standardized_df.to_csv(clean_output_file)", "def filter_comments(asm_utf):\n comments = []\n # removes nones\n a = filter(lambda x: x != None, asm_utf)\n # splits on comment token\n comments = [re.split(\";\", line) for line in a]\n # takes only those that have a comment token\n comments = list(filter(lambda x: len(x) > 1, comments))\n # strips the whitespace from those tokens\n comments = [line[1].strip() for line in comments]\n # removes the singleton chars\n comments = list(filter(lambda x: len(x) > 1, comments))\n # regex to remove section markers and extraneous tabs\n # left over by poor reading of files\n comments = [re.sub('([-=].*[-=]|\\t)', '', line) for line in comments]\n comments = list(filter(lambda x: x != '', comments))\n return comments", "def remove_comments(code):\n state = ReadState.NORMAL\n escape = False\n result = ''\n i = 0\n while i < (len(code)):\n c = code[i]\n if state == ReadState.NORMAL:\n if c == '\"':\n state = ReadState.STRING\n escape = False\n if i + 1 < len(code):\n if c + code[i + 1] == '//':\n state = ReadState.SINGLE_COMMENT\n i += 2\n continue\n if c + code[i + 1] == '/*':\n state = ReadState.MULTI_COMMENT\n i += 2\n continue\n result += c\n elif state == ReadState.STRING:\n if escape:\n escape = False\n else:\n if c == '\"':\n state = ReadState.NORMAL\n if c == '\\\\':\n escape = True\n result += c\n elif state == ReadState.SINGLE_COMMENT:\n if c == '\\n':\n state = ReadState.NORMAL\n result += c\n elif state == ReadState.MULTI_COMMENT:\n if i + 1 < len(code):\n if c + code[i + 1] == '*/':\n state = ReadState.NORMAL\n i += 1\n i += 1\n return result", "def process_comments(session, comments):\n for c in tqdm(comments, desc=\"Injecting comments into DB\"):\n db_comment = session.query(Comment).get(c['id'])\n if db_comment:\n db_comment.update(session, **c)\n else:\n Comment.create(session, **c)" ]
[ "0.6856925", "0.64137363", "0.6254925", "0.6239691", "0.6239691", "0.62174577", "0.6139259", "0.5952024", "0.5854225", "0.5823274", "0.5761137", "0.5758094", "0.5758094", "0.5758094", "0.5758094", "0.5737992", "0.573623", "0.5734576", "0.5660582", "0.5650429", "0.56369406", "0.5579267", "0.555296", "0.55428225", "0.55419505", "0.55317813", "0.548162", "0.54573303", "0.544922", "0.54212207" ]
0.7856755
0
Parses the certificates specific to the United States.
def parse_certificates(soup):
    # removes the first item because it is not needed
    rating_tags = soup.find_all('a')[1:]
    rating_codes = [code.string for code in rating_tags]

    mpaa = []
    if rating_codes:
        for rating in rating_codes:
            # sorry international folks, only interested in the US ratings
            if rating.startswith('United States'):
                mpaa.append(rating)

    return mpaa
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_country_states(self):\n pass", "def test_addr_country_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_country(input_val)\n self.assertEqual(output_val, self.line.addr_country)", "def test_county_limit_by_state__valid_arg(self):\n response_01 = self.client.get(self.url, {\"state\": \"01\"})\n self.assertEqual(response_01.status_code, 200)\n self.assertEqual(\n \"Autauga County\", response_01.data[\"data\"][0][\"county\"]\n )\n response_AL = self.client.get(self.url, {\"state\": \"AL\"})\n self.assertTrue(response_01.data[\"data\"] == response_AL.data[\"data\"])\n response_DC = self.client.get(self.url, {\"state\": \"DC\"})\n self.assertEqual(len(response_DC.data[\"data\"]), 1)\n response_VA = self.client.get(self.url, {\"state\": \"VA\"})\n self.assertEqual(len(response_VA.data[\"data\"]), 1)\n self.assertEqual(\n \"Accomack County\", response_VA.data[\"data\"][0][\"county\"]\n )", "def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry", "def read_and_load_email_domains():\n\twith open(\"world_universities_and_domains.json\") as json_file:\n\t\traw_json_text = json_file.read()\n\n\traw_universities_json = json.loads(raw_json_text)\n\tuniversity_lookup = {}\n\tfor university in raw_universities_json:\n\t\t# print(university)\n\t\t# input()\n\t\tfor domain in university.get(\"domains\"):\n\n\t\t\tuniversity_summary = {}\n\n\t\t\tif university.get(\"name\"):\n\t\t\t\tuniversity_summary[\"name\"] = university[\"name\"]\n\t\t\tif university.get(\"country\"):\n\t\t\t\tuniversity_summary[\"country\"] = university[\"country\"]\n\t\t\tif university.get(\"alpha_two_code\"):\n\t\t\t\tuniversity_summary[\"alpha_two_code\"] = university[\"alpha_two_code\"]\n\t\t\tif university.get(\"state-province\"):\n\t\t\t\tuniversity_summary[\"state-province\"] = university[\"state-province\"]\n\n\t\t\tuniversity_lookup[domain] = university_summary\n\n\treturn(university_lookup)", "def parsePemList(self, s):\r\n x509List = []\r\n bList = dePemList(s, \"CERTIFICATE\")\r\n for b in bList:\r\n x509 = X509()\r\n x509.parseBinary(b)\r\n x509List.append(x509)\r\n self.x509List = x509List", "def parse(self, s):\r\n\r\n bytes = dePem(s, \"CERTIFICATE\")\r\n self.parseBinary(bytes)\r\n return self", "def seperate_City_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n values = dictionary.values()\n res = []\n for elem in keys:\n state = elem[1].strip()\n city = elem[0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append(city)\n return res, list(values)", "def fill_cites(self):\n response = requests.get(\"https://restcountries.eu/rest/v2/all\")\n json_content = json.loads(response.text)\n i = 0\n for t in json_content:\n currency = t[\"currencies\"][0][\"code\"]\n pop = t[\"population\"]\n state_name = t[\"name\"]\n self.cities_from_api[t[\"capital\"].lower()] = [str(state_name), str(currency), str(pop)]", "def new_X509( # pylint: 
disable=invalid-name\n country_name: str = \"US\",\n state_or_province_name: str = \"New York\",\n locality: str = \"New York\",\n organization_name: str = \"mitm\",\n organization_unit_name: str = \"mitm\",\n common_name: str = \"mitm\",\n serial_number: Optional[int] = None,\n time_not_before: int = 0, # 0 means now.\n time_not_after: int = 1 * (365 * 24 * 60 * 60), # 1 year.\n) -> OpenSSL.crypto.X509:\n\n cert = OpenSSL.crypto.X509()\n cert.get_subject().C = country_name\n cert.get_subject().ST = state_or_province_name\n cert.get_subject().L = locality\n cert.get_subject().O = organization_name\n cert.get_subject().OU = organization_unit_name\n cert.get_subject().CN = common_name\n cert.set_serial_number(serial_number or random.randint(0, 2**64 - 1))\n cert.set_version(2)\n cert.gmtime_adj_notBefore(time_not_before)\n cert.gmtime_adj_notAfter(time_not_after)\n cert.set_issuer(cert.get_subject())\n return cert", "def _get_countries():\n print('-c, -C [country]\\\n \\n [country]=\\\n \\n AR\\t: Argentina\\\n \\n AT\\t: Austria\\\n \\n BR\\t: Brazil\\\n \\n BY\\t: Belarus\\\n \\n CA\\t: Canda\\\n \\n DE\\t: Germany\\\n \\n FR\\t: France\\\n \\n GB\\t: Great Britain\\\n \\n GH\\t: Ghana\\\n \\n HU\\t: Hungary\\\n \\n ID\\t: Indonesia\\\n \\n IL\\t: Israel\\\n \\n JP\\t: Japan\\\n \\n KR\\t: Korea\\\n \\n MA\\t: Morocco\\\n \\n MY\\t: Malaysia\\\n \\n NL\\t: Netherlands\\\n \\n NO\\t: Norway\\\n \\n OM\\t: Oman\\\n \\n PK\\t: Pakistan\\\n \\n RU\\t: Russia\\\n \\n SA\\t: Saudi Arabia\\\n \\n TH\\t: Thailand\\\n \\n TW\\t: Taiwan\\\n \\n UA\\t: Ukraine\\\n \\n US\\t: United States\\\n \\n UY\\t: Uruguay\\\n \\n VE\\t: Venezuela\\\n \\n VN\\t: Vietnam\\\n \\n .....\\n common usage: opengate -c JP')", "def test_valid_country_format(self, cred, country):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'country': country})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def initialize(self):\r\n state_name = self.state\r\n\r\n state_name = state_name.lower()\r\n\r\n response = requests.get(\"https://cdn-api.co-vin.in/api/v2/admin/location/states\") \r\n\r\n if response.ok:\r\n\r\n df = pd.DataFrame(json.loads(response.text)[\"states\"]) \r\n\r\n state = process.extractOne(state_name, df[\"state_name\"].tolist()) # fuzzy match to get best state match \r\n\r\n self.state_id = df.loc[df.state_name == state[0],[\"state_id\"]].values[0][0] \r\n self.load_districts()", "def parse_citystate(self):\n \n index = self.index\n \n if self.words[index]['tag'] != Vocabulary.NAME:\n return None, None, 0, 0\n \n if self.words[index]['word'] == 'mt':\n city = \"mountain\"\n else:\n city = self.words[index]['word']\n start = index\n \n index += 1\n if index == self.length:\n return None, None, 0, 0\n \n if self.words[index]['word'] == ',':\n index += 1\n if index == self.length:\n return None, None, 0, 0\n elif self.words[index]['tag'] == Vocabulary.NAME: \n # Hack\n state, n = self.state_hack(index)\n if n > 0:\n index += n\n return city, state, index - start + 1, index\n \n #if self.words[index]['word'] == 'medical doctor':\n #return city, \"ISO3166-2:US-MD\", index - start + 1, index\n try:\n state = self._state_dict[self.words[index]['word']]\n return city, state, index - start + 1, index\n 
except:\n city += ' ' + self.words[index]['word']\n index += 1\n if index == self.length:\n return None, None, 0, 0\n \n if self.words[index]['word'] == ',':\n index += 1\n if index == self.length:\n return None, None, 0, 0\n\n # Hack\n state, n = self.state_hack(index)\n if n > 0:\n index += n\n if index == self.length: index -= 1 # Hack\n return city, state, index - start + 1, index\n \n if self.words[index]['tag'] not in [Vocabulary.NAME, Vocabulary.ACRONYM]:\n return None, None, 0, 0\n \n try:\n state = self._state_dict[self.words[index]['word']]\n return city, state, index - start + 1, index\n except: \n return None, None, 0, 0", "def _derive_country_IE(place):\n derived = []\n if _COUNTY_REGEX.search(place.name):\n stripped = _COUNTY_REGEX.sub(\"\", place.name.lower())\n derived += [\"co \" + stripped, \"county \" + stripped]\n\n #\n # Alternative name cases that aren't as straightforward as the above.\n #\n try:\n derived += {\n \"loch garman\": [\"co wexford\"],\n \"uíbh fhailí\": [\"co offaly\"],\n \"maigh eo\": [\"co mayo\"],\n \"an iarmhí\": [\"co westmeath\"],\n }[place.name.lower()]\n except KeyError:\n pass\n\n return [DerivedName(text, \"en\") for text in derived]", "def parse_filename(self, filename:str) -> bool:\n with open(filename, 'r') as xfh:\n data = xfh.read()\n\n xml = etree.fromstring(data)\n for node in xml.xpath('.//a:Detail', namespaces=self.NSMAP):\n cert = Certificates(node)\n self.certificate_records.append(cert)\n self.station_records.setdefault(cert.name, []).append(cert)\n\n return len(self.certificate_records) > 0", "def seperate_City_State_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n v = list(dictionary.values())\n values = []\n res = []\n for i in range(len(keys)):\n state = tmp[i][1].strip()\n city = tmp[i][0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append((state, city))\n values.append(v[i])\n return res, list(values)", "def parse_usa(text: str, state: str) -> tuple:\n pattern = re.compile(\n r'\\\"statistic-module--statistic--QKc9M\\\">.*?'\n r'\\\"statistic-module--title--MZHLl\\\">(.*?)<.*?'\n r'\\\"statistic-module--value--2qXQD.*?\\\">(.*?)<'\n )\n result = pattern.findall(text)\n final_result = [state.capitalize(), -1, -1, -1, -1]\n for i, res in enumerate(result):\n n = res[1].replace(',', '')\n if not n.isdigit():\n continue\n if res[0] == 'Total cases':\n final_result[1] = int(n)\n elif res[0] == 'Recovered':\n final_result[3] = int(n)\n elif res[0] == 'Deaths' or res[0] == 'Total deaths':\n final_result[4] = int(n)\n final_result = tuple(final_result)\n return final_result", "def test_addr_city_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_city(input_val)\n self.assertEqual(output_val, self.line.addr_city)", "def europe_central_asia_countries():\r\n europe_central_asia_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in europe_central_asia:\r\n europe_central_asia_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in europe_central_asia_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def get_covid_stats_by_county(state, county):\n url = \"https://corona.lmao.ninja/v2/jhucsse/counties/\" + county\n response = requests.get(url)\n data = 
response.json()\n counties = []\n for res in data:\n if res[\"province\"] == state:\n county1 = res[\"county\"]\n updatedAt = res[\"updatedAt\"]\n stats = res[\"stats\"]\n confirmed = stats[\"confirmed\"]\n deaths = stats[\"deaths\"]\n recovered = stats[\"recovered\"]\n counties.append(\n CountyStats(state, county1, updatedAt, confirmed, deaths, recovered)\n )\n # return CountyStats(state,county,updatedAt,confirmed,deaths,recovered)\n return counties", "def clean_embargoed_countries(self):\r\n embargoed_countries = self.cleaned_data[\"embargoed_countries\"]\r\n if not embargoed_countries:\r\n return ''\r\n\r\n error_countries = []\r\n\r\n for country in embargoed_countries.split(','):\r\n country = country.strip().upper()\r\n if not self._is_valid_code(country):\r\n error_countries.append(country)\r\n\r\n if error_countries:\r\n msg = 'COULD NOT PARSE COUNTRY CODE(S) FOR: {0}'.format(error_countries)\r\n msg += ' Please check the list of country codes and verify your entries.'\r\n raise forms.ValidationError(msg)\r\n\r\n return embargoed_countries", "def get_countries():\n call = build_call('attr', 'country')\n return request_data(call)", "def audit_city(osmfile):\r\n suburb_list_wrong = defaultdict(set)\r\n city_file = open(osmfile, encoding=\"utf8\")\r\n \r\n for event, elem in ET.iterparse(city_file, events=(\"start\",)):\r\n \r\n if elem.tag == \"node\" or elem.tag == \"way\":\r\n \r\n for tag in elem.iter(\"tag\"):\r\n \r\n if tag.attrib['k'] == 'addr:city':\r\n \r\n city = tag.attrib['v']\r\n # province = re.sub(\" \", \"\", tag.attrib['v'].strip())\r\n if city not in expected_suburb:\r\n \r\n suburb_list_wrong[city].add(city)\r\n \r\n city_file.close()\r\n return suburb_list_wrong", "def fetch_domain_certs(domain):\n url = BASE_URL.format(domain)\n result = requests.get(url)\n if result.status_code != 200:\n result.raise_for_status()\n return result.json()", "def _derive_country_MX(place):\n lname = place.name.lower()\n derived = []\n match = _PARENTHETICAL.search(lname)\n if match:\n derived.append(_PARENTHETICAL.sub(\"\", lname).strip())\n derived.append(match.group(1).strip())\n\n if _MX_COLONIA.search(place.name):\n derived.append(_MX_COLONIA.sub(\"col\", lname))\n\n if _MX_DELEG.search(place.name):\n derived.append(_MX_DELEG.sub(\"delegación\", lname))\n derived.append(_MX_DELEG.sub(\"del\", lname))\n derived.append(_MX_DELEG.sub(\"deleg\", lname))\n\n if _MX_CIUDAD.search(place.name):\n derived.append(_MX_CIUDAD.sub(\"cd\", lname))\n\n alternative_names = _MX_SUPPORT[\"alternative_names\"][\"es\"]\n try:\n derived += alternative_names[lname]\n except KeyError:\n pass\n\n return [DerivedName(text, \"es\") for text in derived]", "def east_asia_pacific_countries():\r\n east_asia_pacific_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in east_asia_pacific:\r\n east_asia_pacific_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in east_asia_pacific_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def parse_china(text: str) -> list:\n pattern = re.compile(\n r'\\{\\\"provinceName\\\":\\\".*?\\\",'\n r'\\\"provinceShortName\\\":\\\"(.*?)\\\".*?'\n r'\\\"confirmedCount\\\":(.*?),.*?'\n r'\\\"suspectedCount\":(.*?),'\n r'\\\"curedCount\\\":(.*?),'\n r'\\\"deadCount\\\":(.*?),'\n )\n result = pattern.findall(text)\n for i, res in 
enumerate(result):\n res = list(res)\n res[0] = to_pinyin(res[0]).capitalize()\n result[i] = tuple(res)\n return result", "def test_parser():\n test_list = [\n \"ACIBOE\",\n \"AZOHEC\",\n \"BADJAU\",\n \"ACOLIP\",\n \"QAGWIG\",\n \"GOCBAD\",\n \"BUVYIB01\",\n \"GIRNIH\",\n \"FURVEU\",\n \"GAHJUW\",\n ]\n\n expected = {\n \"ACIBOE\": {\"Zn\": [np.nan]},\n \"AZOHEC\": {\"Zn\": [2]},\n \"BADJAU\": {\"Sc\": [np.nan]},\n \"ACOLIP\": {\"Zn\": [2]},\n \"QAGWIG\": {\"Fe\": [2]},\n \"GOCBAD\": {\"Cu\": [2]},\n \"BUVYIB01\": {\"Fe\": [2]},\n \"GIRNIH\": {\"Cd\": [2]},\n \"FURVEU\": {\"Fe\": [2]},\n \"GAHJUW\": {\"Fe\": [0]},\n }\n\n getoxstates = GetOxStatesCSD(test_list)\n result = getoxstates.run_parsing()\n\n assert expected == result", "def get_domains_from_csr(csr_file):\n logging.info(\"Parsing CSR...\")\n proc = subprocess.Popen([\"openssl\", \"req\", \"-in\", csr_file, \"-noout\", \"-text\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n if proc.returncode != 0:\n raise IOError(\"Error loading {0}: {1}\".format(csr, err))\n return ACMEClient._parse_domains_from_openssl_output(out.decode('utf8'))" ]
[ "0.52510595", "0.5242316", "0.50890213", "0.5040012", "0.49885833", "0.49216333", "0.49208397", "0.49044296", "0.48793742", "0.47967193", "0.4782242", "0.47812676", "0.4777004", "0.47749475", "0.47335306", "0.47322056", "0.47285786", "0.46640974", "0.46594405", "0.46486455", "0.46388897", "0.4636068", "0.46249333", "0.46237603", "0.4608417", "0.45780727", "0.45779064", "0.45614", "0.45545778", "0.4550156" ]
0.54531664
0
Parses the given section.
def parse_section(soup):
    section_tag = soup.find_all('a', {'class': 'advisory-severity-vote__message'})
    section_scale = [code.string for code in section_tag]
    section = section_scale[0] if section_scale else None
    section_comment_tags = soup.find_all('li', {'class': 'ipl-zebra-list__item'})
    section_comment_list = [comment.text.strip() for comment in section_comment_tags]
    comments = cleanup_comments(section_comment_list)
    return section, comments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_section(self, root, fmt):\n return self.parse_tag(root, fmt)", "def parse(self):\n for section in self.sections:\n section.parse()", "def do_section(parser, token, template='parts/section.html', end='endsection'):\n bits = token.split_contents()[1:]\n if len(bits) is 0:\n title, attrs = '', {}\n elif len(bits) is 1:\n title, attrs = bits[0], {}\n elif len(bits) % 2 is 0:\n raise template.TemplateSyntaxError(\"Your attributes don't match up: %s\" % ', '.join(bits[1:]))\n else:\n title = bits[0]\n attrs = dict(zip(bits[1::2], bits[2::2]))\n nodelist = parser.parse((end,))\n parser.delete_first_token()\n return SectionNode(template, title, attrs, nodelist)", "def parse(self, content):\n self._sections = {}\n self._filters = []\n section = None\n\n def error(msg):\n print('autodl.cfg: line {}: {}'.format(i + 1, msg))\n # log('autodl.cfg: line {}: {}'.format(i + 1, msg))\n\n first_prog = re.compile(ur'^\\[\\s*([\\w\\-]+)\\s*(?:([^\\]]+))?\\s*]$')\n second_prog = re.compile(ur'^([\\w\\-]+)\\s*=(.*)$')\n lines = content['data'].split('\\n')\n for line in lines:\n i = 0\n line = line.strip()\n if line == '':\n continue\n\n first_array = first_prog.match(line)\n second_array = second_prog.match(line)\n if line[0] == '#':\n if section:\n section.add_comment(line)\n elif first_array:\n _type = first_array.group(1).strip().lower()\n try:\n _name = first_array.group(2).strip().lower()\n except AttributeError:\n _name = None\n section = self.get_section(_type, _name)\n elif second_array:\n if section is None:\n error('Missing a [section]')\n else:\n _option = second_array.group(1).strip().lower()\n _value = second_array.group(2).strip().lower()\n section.add_option(_option, _value)\n else:\n error('Ignoring line')\n i += 1", "def parse_section(section):\n data = {}\n for line in section.splitlines(False):\n if not line:\n continue\n if not line.startswith(' '):\n # new key/value\n key, _, value = line.partition(': ')\n data[key] = value\n else:\n # continuation of the previous value\n data[key] += line[1:]\n return data", "def parse_create_section(xml_course):\n\n attrs = [\n \"section\",\n 'crn',\n \"start-time\",\n \"end-time\",\n \"meeting-days\",\n \"location\",\n \"section-number\",\n \"instructor\"\n ]\n\n section = pull_attributes_from_xml(xml_course, attrs)\n\n section[\"places\"] = []\n\n # Create Place attribute pointer based on location string\n # Get places from Parse\n places = get_places()[\"results\"]\n # Get location info from section (of form [\"BRK 101\", \"TBA\"])\n all_locations = section[\"location\"].split(\", \")\n # Filter out TBA\n # TODO Maybe do something else with them\n locations = [location for location in all_locations if location != \"TBA\"]\n\n for location in locations:\n building_code = location.split(\" \")[0]\n for place in places:\n if place.get(\"symbol\") and place[\"symbol\"] == building_code:\n section[\"places\"].append(place[\"objectId\"])\n break;\n\n\n return section", "def parse_text(self, text: str) -> SectionDict:", "def _parse_psf_section(psf):\n conv = OplsPsfFile._convert\n line = psf.readline()\n while not line.strip():\n if not line:\n raise CharmmPsfEOF('Unexpected EOF in PSF file')\n else:\n line = psf.readline()\n if '!' 
in line:\n words = line[:line.index('!')].split()\n title = line[line.index('!')+1:].strip().upper()\n # Strip out description\n if ':' in title:\n title = title[:title.index(':')]\n else:\n raise CharmmPSFError('Could not determine section title')\n if len(words) == 1:\n pointers = conv(words[0], int, 'pointer')\n else:\n pointers = tuple([conv(w, int, 'pointer') for w in words])\n line = psf.readline().strip()\n if not line and title.startswith('NNB'):\n # This will correctly handle the NNB section (which has a spurious\n # blank line) as well as any sections that have 0 members.\n line = psf.readline().strip()\n data = []\n if title == 'NATOM' or title == 'NTITLE' or title == 'NUMLP NUMLPH' or title == 'NUMANISO':\n # Store these four sections as strings (ATOM section we will parse\n # later). The rest of the sections are integer pointers\n while line:\n data.append(line)\n line = psf.readline().strip()\n else:\n while line:\n words = line.split()\n data.extend([conv(w, int, 'PSF data') for w in words])\n line = psf.readline().strip()\n return title, pointers, data", "def parseSection(self, response):\n sel = Selector(response)\n sections = sel.xpath('//table[@class=\"sections responsive\"]//tr[not(@class=\"headers\")]')\n for s in sections:\n item = CourseItem(response.request.meta[\"item\"])\n item['section'] = s.xpath('@data-section-id').get().strip()\n item['instructors'] = s.css('.instructor::text').get()\n if item['instructors'] != None:\n item['instructors'].strip()\n item['instructors'] = [x.strip() for x in re.split(',', item['instructors'])]\n item['syllabus'] = s.css('.syllabus a::attr(href)').get()\n if item['syllabus'] != None:\n item['syllabus'].strip()\n return item\n \n\n \"\"\"\n Ignore the code below this. I was trying to get\n the times, days, and number registered from the class sections\n \"\"\"\n #times = s.xpath('//td[@class=\"time\"]/text()').get().strip()\n #times = re.split('-', times)\n #starttime = times[0]\n #endtime = times[1]\n #endt = dt.datetime.strptime(endtime, '%H:%M%p')\n # TODO: Check if \"am\"/\"pm\" from endt, & if endt hour is greater/less than startt \n #startt = dt.datetime.strptime(starttime, '%H:%M')\n #days = s.xpath('//td[@class=\"days\"]/text()').get().strip()\n #days = re.split(',', days)\n #numdays = len(days]\n \n #cap = s.xpath('//td[@class=\"registered\"]//a/text()').get().strip()\n #cap = re.split(' of ', cap.strip())\n #item['capacity'] = cap[1]", "def section(data):\n if len(data['index']) == 2 and data['index'][1][0].isdigit():\n element = {}\n element['is_section'] = True\n element['section_id'] = '-'.join(data['index'])\n if u\"§§ \" == data['title'][:3]:\n element['is_section_span'] = True\n else:\n element['is_section_span'] = False\n match = SECTION_TITLE_REGEX.match(data['title'])\n element['label'] = match.group(1)\n element['sub_label'] = match.group(2)\n return element", "def visit_section(self, node):\n self.section_level += 1\n self.body.append(self.starttag(node, \"section\"))", "def parse_get_section(xml_course):\n parse_section = parse_create_section(xml_course)\n query_constraints = {\n \"crn\": parse_section[\"crn\"]\n }\n params = urllib.urlencode({\"where\": json.dumps(query_constraints)})\n connection = httplib.HTTPSConnection(PARSE_API_URL, PARSE_API_PORT)\n connection.connect()\n connection.request(\n \"GET\",\n \"%s?%s\" % (SECTIONS_ENDPOINT, params),\n '',\n {\"X-Parse-Application-Id\": app_id, \"X-Parse-REST-API-Key\": rest_api_key}\n )\n response = json.loads(connection.getresponse().read())\n if 
response.get(\"results\"):\n return response[\"results\"][0]\n else:\n return None", "def find_section(amdpar_xml):\n siblings = [s for s in amdpar_xml.itersiblings()]\n\n if len(siblings) == 0:\n return find_lost_section(amdpar_xml)\n\n for sibling in siblings:\n if sibling.tag == 'SECTION':\n return sibling\n\n paragraphs = [s for s in siblings if s.tag == 'P']\n if len(paragraphs) > 0:\n return fix_section_node(paragraphs, amdpar_xml)", "def _get_section_data(self, section):\n \n # unit number\n apt_name = section['name']\n \n try:\n # get the number of bedrooms and bathrooms based \n # on the specific section dictionary\n bedrooms_text = section['bedrooms']['fullValue']\n bathrooms_text = section['bathrooms']['fullValue']\n bedrooms = self._extract_num(bedrooms_text)\n bathrooms = self._extract_num(bathrooms_text)\n except:\n bedrooms, bathrooms = np.nan, np.nan\n\n try:\n # get the square foot area of the unit \n space = float(section['floorSpace']['max'])\n except:\n space = np.nan\n\n try:\n # get the rent price of the unit \n price_text = section['priceRange']['formattedPrice']\n price_text = price_text.replace(',', '') \\\n .replace('$', '')\n price = self._extract_num(price_text)\n except:\n price = np.nan\n \n # construct the section data\n section_data = [\n apt_name,\n bedrooms,\n bathrooms,\n space,\n price,\n ]\n \n return section_data", "def parse_data(self, section):\n data = {}\n # We need to first search down the section to look for where the\n # first TEMPERATURE section starts.\n regex = re.compile(\"^TEMPERATURE\", re.M)\n search = regex.search(section)\n if search is None:\n raise CLIException(\"Failed to find TEMPERATURE, aborting\")\n pos = search.start()\n # Strip extraneous spaces\n meat = \"\\n\".join([s.rstrip() for s in section[pos:].split(\"\\n\")])\n # replace any 2+ \\n with just two\n meat = re.sub(r\"\\n{2,}\", \"\\n\\n\", meat)\n sections = meat.split(\"\\n\\n\")\n for _section in sections:\n lines = _section.split(\"\\n\")\n if lines[0].startswith(\"TEMPERATURE\"):\n parse_temperature(self, self.regime, lines[1:], data)\n elif lines[0].startswith(\"PRECIPITATION\"):\n parse_precipitation(self.regime, lines[1:], data)\n elif lines[0].startswith(\"SNOWFALL\"):\n parse_snowfall(self.regime, lines[1:], data)\n elif lines[0] in [\"SKY COVER\"]:\n parse_sky_coverage(lines, data)\n elif lines[0] in [\"WIND (MPH)\"] and len(lines) > 1:\n parse_wind(lines, data)\n\n return data", "def parse(self, section_dict):\n self.dict = section_dict\n for option in section_dict:\n if option not in self.optionnames:\n print(\"Warning: Unknown option: {:s} in section {:s}\".format(\n option, self.name), file=sys.stderr\n )\n for option, name in zip(self.options, self.optionnames):\n self.dict[name] = option.parse(self)\n return self.dict", "def _section(self, node, offset_mngr):\n infon = self.infon_dict(node)\n type_ = infon.get('type')\n text = self._text(node)\n if not text:\n # Text and annotations at sentence level.\n offset = offset_mngr.start(node)\n text, anno = [], []\n for sent in self._iterfind(node, 'sentence'):\n text.append(self._sentence(sent, offset_mngr))\n anno.extend(self._get_annotations(sent, offset_mngr))\n else:\n # Text and annotations at passage level.\n offset = offset_mngr.update(node, text)\n anno = list(self._get_annotations(node, offset_mngr))\n return type_, text, offset, infon, anno", "def _read_section(self, pointer, nr_of_leads):\n if pointer.id == 1:\n return self._section1(pointer)\n if pointer.id == 2:\n return self._section2(pointer)\n 
elif pointer.id == 3:\n return self._section3(pointer)\n elif pointer.id == 4:\n return self._section4(pointer)\n elif pointer.id == 5:\n return self._section5(pointer, nr_of_leads)\n elif pointer.id == 6:\n return self._section6(pointer, nr_of_leads)\n elif pointer.id == 7:\n return self._section7(pointer)\n elif pointer.id == 8:\n return self._section8(pointer)\n elif pointer.id == 9:\n return self._section9(pointer)\n elif pointer.id == 10:\n return self._section10(pointer)\n elif pointer.id == 11:\n return self._section11(pointer)\n elif pointer.id == 12:\n return self._section12(pointer)\n elif pointer.id > 12:\n print(\"WARN: Section Id %s is not implemented\" % str(pointer.id))\n return None", "def _parse_section(self, lines_iter, expected_header=None):\r\n if expected_header:\r\n line = lines_iter.next()\r\n if expected_header + ':\\n' != line:\r\n raise ParseError('Expected: \"%s:\". Found: \"%s\"' % (expected_header, line))\r\n n = self._parse_num_items(lines_iter)\r\n relation = defaultdict(list) # Values are lists, to accommodate relations.\r\n for i in xrange(n):\r\n k, _, v = lines_iter.next().partition(' -> ')\r\n if len(v) == 1: # Value on its own line.\r\n v = lines_iter.next()\r\n relation[k].append(v[:-1])\r\n return relation", "def preprocess_section(self, section):\n\n if self.is_google_format(section.content):\n return self._google_preprocessor.preprocess_section(section)\n\n return self._rst_preprocessor.preprocess_section(section)", "def _parse_data(self):\n current_block = []\n current_section = \"docstring\"\n\n # if we get a line that starts with #, this is a new comment or\n # part of a block comment. Otherwise, it means the current block\n # comment has ended.\n\n for this in self.data:\n # Beginning of a new section at top level\n if self.regex_section.findall(this):\n name = self.regex_section.findall(this)[0]\n current_section = name.strip(\":\")\n self.sections[current_section] = \"\".join(current_block)\n current_block = []\n current_section = None\n elif this.startswith(\"#\"): # a comment at top level\n current_block.append(this)\n elif this.strip() == \"\": # an empty line\n # this was the main comment, or an isolated comment\n current_block = []\n else: # a non-empty line to skip\n current_block = []\n\n for key in self._get_expected_sections():\n if key not in self.sections.keys():\n logger.warning(\"section %s not dealt by the parsing function\" % key)", "def makesection(section):\n s = []\n if section is None:\n return s\n try:\n for i in section.split(':'):\n s.append(int(i))\n except Exception as e:\n msg = 'Not able to convet section to list because %s' % e\n raise SpecError(msg)\n return s", "def _parse(self):\n\n self.specification = {}\n\n while True:\n try:\n line = self._lines.current\n if ':' in line:\n self.specification.update(self._parse_spec())\n elif line.startswith('NODE_COORD_SECTION'):\n next(self._lines)\n self.coords = self._parse_coords()\n elif line.startswith('EDGE_WEIGHT_SECTION'):\n next(self._lines)\n self.weights = self._parse_weights()\n elif line.startswith('DISPLAY_DATA_SECTION'):\n next(self._lines)\n self.display = self._parse_coords()\n else:\n break\n except StopIteration:\n break\n\n del self._lines", "def handle_section_import(section):\n for prop in section.properties:\n handle_property_import(prop)\n\n # Make sure properties down the rabbit hole are also treated.\n for sec in section.sections:\n handle_section_import(sec)", "def extract_section(soup, symbol):\n section = []\n\n # assume this is only happens at 
the end of the file\n if soup.contents[0] == u'\\n':\n return None, [], \"\"\n\n if len(soup.contents) == 2:\n if soup.contents[1].strip() == u'None.':\n # the section is noted as empty, forward to next section\n return soup.nextSibling.nextSibling, [], \"\"\n\n # it's most likely it's here, but not sure. oh well!\n title = soup.contents[0].string\n #print >> sys.stderr, \"SYMBOL:\", symbol, \"[\", title, \"]\"\n\n soup = soup.nextSibling.nextSibling\n\n lines = []\n while soup and len(soup.findAll(text=re.compile(\"[A-Z][a-z]+:\"))) == 0:\n # fix for Examples\n line = [e.strip() for e in soup.recursiveChildGenerator()\n if isinstance(e, unicode)]\n lines.append(' '.join(line))\n soup = soup.nextSibling\n\n if len(lines):\n soup_data = '\\n'.join(lines)\n\n # xml-ish markup fixup\n section = xml_markup_fixup(soup_data)\n\n return soup, section, title", "def parse_section_header(data, elf_header):\n if elf_header[\"shoff\"] == 0:\n print \" No section header\"\n return None\n \n if is64bit(elf_header):\n section_entry_str = section_64_entry_str\n section_entry_spec = section_64_entry_spec\n else:\n section_entry_str = section_32_entry_str\n section_entry_spec = section_32_entry_spec\n \n entry_len = struct.calcsize(section_entry_str)\n entries = {}\n offset = elf_header[\"shoff\"] \n for entry in range(elf_header[\"shnum\"]):\n vals = {}\n if len(data) < offset+entry_len:\n break\n val_data = struct.unpack(section_entry_str, data[offset:offset+entry_len]) \n for i, elem in enumerate(section_entry_spec):\n vals[elem[0]] = val_data[i] \n \n vals[\"flags\"] = get_section_flags(vals[\"flags\"])\n vals[\"type\"] = get_section_type(vals[\"type\"])\n \n entries[entry] = vals\n offset += entry_len\n \n if not entries:\n return {}\n \n sections = assign_section_names(data, entries, elf_header[\"shstrndx\"])\n return sections", "def _getsec(self, line):\n m = sectionheader_re.match(line)\n if not m:\n return\n self.anchors = anchors = []\n self.d[m.group(1)] = anchors\n self.getline = self._getanchor", "def parse_section(config, config_type):\n\n\n if 'also_skip' in config:\n also_skip = config['also_skip'].lower() == 'true' or config['also_skip'].lower() == '1'\n config.pop('also_skip')\n else:\n also_skip = False\n\n if config_type == 'SAVGOL':\n config = parse_savgol(config)\n elif config_type == 'BASELINE':\n config = {}\n elif config_type == 'SNV':\n config = {}\n elif config_type == 'RNV':\n config = parse_rnv(config)\n elif config_type == 'LSNV':\n config = parse_lsnv(config)\n elif config_type == 'TRIM':\n config = parse_trim(config)\n elif config_type == 'DETREND':\n config = parse_detrend(config)\n elif config_type == 'MSC':\n config = {}\n elif config_type == 'EMSC':\n config = parse_emsc(config)\n elif config_type == 'NORML':\n config = parse_norml(config)\n elif config_type == 'CLIP':\n config = parse_clip(config)\n elif config_type == 'SMOOTH':\n config = parse_smooth(config)\n elif config_type == 'RESAMPLE':\n config = parse_resample(config)\n elif config_type == 'DERIVATE':\n config = parse_derivate(config)\n else:\n raise TypeError('Preprocessing option \"{}\" not recognized!'.format(config_type))\n\n if also_skip:\n config['also_skip'] = also_skip\n\n return config", "def get_section(self, section: str, item: str = '') -> Union[dict, str]:\n\n if section.lower() in self.ini_config.sections():\n if item != '':\n return dict(self.ini_config[section.lower()])[item.lower()]\n else:\n return dict(self.ini_config[section.lower()])\n\n else:\n logging.error(f'No {section} in .ini 
config')\n return {}", "def _read(self, fp, fpname):\n cursect = None # None, or a dictionary\n optname = None\n lineno = 0\n e = None # None, or an exception\n while True:\n line = fp.readline()\n if not line:\n break\n lineno = lineno + 1\n # comment or blank line?\n if line.strip() == '' or line[0] in '#;':\n continue\n if line.split(None, 1)[0].lower() == 'rem' and line[0] in \"rR\":\n # no leading whitespace\n continue\n # continuation line?\n if line[0].isspace() and cursect is not None and optname:\n value = line.strip()\n if value:\n cursect[optname].append(value)\n # a section header or option header?\n else:\n # is it a section header?\n mo = self.SECTCRE.match(line)\n if mo:\n sectname = mo.group('header')\n if sectname in self._sections:\n cursect = self._sections[sectname]\n elif sectname == DEFAULTSECT:\n cursect = self._defaults\n else:\n cursect = self._dict()\n cursect['__name__'] = sectname\n self._sections[sectname] = cursect\n # So sections can't start with a continuation line\n optname = None\n # no section header in the file?\n elif cursect is None:\n raise MissingSectionHeaderError(fpname, lineno, line)\n # an option line?\n else:\n mo = self._optcre.match(line)\n if mo:\n optname, vi, optval = mo.group('option', 'vi', 'value')\n optname = self.optionxform(optname.rstrip())\n # This check is fine because the OPTCRE cannot\n # match if it would set optval to None\n if optval is not None:\n if vi in ('=', ':') and ';' in optval:\n # ';' is a comment delimiter only if it follows\n # a spacing character\n pos = optval.find(';')\n if pos != -1 and optval[pos-1].isspace():\n optval = optval[:pos]\n optval = optval.strip()\n # allow empty values\n if optval == '\"\"':\n optval = ''\n cursect[optname] = [optval]\n else:\n # valueless option handling\n cursect[optname] = optval\n else:\n # a non-fatal parsing error occurred. set up the\n # exception but keep going. the exception will be\n # raised at the end of the file and will contain a\n # list of all bogus lines\n if not e:\n e = ParsingError(fpname)\n e.append(lineno, repr(line))\n # if any parsing errors occurred, raise an exception\n if e:\n raise e\n\n # join the multi-line values collected while reading\n all_sections = [self._defaults]\n all_sections.extend(self._sections.values())\n for options in all_sections:\n for name, val in options.items():\n if isinstance(val, list):\n options[name] = '\\n'.join(val)" ]
[ "0.7374695", "0.6986738", "0.68131596", "0.68070054", "0.6706419", "0.6671083", "0.65692985", "0.6440196", "0.64307415", "0.63216704", "0.62620395", "0.6214296", "0.6185266", "0.61728215", "0.6125363", "0.6123458", "0.61100674", "0.6085565", "0.6084057", "0.6030608", "0.6025441", "0.59840393", "0.59615666", "0.5952685", "0.5939843", "0.5919707", "0.5905797", "0.59011376", "0.589904", "0.5894113" ]
0.71823096
1
Return True if ``value`` is a health check.
def is_healthcheck(self, value):
    return is_healthcheck(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(self, value) -> bool:\n return self._check_helper(value, raise_exceptions=False)", "def has_value(cls, value):\n return bool(isinstance(value, numbers.Number) or isinstance(value, time) or \\\n isinstance(value, datetime) or value)", "def is_true(value):\n \n return (value is True)", "def _check_value_type(self, value):\n if value is not None and self.value_type is not None:\n valid = isinstance(value, self.value_type)\n if not valid:\n return False\n return True", "def check(self, key, value):\n return self._check_key(key) is True and self._check_value(value) is True", "def is_valid_value(self, value):\n return value in self.values", "def is_bool(value):\n return isinstance(value, bool)", "def is_valid_value(self, value):\n return value in self.categories", "def isPass(value: Any) -> bool: # pragma: no cover\n if isinstance(value, bool):\n return True\n return PASS in value", "def has_value(cls, value):\n return any(value == item.value for item in cls)", "def has_value(cls, value):\n return any(value == item.value for item in cls)", "def check(self, value: ATTRIBUTE_TYPES) -> bool:\n if self.type == ConstraintTypes.EQUAL:\n return self.value == value\n if self.type == ConstraintTypes.NOT_EQUAL:\n return self.value != value\n if self.type == ConstraintTypes.LESS_THAN:\n return self.value < value\n if self.type == ConstraintTypes.LESS_THAN_EQ:\n return self.value <= value\n if self.type == ConstraintTypes.GREATER_THAN:\n return self.value > value\n if self.type == ConstraintTypes.GREATER_THAN_EQ:\n return self.value >= value\n if self.type == ConstraintTypes.WITHIN:\n low = self.value[0]\n high = self.value[1]\n return low <= value <= high\n if self.type == ConstraintTypes.IN:\n return value in self.value\n if self.type == ConstraintTypes.NOT_IN:\n return value not in self.value\n if self.type == ConstraintTypes.DISTANCE:\n if not isinstance(value, Location): # pragma: nocover\n raise ValueError(\"Value must be of type Location.\")\n location = cast(Location, self.value[0])\n distance = self.value[1]\n return location.distance(value) <= distance\n raise ValueError(\"Constraint type not recognized.\") # pragma: nocover", "def is_valid(self, value) -> 'True|str':\n if self.base_type is not None and not isinstance(value, self.base_type):\n return f'Value {value} is not type of {self.base_type}.'\n return True", "def _is_valid(self, value):\n\n # Entities have an istypeof method that can perform more sophisticated\n # type checking.\n if hasattr(self._type, \"istypeof\"):\n return self._type.istypeof(value)\n else:\n return isinstance(value, self._type)", "def has_value(value):\n return IsDictContainingValue(wrap_matcher(value))", "def parse_value(cls, value):\n return bool(value)", "def accepts(cls, value: Any) -> bool:\n try:\n cls.convert(value)\n return True\n except ValueError:\n return False", "def is_bool(value):\n try:\n strtobool(value)\n except ValueError:\n return False\n else:\n return True", "def is_valid_value(self, value):\n if not self.range:\n return False\n\n return value >= self.range[0] and value <= self.range[1]", "def is_valid_value(self, value: Any) -> bool:\n return self.type_registry.is_valid_nested(value)", "def is_valid_confidence(self, value: List) -> bool:\n\n if self._confidence_values is None or value is None:\n return True\n else:\n return value in self._confidence_values", "def validate(self, value):\n\n return True", "def validate_value(self, value: valueType) -> bool:\n if value is None:\n raise Exception\n return True", "def is_value_legit(self, 
value):\n return value in self.domain and value in self.possible_domain", "def matches(self, value):\n return value == self.attributes[AT.VALUE]", "def check(self, value):\n raise NotImplementedError", "def _check_helper(self, value, raise_exceptions=True) -> bool:\n if not isinstance(value, self.value_type):\n if raise_exceptions:\n raise InvalidParameterException(\n '%s: invalid type given: %s (required %s)' % (\n self.name, type(value),\n ', '.join([str(x) for x in self.value_type])\n )\n )\n return False\n\n return True", "def validate(self, value):\n return True", "def check(self, value):\n\t\t\n\t\tif value <= self.current_rate:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def check_value(value, sensor):\n if not GraphModel.check_if_int(value):\n return False\n\n return (sensor == 't' and ba.min_temp < int(value) < ba.max_temp) or \\\n (sensor == 'l' and ba.min_light < int(value) < ba.max_light)" ]
[ "0.70862806", "0.6380764", "0.61718976", "0.598836", "0.5963879", "0.59439", "0.5902455", "0.5843611", "0.5777476", "0.57587725", "0.57587725", "0.57569546", "0.56969887", "0.56509334", "0.5631042", "0.55776083", "0.554896", "0.5548796", "0.55426407", "0.5515357", "0.55148315", "0.55135566", "0.5509645", "0.54972845", "0.5492789", "0.54825586", "0.54809374", "0.54606754", "0.5438963", "0.5437047" ]
0.867911
0
Return copy of TestSuite where only health checks remain.
def filter_suite(self, suite):
    if isinstance(suite, unittest.TestSuite):
        suite_copy = self.suiteClass()
        for sub in suite:
            if isinstance(sub, unittest.TestSuite):
                suite_copy.addTest(self.filter_suite(sub))
            else:
                if self.is_healthcheck(sub):
                    suite_copy.addTest(sub)
    elif self.is_healthcheck(suite):
        suite_copy = suite.copy()
    return suite_copy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dry_run(self):\n self.result.report = self._new_test_report()\n\n for pyunit_testcase in self.cfg.testcases:\n testsuite_report = TestGroupReport(\n name=pyunit_testcase.__name__,\n uid=pyunit_testcase.__name__,\n category=ReportCategories.TESTSUITE,\n entries=[\n TestCaseReport(\n name=self._TESTCASE_NAME, uid=self._TESTCASE_NAME\n )\n ],\n )\n self.result.report.append(testsuite_report)\n\n return self.result", "def loadTestsFromTestCase(self, testCaseClass):\n suite = super(HealthCheckLoader, self).loadTestsFromTestCase(\n testCaseClass)\n return self.filter_suite(suite)", "def test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestIntegration))\n suite.addTest(unittest.makeSuite(TestSection))\n return suite", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(SourceHeavyFootprintTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n return unittest.makeSuite(OpenedTestCase)", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(StatisticsTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(GetDailyReportV1TestCase))\n suite.addTest(unittest.makeSuite(GetDailyReportV2TestCase))\n return suite", "def getTestSuite():\n test_suite = unittest.TestSuite([])\n\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistReaders))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPySnpTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistributedBed))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestFileCache))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestUtilTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestIntRangeSet))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKrDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpGen))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGenerate))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestExampleFile))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstMemMap))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpMemMap))\n test_suite.addTests(NaNCNCTestCases.factory_iterator())\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstReader))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKernelReader))\n\n return test_suite", "def getTestSuite():\n\n suite1 = unittest.TestLoader().loadTestsFromTestCase(TestDataProcs)\n return unittest.TestSuite([suite1,suite2])", "def suite():\n loader = unittest.TestLoader()\n mysuite = unittest.TestSuite()\n mysuite.addTest(loader.loadTestsFromTestCase(TestWorldComposite))\n\n return mysuite", "def suite():\n loader = unittest.TestLoader()\n mysuite = unittest.TestSuite()\n mysuite.addTest(loader.loadTestsFromTestCase(TestUtils))\n \n return mysuite", "def suite():\n test_suite = unittest.TestSuite()\n test_suite.addTest(unittest.makeSuite(globalOptimizerTest))\n test_suite.addTest(unittest.makeSuite(recursiveStepTest))\n return test_suite", 
"def get_tests(self):\n return self.tests[:]", "def runtestsuite(self, testsuite):\n if testsuite.status == TestStatus.READY:\n results = testsuite.run()\n else:\n results = ResultList()\n # Disable \"Expression is assigned to nothing\" warning\n # pylint: disable=W0106\n [handler.flush() for handler in self.logger.handlers]\n results.save(heads={'Build': '', 'Branch': self.args.branch})\n sys.stdout.flush()\n self._cleanup_resourceprovider()\n return results", "def remove_empty_suites(self):\n self.visit(EmptySuiteRemover())", "def suite():\n tsuite = unittest.TestSuite()\n tsuite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))\n tsuite.addTest(unittest.defaultTestLoader.loadTestsFromModule(commandtests))\n tsuite.addTest(unittest.defaultTestLoader.loadTestsFromModule(locktests))\n return tsuite", "def loadTestsFromName(self, name, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)\n return self.filter_suite(suite)", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(HscDistortionTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n suite_obj = unittest.TestSuite()\n suite_obj.addTest(TestEssentials())\n return suite_obj", "def suite():\n tests.init()\n\n suites = []\n suites += unittest.makeSuite(MeasureSourcesTestCase)\n suites += unittest.makeSuite(ForcedMeasureSourcesTestCase)\n suites += unittest.makeSuite(tests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_polarization.test_suite())\n testSuite.addTest(test_xray.test_suite())\n testSuite.addTest(test_emspectrum.test_suite())\n return testSuite", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(ListV1TestCase))\n return suite", "def loadTestsFromModule(self, module, *args, **kwargs):\n suite = super(HealthCheckLoader, self).loadTestsFromModule(\n module, *args, **kwargs)\n return self.filter_suite(suite)", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(UpdateV1TestCase))\n return suite", "def make_suite():\n suite = unittest.TestSuite()\n return suite", "def suite():\n\tts = unittest.TestSuite()\n\tfor test_module in __all__:\n\t\tm = importlib.import_module(\"pyroclast.test.\" + test_module)\n\t\tfor n in dir(m):\n\t\t\tc = getattr(m, n)\n\t\t\tif is_test_case(c):\n\t\t\t\ts = unittest.TestLoader().loadTestsFromTestCase(c)\n\t\t\t\tts.addTests(s)\n\treturn ts", "def _flattenTestSuite(self, testSuite):\n l = []\n try:\n for test_suite in testSuite._tests:\n l = l + self._flattenTestSuite(test_suite)\n except AttributeError:\n l.append(testSuite)\n return l", "def suite():\n # patch it to work here\n package_def = 'app.test'\n\n suite = unittest.TestSuite()\n\n for other_suite in iter_suites(package_def):\n suite.addTest(other_suite)\n return suite", "def suite():\n utilsTests.init()\n suites = []\n suites += unittest.makeSuite(TestTrackingDb)\n return unittest.TestSuite(suites)", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(CreateV1TestCase))\n suite.addTest(unittest.makeSuite(CreateV2TestCase))\n return suite" ]
[ "0.63560736", "0.6105774", "0.59605616", "0.58906287", "0.588734", "0.58871996", "0.5851725", "0.58468413", "0.5832818", "0.5828046", "0.57777184", "0.57675064", "0.57567066", "0.5658222", "0.5646921", "0.56121224", "0.56025547", "0.56018823", "0.5576602", "0.5569374", "0.55601853", "0.5557575", "0.55571836", "0.5554578", "0.5551725", "0.5541882", "0.55130064", "0.5491539", "0.54873747", "0.5486702" ]
0.7639181
0
Load healthchecks from TestCase.
def loadTestsFromTestCase(self, testCaseClass):
    suite = super(HealthCheckLoader, self).loadTestsFromTestCase(
        testCaseClass)
    return self.filter_suite(suite)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_health_get(self):\n pass", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def loadTestsFromModule(self, module, *args, **kwargs):\n suite = super(HealthCheckLoader, self).loadTestsFromModule(\n module, *args, **kwargs)\n return self.filter_suite(suite)", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def test_fake_health_get(self):\n pass", "def _load_tests(self):\n tests = {\"enabled\":defaultdict(list),\n \"disabled\":defaultdict(list)}\n\n for test_path, test_type, test in self.iter_tests():\n enabled = not test.disabled()\n if not self.include_https and test.environment[\"protocol\"] == \"https\":\n enabled = False\n key = \"enabled\" if enabled else \"disabled\"\n tests[key][test_type].append(test)\n\n self.tests = tests[\"enabled\"]\n self.disabled_tests = tests[\"disabled\"]", "def test_load(self):\n (spec, check) = bundylogging.load()\n # It returns the checking function\n self.assertEqual(check, bundylogging.check)\n # The plugin stores it's spec\n self.assertEqual(spec, bundylogging.spec)", "def test_check_health(self):\n cache = DummyCache()\n ok, msg = cache.check_health()\n self.assertTrue(ok)", "def loadTestsFromName(self, name, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)\n return self.filter_suite(suite)", "def test_health(self):\n self.assert_request('get', '/_health')", "def loadTestsFromNames(self, names, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromNames(names,\n module)\n return self.filter_suite(suite)", "def useFailures(self):\n self.setupTests(tests = self.failures)", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = HealthCheck()\n hc._HealthCheck__data = v\n hc.name = k\n hc.script = v.get(\"Script\", \"\")\n hc.interval = v.get(\"Interval\", 0)\n hc.timeout = v.get(\"Timeout\", 0)\n hc.kill_count_limit = v.get(\"KillCountLimit\", default[\"KillCountLimit\"])\n hc.kill_exit_codes = v.get(\"KillExitCodes\", default[\"KillExitCodes\"])\n healthchecks.append(hc)\n return healthchecks", "def test_health_check(self):\n self.url = reverse(\"health-check\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_healthcheck(self):\n self.assertEqual(\"OK\", \"OK\")", "def test_health_check(self):\n result = self.app.get('/v1/health')\n\n # assert the status code of the response 200 (OK)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, b'UP')", "def test_health(self):\n res = self.client().get('/')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn('health', data)\n self.assertEqual(data['health'], 'Running!!')", "def test_customize_test_loads(self):\n self.create_user_with_role(\n self.user.name, self.user.email, self.user.password, Role.tester)\n self.create_forktest(\"own-fork-commit\", TestPlatform.linux, 
regression_tests=[2])\n self.create_completed_regression_t_entries(3, [2])\n response = self.app.test_client().get('/test/3')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/by_id.html')\n regression_tests = RegressionTest.query.all()\n self.assertIn(regression_tests[1].command, str(response.data))\n self.assertNotIn(regression_tests[0].command, str(response.data))", "def test_load_testcase_in_module(self):\n tests = self.loader.load(\"tests.sampletest.InitTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest import InitTest\n\n self.assertEqual(type(tests[0]), InitTest)", "def test_health_checks_constructed(self):\n\n node = Node(\n {\n 'healthchecks': [\n {\n 'command': '/some/basic/example',\n 'on_failure': None,\n 'on_failure_even_if_security_violation': False\n },\n\n {\n 'command': '/some/basic/example',\n 'on_failure': '/some/rescue-command',\n 'on_failure_even_if_security_violation': True\n },\n\n {\n 'command': '/some/basic/example'\n }\n ]\n },\n {},\n mock.Mock()\n )\n\n self.assertEqual(3, len(node.get_health_checks()))", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def test_get_hyperflex_health_list(self):\n pass", "def test_simple_health_check(self):\n response = self.client.open(\n '/awadallah/VaultsManager/1.0.0/health',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_healthz(client):\n response = client.get(\"/healthz\")\n assert response.status_code == 200", "def test_health_endpoint(self):\n url = f\"{BASE_URL}/health\"\n response = requests.get(url)\n response_json = response.json()\n assert response.status_code == 200\n assert response_json['status'] == 200", "async def test_health_check(client: AsyncClient):\n\n response = await client.get(f\"/health-check\")\n assert response.status_code == 200\n\n data = response.json()\n assert data[\"service\"][\"status\"] == \"healthy\"\n assert data[\"service\"][\"error\"] is None\n assert data[\"database\"][\"status\"] == \"healthy\"\n assert data[\"database\"][\"error\"] is None", "def load_regression_tests():\n with open(TEST_RESOURCES_DIR / \"regression_vault.pickle\", \"rb\") as p:\n tests = pickle.load(p)\n\n return tests", "def test_health_monitor_basic(self):\n self._create_servers()\n self._start_servers()\n self._create_load_balancer()\n self._create_health_monitor()\n self._check_load_balancing()\n # stopping the primary server\n self._stop_server()\n # Asserting the traffic is sent only to the secondary server\n self._traffic_validation_after_stopping_server()", "def load_status_table():" ]
[ "0.6312396", "0.62286896", "0.61631715", "0.6092404", "0.6075559", "0.60417944", "0.6005269", "0.6000439", "0.5768636", "0.57478154", "0.57459795", "0.57209116", "0.5693739", "0.5679516", "0.56593746", "0.56551856", "0.56339574", "0.55915415", "0.55771744", "0.5574354", "0.5552758", "0.55081075", "0.54977244", "0.5481446", "0.5387548", "0.53715044", "0.5365683", "0.53294945", "0.5291307", "0.5280294" ]
0.68875015
0
Load healthchecks from module.
def loadTestsFromModule(self, module, *args, **kwargs):
    suite = super(HealthCheckLoader, self).loadTestsFromModule(
        module, *args, **kwargs)
    return self.filter_suite(suite)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def loadTestsFromName(self, name, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)\n return self.filter_suite(suite)", "def loadTestsFromNames(self, names, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromNames(names,\n module)\n return self.filter_suite(suite)", "def test_load(self):\n (spec, check) = bundylogging.load()\n # It returns the checking function\n self.assertEqual(check, bundylogging.check)\n # The plugin stores it's spec\n self.assertEqual(spec, bundylogging.spec)", "def load_module(name):\n return __import__(\"metaswitch.%s\" % name,\n fromlist=[\"ROUTES\"])", "def load_module(self, module_name): # pragma: no cover\r\n try:\r\n module = import_module('SoftLayer.CLI.modules.%s' % module_name)\r\n for _, obj in inspect.getmembers(module):\r\n if inspect.isclass(obj) and issubclass(obj, CLIRunnable):\r\n self.add_plugin(obj)\r\n return module\r\n except ImportError:\r\n raise InvalidModule(module_name)", "def load_module(module):\n try:\n return import_module(module)\n except ImportError:\n sys.stderr.write('Unable to load the module: %s.\\n' % module)\n exit(-1)", "def test_health_get(self):\n pass", "def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()", "async def _load_hsm_status(self) -> None:\n hsm: Dict[str, str] = await self._api_request(\"hsm\")\n _LOGGER.debug(\"Loaded hsm status\")\n self._hsm_status = hsm[\"hsm\"]", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def loadModule(*args, allModules: bool=True, load: AnyStr=\"\", scan: bool=True,\n **kwargs)->List[AnyStr]:\n pass", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = HealthCheck()\n hc._HealthCheck__data = v\n hc.name = k\n hc.script = v.get(\"Script\", \"\")\n hc.interval = v.get(\"Interval\", 0)\n hc.timeout = v.get(\"Timeout\", 0)\n hc.kill_count_limit = v.get(\"KillCountLimit\", default[\"KillCountLimit\"])\n hc.kill_exit_codes = v.get(\"KillExitCodes\", default[\"KillExitCodes\"])\n healthchecks.append(hc)\n return healthchecks", "def __init__(self):\n ScriptedLoadableModuleLogic.__init__(self)", "def test_load_testcase_in_module(self):\n tests = self.loader.load(\"tests.sampletest.InitTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest import InitTest\n\n self.assertEqual(type(tests[0]), InitTest)", "def test_fake_health_get(self):\n pass", "def test_check_health(self):\n cache = DummyCache()\n ok, msg = cache.check_health()\n self.assertTrue(ok)", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def load_module(cls, *args, **kwargs): # real signature unknown\n pass", "def _load_module(self):\n self.log(logging.INFO, \"Checking file\", (self.filename, os.getpid()))\n\n try:\n return self.load_module(self.filename)\n except KeyboardInterrupt:\n raise\n except BaseException as e:\n # don't re-raise the error, just proceed without a 
module object\n # this can happen with scripts that aren't intended to be imported\n if not self.has_file_level_ignore():\n traceback.print_exc()\n if self.tree.body:\n node = self.tree.body[0]\n else:\n node = None\n self.show_error(\n node,\n \"Failed to import {} due to {!r}\".format(self.filename, e),\n error_code=ErrorCode.import_failed,\n )\n return None, False", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def health_check():\n return dict(api_status='OK')", "def load(cfg):\n\n checks = collections.OrderedDict()\n if cfg and cfg.get('checks', None):\n for name, options in cfg['checks'].iteritems():\n check_type = options.get('type', 'command')\n\n if not options:\n check_type = 'override'\n\n if check_type not in _types:\n msg = \"unknown check type '{}'\".format(check_type)\n raise CheckInvalid(msg)\n\n checks[name] = _types[check_type](name=name, **options)\n\n return checks", "def test_simple_health_check(self):\n response = self.client.open(\n '/awadallah/VaultsManager/1.0.0/health',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def load_snakes():\n\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'snakes.pickle'))", "def modules_load(machine_config):\n\t#---modules in LOCAL configuration must be loaded before checking version\n\timport importlib\n\tif 'module_path' in machine_config: module_path = machine_config['module_path']\n\telse:\n\t\tmodule_parent = os.environ.get('MODULESHOME','/usr/share/Modules/default')\n\t\tmodule_path = os.path.join(module_parent,'init','python.py')\n\tincoming = {}\n\tif sys.version_info<(3,0): execfile(module_path,incoming)\n\telse: exec(open(module_path).read(),incoming)\n\t#---note that modules that rely on dynamically-linked C-code must use EnvironmentModules\n\tmodlist = machine_config['modules']\n\tif type(modlist)==str: modlist = modlist.split(',')\n\tfor mod in modlist:\n\t\t#---always unload gromacs to ensure correct version\n\t\tincoming['module']('unload','gromacs')\n\t\tprint('[STATUS] module load %s'%mod)\n\t\tincoming['module']('load',mod)", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def test_get_hyperflex_health_list(self):\n pass" ]
[ "0.6399756", "0.6244733", "0.600857", "0.5915704", "0.57699925", "0.569914", "0.565888", "0.5624556", "0.5607272", "0.560578", "0.55228066", "0.5457219", "0.541206", "0.5375242", "0.53745407", "0.53611326", "0.5358373", "0.5354235", "0.53232366", "0.53232366", "0.53232366", "0.5315311", "0.5311305", "0.52602094", "0.5241662", "0.5178809", "0.51606846", "0.51494527", "0.51355046", "0.5128314" ]
0.6768524
0
Load healthchecks from name.
def loadTestsFromName(self, name, module=None):
    suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)
    return self.filter_suite(suite)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadTestsFromNames(self, names, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromNames(names,\n module)\n return self.filter_suite(suite)", "def load(name):\n return []", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = HealthCheck()\n hc._HealthCheck__data = v\n hc.name = k\n hc.script = v.get(\"Script\", \"\")\n hc.interval = v.get(\"Interval\", 0)\n hc.timeout = v.get(\"Timeout\", 0)\n hc.kill_count_limit = v.get(\"KillCountLimit\", default[\"KillCountLimit\"])\n hc.kill_exit_codes = v.get(\"KillExitCodes\", default[\"KillExitCodes\"])\n healthchecks.append(hc)\n return healthchecks", "def do_load(self, name):\n try:\n self.runner.run()\n\n except():\n print('Loading failed')", "def health_check(name, target='TCP:22', healthy_threashold=2, unhealthy_threashold=3, interval=30, timeout=3):\n hc = HealthCheck(title=name + 'healthcheck')\n hc.HealthyThreshold = healthy_threashold\n hc.UnhealthyThreshold = unhealthy_threashold\n hc.Interval = interval\n hc.Target = target\n hc.Timeout = timeout\n return hc", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def __load__(self, name):\n raise KeyError(name)", "def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()", "def load(self, name: str):\n result = self.l2.load(name)\n if result is not None:\n logging.debug(f'{name} l2 hit')\n return result\n\n result = self.l3.load(name, self.l2)\n if result is not None:\n logging.debug(f'{name} l3 hit')\n return result\n logging.debug(f'{name} cache miss')\n return None # Cache Miss", "def load(cfg):\n\n checks = collections.OrderedDict()\n if cfg and cfg.get('checks', None):\n for name, options in cfg['checks'].iteritems():\n check_type = options.get('type', 'command')\n\n if not options:\n check_type = 'override'\n\n if check_type not in _types:\n msg = \"unknown check type '{}'\".format(check_type)\n raise CheckInvalid(msg)\n\n checks[name] = _types[check_type](name=name, **options)\n\n return checks", "def load(name):\n\n update(settings.all())\n\n config_specific_settings = _config.pop('config', None) or {}\n if name:\n if name not in names():\n errors.string_exit('config {} not found in .ssha file'.format(name))\n if name in config_specific_settings:\n update(config_specific_settings[name])\n add('config.name', name)\n\n if not _get('ssh.username'):\n add('ssh.username', '$(whoami)')\n\n if _get('bastion') and not _get('ssh.proxy_command'):\n add('ssh.proxy_command', 'ssh -W %h:%p ${bastion.address}')\n\n iam_group_specific_settings = get('iam.group')\n if iam_group_specific_settings:\n from . 
import iam\n for group in iam.groups():\n if group in iam_group_specific_settings:\n update(iam_group_specific_settings[group])", "def load_scenario(self, name):\n if name[:-5] != \".json\":\n name += \".json\"\n scr_path = os.path.dirname(os.path.abspath(__file__))\n f_path = os.path.join(scr_path, \"scenarios\", name)\n if not os.path.exists(f_path):\n raise IOError\n f = open(f_path, 'r')\n sc = Json(f)\n self._loaded_sc = sc\n self._scenario_script_elements = len(sc[\"script\"])\n self._scenario_script_cur = 0", "def load_data(name):\n with open(f\"tests/data/{name}.json\", \"r\") as json_file:\n return json.load(json_file)", "def from_name(self, name):\n return self._name_to_loadout.get(name.lower())", "def _load_support(name):\n curr = P.dirname(P.abspath(__file__))\n with open(P.join(curr, \"data\", \"%s.yml\" % name)) as fin:\n return yaml.full_load(fin)", "def load(self, name):\n # ext = os.path.splitext(name)[1]\n # if ext == '.mat':\n # self.load_matlab(name)\n # else:\n # self.load_pkl(name)\n self.load_pkl(name)\n nhashes = sum(self.counts)\n # Report the proportion of dropped hashes (overfull table)\n dropped = nhashes - sum(np.minimum(self.depth, self.counts))\n print(\"Read fprints for\", sum(n is not None for n in self.names),\n \"files (\", nhashes, \"hashes) from\", name,\n \"(%.2f%% dropped)\" % (100.0 * dropped / max(1, nhashes)))", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def load_module(name):\n return __import__(\"metaswitch.%s\" % name,\n fromlist=[\"ROUTES\"])", "def _load_vulnerabilities_report_file(file_name):\n with open(os.path.join(module_path, test_name, file_name)) as file:\n json_data = json.load(file)\n return ImageVulnerabilitiesReport.from_json(json_data)", "def get_by_name(self, name):\n # type: (str) -> BoundLoadBalancer\n return super(LoadBalancersClient, self).get_by_name(name)", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def test_health_get(self):\n pass", "def ensure_dataset_loaded(self, name):\n if name not in self.datasets:\n print(f'Loading dataset \"{name}\"')\n pd_data = pd.read_excel(self.datafiles[name])\n data = pd.DataFrame.to_dict(pd_data, 'records')\n self.datasets[name] = data", "def loadTestsFromModule(self, module, *args, **kwargs):\n suite = super(HealthCheckLoader, self).loadTestsFromModule(\n module, *args, **kwargs)\n return self.filter_suite(suite)", "def __getitem__ (self, name):\n try:\n return self.load_module (name)\n except ImportError: pass\n raise KeyError (name)", "def load_life(name):\n\tif not '.json' in name:\n\t\tname += '.json'\n\t\n\twith open(os.path.join(LIFE_DIR, name), 'r') as e:\n\t\treturn json.loads(''.join(e.readlines()))", "def health(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.health)", "def load(self, file_path, name=None):\n self.yaml_dict = u.load_yaml(file_path)\n if name is None:\n name = u.get_file_name(file_path)\n self.name = name\n self._check_scenario_sections_valid()\n\n self._parse_subnets()\n self._parse_topology()\n self._parse_os()\n self._parse_services()\n self._parse_processes()\n self._parse_sensitive_hosts()\n self._parse_exploits()\n self._parse_privescs()\n self._parse_scan_costs()\n self._parse_host_configs()\n self._parse_firewall()\n self._parse_hosts()\n self._parse_step_limit()\n return self._construct_scenario()", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = 
nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def load(self, path):\n\n try:\n with open(path) as f:\n try:\n self.hooks = yaml.load(f.read())\n except ScannerError:\n self.warning('Error loading {0} hooks - Is it '\n 'correctly formatted?'.format(path))\n else:\n self.out('Loading hooks')\n except IOError:\n self.warning('{0} not found'.format(path))" ]
[ "0.6285826", "0.5823373", "0.5733951", "0.56802326", "0.56788814", "0.56367904", "0.5605188", "0.5547318", "0.5484213", "0.5442093", "0.5427454", "0.53923076", "0.53107464", "0.5194388", "0.51722986", "0.51498437", "0.5135828", "0.511639", "0.5114361", "0.50697315", "0.504881", "0.5044498", "0.502523", "0.5025077", "0.49866137", "0.4954228", "0.49527237", "0.4944374", "0.49270475", "0.4920355" ]
0.65243924
0
Load healthchecks from names.
def loadTestsFromNames(self, names, module=None):
    suite = super(HealthCheckLoader, self).loadTestsFromNames(names, module)
    return self.filter_suite(suite)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadTestsFromName(self, name, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)\n return self.filter_suite(suite)", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = HealthCheck()\n hc._HealthCheck__data = v\n hc.name = k\n hc.script = v.get(\"Script\", \"\")\n hc.interval = v.get(\"Interval\", 0)\n hc.timeout = v.get(\"Timeout\", 0)\n hc.kill_count_limit = v.get(\"KillCountLimit\", default[\"KillCountLimit\"])\n hc.kill_exit_codes = v.get(\"KillExitCodes\", default[\"KillExitCodes\"])\n healthchecks.append(hc)\n return healthchecks", "def load(name):\n return []", "def loadTestsFromNames(self, names, module=None):\r\n suites = [self.loadTestsFromName(name, module) for name in names]\r\n return self.suiteClass(suites)", "def load(cfg):\n\n checks = collections.OrderedDict()\n if cfg and cfg.get('checks', None):\n for name, options in cfg['checks'].iteritems():\n check_type = options.get('type', 'command')\n\n if not options:\n check_type = 'override'\n\n if check_type not in _types:\n msg = \"unknown check type '{}'\".format(check_type)\n raise CheckInvalid(msg)\n\n checks[name] = _types[check_type](name=name, **options)\n\n return checks", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def load_names(args):\n # NAMES is a json document which is just a list of names\n if os.path.isfile(args.names):\n with open(args.names, 'r') as n:\n try:\n names = json.load(n)\n except:\n sys.exit(\"ERROR: {0} is invalid JSON\".format(args.names))\n else:\n sys.exit(\"ERROR {0} file not found.\".format(args.names))\n if len(names) <= 1:\n sys.exit(\"ERROR: {0} needs to have more than 1 name in it\".format(args.names))\n return names", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def loadFirsts(self, names):\n\n if os.path.exists(names):\n self.firsts, self.w_firsts = self.load(names)\n else:\n self.firsts = [names]\n self.w_firsts = None\n\n return", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def load_checks_to_execute(\n bulk_checks_metadata: dict,\n bulk_compliance_frameworks: dict,\n checks_file: str,\n check_list: list,\n service_list: list,\n severities: list,\n compliance_frameworks: list,\n categories: set,\n provider: str,\n) -> set:\n checks_to_execute = set()\n\n # Handle if there are checks passed using -c/--checks\n if check_list:\n for check_name in check_list:\n checks_to_execute.add(check_name)\n\n # Handle if there are some severities passed using --severity\n elif severities:\n for check in bulk_checks_metadata:\n # Check check's severity\n 
if bulk_checks_metadata[check].Severity in severities:\n checks_to_execute.add(check)\n\n # Handle if there are checks passed using -C/--checks-file\n elif checks_file:\n try:\n checks_to_execute = parse_checks_from_file(checks_file, provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are services passed using -s/--services\n elif service_list:\n checks_to_execute = recover_checks_from_service(service_list, provider)\n\n # Handle if there are compliance frameworks passed using --compliance\n elif compliance_frameworks:\n try:\n checks_to_execute = parse_checks_from_compliance_framework(\n compliance_frameworks, bulk_compliance_frameworks\n )\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are categories passed using --categories\n elif categories:\n for cat in categories:\n for check in bulk_checks_metadata:\n # Check check's categories\n if cat in bulk_checks_metadata[check].Categories:\n checks_to_execute.add(check)\n\n # If there are no checks passed as argument\n else:\n try:\n # Get all check modules to run with the specific provider\n checks = recover_checks_from_provider(provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n else:\n for check_info in checks:\n # Recover check name from import path (last part)\n # Format: \"providers.{provider}.services.{service}.{check_name}.{check_name}\"\n check_name = check_info[0]\n checks_to_execute.add(check_name)\n\n return checks_to_execute", "def load_all_extensions(self, names):\n loaded = True\n for name in names:\n if not self.load(name):\n loaded = False\n return loaded", "def prechecks(verbose_level=1, hostnames=[], servicenames=[]):\n # type: (int, List[str], List[str]) -> Job\n check_arg(hostnames, u._('Host names'), list,\n empty_ok=True, none_ok=True)\n check_arg(verbose_level, u._('Verbose level'), int)\n check_arg(servicenames, u._('Service names'), list,\n empty_ok=True, none_ok=True)\n\n check_kolla_args(hostnames=hostnames,\n servicenames=servicenames)\n\n hostnames = safe_decode(hostnames)\n servicenames = safe_decode(servicenames)\n action = KollaAction(verbose_level=verbose_level,\n playbook_name='site.yml')\n ansible_job = action.precheck(hostnames, servicenames)\n return Job(ansible_job)", "def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()", "def loadTestsFromModule(self, module, *args, **kwargs):\n suite = super(HealthCheckLoader, self).loadTestsFromModule(\n module, *args, **kwargs)\n return self.filter_suite(suite)", "def checks(self, all=False):\n if all:\n warn_states = [\"unknown\", \"passing\", \"warning\", \"critical\"]\n else:\n warn_states = [\"unknown\", \"warning\", \"critical\"]\n checks = {}\n for warn_state in warn_states:\n for state in self.consul.health.state(warn_state):\n if not state['Node'] in checks:\n checks[state['Node']] = dict()\n if not state['ServiceID'] in checks[state['Node']]:\n checks[state['Node']][state['ServiceID']] = {\n 'checks': [],\n 'name': state['ServiceName']\n }\n checks[state['Node']][state['ServiceID']]['checks'].append(\n (state['Name'], state['Status'], state['Output'])\n )\n return checks", "def _check_availability(self, names: Iterable) -> None:\n unavailable = [x for x in names if x not in self.__by_name.keys()]\n if unavailable:\n raise ValueError(f'datasets: {unavailable} 
not available in the {self.region} region.')", "def get_healthchecks(\n self, service_namespace_config: ServiceNamespaceConfig\n ) -> List[HealthcheckDict]:\n\n mode = self.get_healthcheck_mode(service_namespace_config)\n\n graceperiodseconds = self.get_healthcheck_grace_period_seconds()\n intervalseconds = self.get_healthcheck_interval_seconds()\n timeoutseconds = self.get_healthcheck_timeout_seconds()\n maxconsecutivefailures = self.get_healthcheck_max_consecutive_failures()\n\n if mode == \"http\" or mode == \"https\":\n http_path = self.get_healthcheck_uri(service_namespace_config)\n protocol = f\"MESOS_{mode.upper()}\"\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": protocol,\n \"path\": http_path,\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"portIndex\": 0,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode == \"tcp\":\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": \"TCP\",\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"portIndex\": 0,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode == \"cmd\":\n healthchecks = [\n HealthcheckDict(\n {\n \"protocol\": \"COMMAND\",\n \"command\": self.get_healthcheck_cmd(),\n \"gracePeriodSeconds\": graceperiodseconds,\n \"intervalSeconds\": intervalseconds,\n \"timeoutSeconds\": timeoutseconds,\n \"maxConsecutiveFailures\": maxconsecutivefailures,\n }\n )\n ]\n elif mode is None:\n healthchecks = []\n else:\n raise InvalidHealthcheckMode(\n \"Unknown mode: %s. Only acceptable healthcheck modes are http/https/tcp/cmd\"\n % mode\n )\n return healthchecks", "def xontribs_load(names, verbose=False):\n ctx = builtins.__xonsh__.ctx\n res = ExitCode.OK\n for name in names:\n if verbose:\n print(\"loading xontrib {0!r}\".format(name))\n try:\n update_context(name, ctx=ctx)\n except Exception:\n res = ExitCode.INIT_FAILED\n print_exception(\"Failed to load xontrib {}.\".format(name))\n if hasattr(update_context, \"bad_imports\"):\n res = ExitCode.NOT_FOUND\n prompt_xontrib_install(update_context.bad_imports)\n del update_context.bad_imports\n return res", "def do_load(self, name):\n try:\n self.runner.run()\n\n except():\n print('Loading failed')", "def health_check(name, target='TCP:22', healthy_threashold=2, unhealthy_threashold=3, interval=30, timeout=3):\n hc = HealthCheck(title=name + 'healthcheck')\n hc.HealthyThreshold = healthy_threashold\n hc.UnhealthyThreshold = unhealthy_threashold\n hc.Interval = interval\n hc.Target = target\n hc.Timeout = timeout\n return hc", "def load_sql_rules():\n logger.info('Loading SQL-based validation rules')\n SQLLoader.load_sql(\"sqlRules.csv\")\n logger.info('Loading non-SQL-based validation labels')\n LabelLoader.load_labels(\"validationLabels.csv\")", "def test_get_hyperflex_health_list(self):\n pass", "def do_health_checks(self, list_of_ips):\n # Calculate a decent overall timeout time for a ping attempt: 3/4th of\n # the monitoring interval. That way, we know we're done with this ping\n # attempt before the next monitoring attempt is started.\n ping_timeout = self.get_monitor_interval() * 0.75\n\n # Calculate a decent number of retries. For very short intervals we\n # shouldn't have any retries, for very long ones, we should have\n # several ones. 
Converting the timeout to an integer gives us what we\n # want: For timeouts less than 1 we have no retry at all.\n num_retries = int(ping_timeout)\n\n try:\n self.ping_count += len(list_of_ips)\n responses, no_responses = multiping.multi_ping(\n list_of_ips, ping_timeout, num_retries)\n self.update_stats(responses, no_responses)\n\n except Exception as e:\n logging.error(\"Exception while trying to monitor servers: %s\" %\n str(e))\n # Need to assume all IPs failed\n no_responses = list_of_ips\n\n return no_responses, [] # return empty list for questionable IPs", "def health_checks(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"health_checks\")", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def _load_personas(self, names, is_custom=False):\n names = names or [path.stem for path in\n self.persona_dir[is_custom].iterdir()\n if path.is_dir()]\n for name in names:\n try:\n self.update_persona_dicts(self.process_name(name),\n is_custom=is_custom)\n except:\n warnings.warn(f'Could not load files for {name}.')", "def load_aliases():\n for name in COMMANDS:\n load_alias(name)", "def load(name):\n\n update(settings.all())\n\n config_specific_settings = _config.pop('config', None) or {}\n if name:\n if name not in names():\n errors.string_exit('config {} not found in .ssha file'.format(name))\n if name in config_specific_settings:\n update(config_specific_settings[name])\n add('config.name', name)\n\n if not _get('ssh.username'):\n add('ssh.username', '$(whoami)')\n\n if _get('bastion') and not _get('ssh.proxy_command'):\n add('ssh.proxy_command', 'ssh -W %h:%p ${bastion.address}')\n\n iam_group_specific_settings = get('iam.group')\n if iam_group_specific_settings:\n from . import iam\n for group in iam.groups():\n if group in iam_group_specific_settings:\n update(iam_group_specific_settings[group])" ]
[ "0.6379476", "0.59809893", "0.58267564", "0.5661063", "0.56127495", "0.5603886", "0.5571648", "0.55102366", "0.5353344", "0.5339407", "0.53231657", "0.5272784", "0.5235252", "0.5182474", "0.5097241", "0.5064218", "0.5057405", "0.504696", "0.5042437", "0.5037706", "0.49992388", "0.49555063", "0.4928204", "0.49271446", "0.49230534", "0.49207217", "0.49136704", "0.4906005", "0.49029723", "0.49004737" ]
0.7157677
0
Validate that the public key belongs to the given EC curve and format it as an uncompressed byte string. Afterwards the function creates a hash value of the uncompressed public key.
def get_public_key_fingerprint(curve: object, temp_public_key: object) -> object:
    vk = VerifyingKey.from_string(bytes.fromhex(temp_public_key), curve=curve)
    uncompressed_pub_key = vk.to_string('uncompressed')
    pub_key_hash_fingerprint = hashlib.sha256(uncompressed_pub_key)
    return pub_key_hash_fingerprint.hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrecognized key type: #{prefix}')", "def derive_public_key(private_key):\r\n\r\n Q = int.from_bytes(private_key, byteorder='big') * BIP32_CURVE.generator\r\n xstr = Q.x().to_bytes(32, byteorder='big')\r\n parity = Q.y() & 1\r\n return (2 + parity).to_bytes(1, byteorder='big') + xstr", "def generate_ecc_public_key(private_key: EllipticCurvePrivateKeyWithSerialization) -> EllipticCurvePublicKey:\n return private_key.public_key()", "def parse_public_key(data: bytes) -> str:\n key_prefix = {\n b'\\x00': b'edpk',\n b'\\x01': b'sppk',\n b'\\x02': b'p2pk'\n }\n return base58_encode(data[1:], key_prefix[data[:1]]).decode()", "def get_public_compressed_curve_point(private_key):\n encoded_point = private_key.public_key().public_numbers().encode_point()\n return base64.b64encode(encoded_point)", "def convert_public_key_to_ecdsa(self, public_key):\n return PublicKey.fromPem('\\n-----BEGIN PUBLIC KEY-----\\n'+public_key+'\\n-----END PUBLIC KEY-----\\n')", "def PublicKey(self) -> _n_9_t_1:", "def PublicKey(self) -> _n_9_t_1:", "def test_public_key_ec(self):\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBiTCCAS+gAwIBAgIJAINtiwRC4eBJMAoGCCqGSM49BAMCMCExDzANBgNVBAMM\nBkVDIDI1NjEOMAwGA1UECgwFV2ViQ0EwHhcNMTgwNTI3MTAyNTIyWhcNMTgwNjI2\nMTAyNTIyWjAhMQ8wDQYDVQQDDAZFQyAyNTYxDjAMBgNVBAoMBVdlYkNBMFkwEwYH\nKoZIzj0CAQYIKoZIzj0DAQcDQgAEIg6eBOPv5M2z4ANtsJukbimKWX04lanEdALs\nbu2xNCDBXJ0IJ4Sd3u4G1qvrKX0mBHd7yUPGui+7bvp084mNaqNQME4wHQYDVR0O\nBBYEFEmE51rEUz4TuD8oEAw2lvMfvi6LMB8GA1UdIwQYMBaAFEmE51rEUz4TuD8o\nEAw2lvMfvi6LMAwGA1UdEwQFMAMBAf8wCgYIKoZIzj0EAwIDSAAwRQIgfiKDoHB3\nWzRO1juSMyVBuBw2p1o0ab+3fBNDvff8PXcCIQCUKIyzTnM7Wz6TkABfqOcmx7n4\nsbRvdOg3CepLGW3Ytw==\n-----END CERTIFICATE-----\"\"\"\n x509 = crypto.load_certificate(PEM, cert)\n self.assertEqual(utils.public_key_type(x509), c.KEY_EC)", "def generate_hash(self):\n if not self.public_key:\n raise ValueError('Requires a public publicKey')\n return self.public_key.encode(encoding='bytes')", "def import_public_key(self, hex_bytes: str) -> str:\n return self.context.post(\n \"/dsum/public_key\", {\"key\": hex_bytes}, None, \"DSum: failed importing a Curve 25519 public key\")['uid']", "def public_key_to_address(public_key):\n\toutput = []\n\talphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n\tvar = hashlib.new('ripemd160')\n\tencoding = binascii.unhexlify(public_key.encode())\n\tvar.update(hashlib.sha256(encoding).digest())\n\tvar_encoded = ('00' + var.hexdigest()).encode()\n\tdigest = hashlib.sha256(binascii.unhexlify(var_encoded)).digest()\n\tvar_hex = '00' + var.hexdigest() + hashlib.sha256(digest).hexdigest()[0:8]\n\tcount = [char != '0' for char in var_hex].index(True) // 2\n\tn = int(var_hex, 16)\n\twhile n > 0:\n\t\tn, remainder = divmod(n, 58)\n\t\toutput.append(alphabet[remainder])\n\tfor i in range(count):\n\t\toutput.append(alphabet[0])\n\treturn ''.join(output[::-1])", "def _ecdsa_key(self,private_key):\n numbers = private_key.private_numbers()\n content = WriteMessage()\n\n public_key = private_key.public_key()\n serialized = public_key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n\n # The SSH agent format somehow combines the elliptic curve's\n # `x` and `y` values (in `numbers.public_numbers`) into a 
single\n # `Q` value. I couldn't figure the specifics out exactly, but\n # the format is used exactly the same way int the OpenSSH\n # public key format, so we'll just reuse that one instead.\n\n pk_data = b64decode(serialized.split(None,2)[1])\n content.data.extend(pk_data)\n\n # nist = self._ecdsa_nists[private_key.curve.name]\n # content.write_string('ecdsa-sha2-{}'.format(nist))\n # content.write_string(nist)\n #\n # buffer = bytearray()\n # buffer.extend(b'0x04')\n #\n # x = numbers.public_numbers.x\n # y = numbers.public_numbers.y\n # for number in [x,y]:\n # tmp = WriteMessage()\n # tmp.write_mpint(number)\n # buffer.extend(tmp.data[4:])\n\n content.write_mpint(numbers.private_value)\n return content.data", "def get_key_id(self):\n jwk_data = {\n \"crv\": \"P-256\",\n \"kty\": \"EC\",\n \"x\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().x.to_bytes(32, \"big\")).decode().replace(\"=\", \"\"),\n \"y\": base64.urlsafe_b64encode(self.public_key_obj.public_numbers().y.to_bytes(32, \"big\")).decode().replace(\"=\", \"\")\n }\n jwk = json.dumps(jwk_data, separators=(',', ':'))\n return hashlib.sha256(jwk.encode()).digest()", "def q_hashpubkey(abe, page, chain):\n pubkey = wsgiref.util.shift_path_info(page['env'])\n if pubkey is None:\n return \\\n \"Returns the 160-bit hash of PUBKEY.\\n\" \\\n \"For example, the Bitcoin genesis block's output public key,\" \\\n \" seen in its transaction output scriptPubKey, starts with\\n\" \\\n \"04678afdb0fe..., and its hash is\" \\\n \" 62E907B15CBF27D5425399EBF6F0FB50EBB88F18, corresponding\" \\\n \" to address 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa.\\n\" \\\n \"/q/hashpubkey/PUBKEY\\n\"\n try:\n pubkey = pubkey.decode('hex')\n except Exception:\n return 'ERROR: invalid hexadecimal byte string.'\n return util.pubkey_to_hash(pubkey).encode('hex').upper()", "def validate_handshake_public_key(cls, public_key: bytes) -> None:\n ...", "def encode_public_key(value: PublicKey) -> bytes:\n return bytes([value.algo.value]) + value.pbk", "def h160_from_pubkey(Q: Point, compressed: bool) -> bytes:\n\n # also check that the Point is on curve\n pubkey = octets_from_point(ec, Q, compressed)\n return h160(pubkey)", "def from_public_parts(self, x: bytes, y: bytes):\n return asymmetric.ec.EllipticCurvePublicNumbers(\n int.from_bytes(x, 'big'),\n int.from_bytes(y, 'big'),\n asymmetric.ec.SECP256R1()\n ).public_key()", "def calculate_key_signature(public_key: str) -> str:\n rsa_obj = RSA.import_key(public_key)\n rsa_der = rsa_obj.export_key(\"DER\")\n\n hasher = SHA1.new()\n hasher.update(rsa_der)\n fingerprint = base64url_encode(hasher.digest())\n\n return fingerprint.decode(\"utf8\")", "def public_key_to_address(public_key: PublicKey) -> Address:\n key_bytes = public_key.format(compressed=False)\n return Address(keccak(key_bytes[1:])[-20:])", "def save_ecc_public_key(ec_public_key: EllipticCurvePublicKey, file_path: str,\n encoding: Encoding = Encoding.PEM) -> None:\n pem_data = ec_public_key.public_bytes(encoding=encoding, format=serialization.PublicFormat.SubjectPublicKeyInfo)\n with open(file_path, 'wb') as f:\n f.write(pem_data)", "def publickey_unsafe(sk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n A = scalarmult_B(a)\n return encodepoint(A)", "def get_public_key(self, uid: str) -> str:\n return self.context.get(\n \"/dsum/public_key/%s\" % uid, None, \"DSum: failed retrieving the Curve 25519 private key with uid: %s\" % uid)['key']", "def public_key(self):", "def private_key_to_public_key(private_key):\n\tpk = 
PrivateKey().fromString(bytes.fromhex(private_key))\n\treturn '04' + pk.publicKey().toString().hex().upper()", "def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')", "def __init__(self, pubkey, e=65537):\n if isinstance(pubkey, int):\n self.key = RSA.RsaKey(n=pubkey, e=e)\n\n else:\n if not isinstance(pubkey, str):\n raise ValueError('pubkey must be str or int.')\n\n if '----' in pubkey:\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)\n else:\n if pubkey == pubkey.lower():\n pubkey = int(pubkey, 16)\n self.key = RSA.RsaKey(n=pubkey, e=e)\n else:\n pubkey = '-----BEGIN PUBLIC KEY-----\\n' + pubkey + '\\n-----END PUBLIC KEY-----'\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)", "def public_key(ctx):\n if not ctx.data:\n raise RefError(\n \"Ref error: eval_func: public key cannot be derived; try \"\n \"something like '|reveal:path/to/encrypted_private_key|publickey'\"\n )\n\n data_dec = ctx.data\n if ctx.ref_encoding == \"base64\":\n data_dec = base64.b64decode(data_dec).decode()\n\n private_key = serialization.load_pem_private_key(\n data_dec.encode(), password=None, backend=default_backend()\n )\n public_key = private_key.public_key()\n\n ctx.data = str(\n public_key.public_bytes(\n encoding=serialization.Encoding.PEM, 
format=serialization.PublicFormat.SubjectPublicKeyInfo\n ),\n \"UTF-8\",\n )", "def rawPubkey(self):\n # note the first byte determines what type of address\n # and the last four are checksums\n return a2b_base58(self.pubkey)[1:-4]" ]
[ "0.70852506", "0.69108117", "0.6854899", "0.68222594", "0.6751205", "0.65805644", "0.65697986", "0.65697986", "0.64765847", "0.6456603", "0.6373267", "0.6364895", "0.6352023", "0.63497204", "0.6304584", "0.6302101", "0.6292437", "0.620116", "0.6189922", "0.6148473", "0.61129415", "0.60908484", "0.6088895", "0.6088762", "0.60707957", "0.60512316", "0.6021014", "0.600546", "0.60008925", "0.5997902" ]
0.70131516
1
Save model hyperparameters/metadata to output directory. model_options is an argparse Namespace, and is converted to a dictionary and pickled.
def save_model_options(output_dir, model_options, predictor='classify'):
    if not isinstance(model_options.training_data, str):
        training_data = '.'.join(model_options.training_data)
    else:
        training_data = model_options.training_data

    output_file = construct_filename(output_dir, 'model_options', '.pkl',
                                     training_data, predictor,
                                     model_options.model,
                                     s=model_options.seed)

    with open(output_file, 'wb') as f:
        pkl.dump(vars(model_options), f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(output_dir, model, gene, model_options, predictor='classify'):\n\n if not isinstance(model_options.training_data, str):\n training_data = '.'.join(model_options.training_data)\n else:\n training_data = model_options.training_data\n\n output_file = construct_filename(output_dir,\n 'model',\n '.pkl',\n gene,\n training_data,\n model_options.model,\n predictor,\n s=model_options.seed)\n\n with open(output_file, 'wb') as f:\n pkl.dump(model, f)", "def save_model(self, output_model: ModelEntity):\n logger.info(\"called save_model\")\n buffer = io.BytesIO()\n hyperparams_str = ids_to_strings(cfg_helper.convert(self._hyperparams, dict, enum_to_str=True))\n labels = {label.name: label.color.rgb_tuple for label in self._labels}\n model_ckpt = torch.load(self._model_ckpt)\n modelinfo = {\n \"model\": model_ckpt,\n \"config\": hyperparams_str,\n \"labels\": labels,\n \"VERSION\": 1,\n }\n\n torch.save(modelinfo, buffer)\n output_model.set_data(\"weights.pth\", buffer.getvalue())\n output_model.set_data(\n \"label_schema.json\",\n label_schema_to_bytes(self._task_environment.label_schema),\n )\n output_model.precision = self._precision", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def save_model(model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(self, model, model_filepath):\n joblib.dump(model, model_filepath)", "def save_model(model, model_filepath):", "def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored('model.joblib saved locally', 'green'))", "def save_model(model, filename):\n with open(filename, 'wb') as f:\n joblib.dump(model, f)", "def save_model(model, model_filepath):\n dump(model, model_filepath)", "def save(self, path=None):\n path = self.opt.get('model_file', None) if path is None else path\n\n if path and hasattr(self, 'model'):\n model = {'model': self.model.state_dict(),\n 'longest_label': self.model.longest_label,\n 'optimizer': self.optimizer.state_dict(),\n 'optimizer_type': self.opt['optimizer']}\n\n with open(path, 'wb') as write:\n torch.save(model, write)\n\n # save opt file as json\n with open(path + \".opt\", 'wb') as handle:\n pickle.dump(self.opt, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def save_model(model, model_filepath):\n\n outfile = open('model_filepath','wb')\n pickle.dump(model, outfile)\n outfile.close()", "def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "def save_model(model, model_filepath):\n\n logging.info(\"run save_model\")\n\n # save model with jolib library\n joblib.dump(model, model_filepath)", "def save_model(self, output_path):\n joblib.dump(self.dtr, output_path)", "def save_model(model, model_filepath): \n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(self, model):\n # serialize model to JSON\n model_json = model.to_json()\n os.makedirs(os.path.dirname(self.model_json_path), exist_ok=True)\n with open(self.model_json_path, \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize weights to HDF5\n model.save_weights(self.model_weights_path)\n print(\"Saved model to disk\")", "def save_model(model, model_path):\n pickle.dump(model.best_estimator_,open(model_path,'wb'))", "def save(self, 
model_name = 'mr-senti'):\n\n\t\tjoblib.dump(self.classifier, os.path.join('model', model_name + '.pkl'))", "def export_model(self, output_model_dir):\n logger.info(\"Exporting model to directory : {}\".format(output_model_dir))\n self.model.export(output_model_dir=output_model_dir)", "def save(self, model_name):\n\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Save the Keras models\n if self.mol_to_latent_model is not None:\n self.mol_to_latent_model.save(dirpath + \"/mol_to_latent_model.h5\")\n\n self.latent_to_states_model.save(dirpath + \"/latent_to_states_model.h5\")\n self.batch_model.save(dirpath + \"/batch_model.h5\")\n\n # Exclude unpicklable and unwanted attributes\n excl_attr = [\n \"_DDC__mode\",\n \"_DDC__train_gen\",\n \"_DDC__valid_gen\",\n \"_DDC__mol_to_latent_model\",\n \"_DDC__latent_to_states_model\",\n \"_DDC__batch_model\",\n \"_DDC__sample_model\",\n \"_DDC__multi_sample_model\",\n \"_DDC__model\",\n ]\n\n # Cannot deepcopy self.__dict__ because of Keras' thread lock so this is\n # bypassed by popping and re-inserting the unpicklable attributes\n to_add = {}\n # Remove unpicklable attributes\n for attr in excl_attr:\n to_add[attr] = self.__dict__.pop(attr, None)\n\n # Pickle metadata, i.e. almost everything but the Keras models and generators\n pickle.dump(self.__dict__, open(dirpath + \"/metadata.pickle\", \"wb\"))\n\n # Zip directory with its contents\n shutil.make_archive(model_name, \"zip\", dirpath)\n\n # Finally, re-load the popped elements for the model to be usable\n for attr in excl_attr:\n self.__dict__[attr] = to_add[attr]\n\n print(\"Model saved.\")", "def save_model(self, suffix: str = '', unwrap_parallel: bool = True) -> None:\n # TODO: Logging\n model = self.model\n # We do this awkard check because there are too many different\n # parallel wrappers in PyTorch and some of them have changed names\n # in different releases (DataParallel, DistributedDataParallel{,CPU}).\n is_wrapped = (\n hasattr(model, 'module') and\n 'parallel' in str(type(model)).lower() and\n isinstance(model.module, torch.nn.Module)\n )\n if is_wrapped and unwrap_parallel:\n # If a parallel wrapper was used, the only thing we should save\n # is the model.module, which contains the actual model and params.\n # If we saved the wrapped module directly, deserialization would\n # get unnecessarily difficult.\n model = model.module\n\n state_dict_path = os.path.join(self.save_path, f'state_dict{suffix}.pth')\n model_path = os.path.join(self.save_path, f'model{suffix}.pt')\n\n torch.save(model.state_dict(), state_dict_path)\n torch.save(model, model_path)", "def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, \"wb\"))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, \"wb\"))", "def saveModel(model, outfile, train_opts, model_opts, view_names=None, sample_names=None, feature_names=None):\n\n # QC checks\n assert model.trained == True, \"Model is not trained yet\"\n assert len(np.unique(view_names)) == len(view_names), 'View names must be unique'\n assert len(np.unique(sample_names)) == len(sample_names), 'Sample names must be unique'\n\n # Create output directory\n if not os.path.isdir(os.path.dirname(outfile)):\n print(\"Output directory does not exist, creating it...\")\n os.makedirs(os.path.dirname(outfile))\n\n # For some reason h5py orders the datasets alphabetically, so we have to sort the likelihoods accordingly\n idx = 
sorted(range(len(view_names)), key=lambda k: view_names[k])\n tmp = [model_opts[\"likelihood\"][idx[m]] for m in range(len(model_opts[\"likelihood\"]))]\n model_opts[\"likelihood\"] = tmp\n\n # Open HDF5 handler\n hdf5 = h5py.File(outfile,'w')\n\n # Save expectations\n saveExpectations(model,hdf5,view_names)\n\n # Save parameters\n # saveParameters(model,hdf5,view_names)\n\n # Save training statistics\n saveTrainingStats(model,hdf5)\n\n # Save training options\n saveTrainingOpts(train_opts,hdf5)\n\n # Save model options\n saveModelOpts(model_opts,hdf5)\n\n # Save training data\n saveTrainingData(model, hdf5, view_names, sample_names, feature_names, model_opts[\"likelihood\"])\n\n # Close HDF5 file\n hdf5.close()", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))", "def save_model(model, model_filepath):\n pickle.dump(model, open(model_filepath, 'wb'))" ]
[ "0.7630544", "0.7089318", "0.7027596", "0.6957321", "0.6957321", "0.6957321", "0.6866317", "0.68396884", "0.6824873", "0.6792376", "0.6774526", "0.66822237", "0.6664956", "0.6664538", "0.66604686", "0.6630448", "0.66119593", "0.6600588", "0.6593003", "0.65876275", "0.6582033", "0.6567448", "0.6563555", "0.6555261", "0.65513647", "0.65513647", "0.654198", "0.6537048", "0.6537048", "0.6537048" ]
0.7801818
0
Charge given price to the card, assuming sufficient card limit. Return True if charge was processed; False if charge was denied.
def charge(self, price):
    if price + self._balance > self._limit:
        return False
    else:
        self._balance += price
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError('Price must be numeric')\n if price + self._balance > self._limit: # if charge would exceed limit\n return False # cannot accept charge\n self._balance += price\n return True", "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError()\n \n if self._balance + price <= self._limit:\n self._balance += price\n return True\n else: return False", "def charge(self, price):\n '''try:\n type(price) == int or type(price) == float\n except ValueError: \n print 'Not a number!'\n \n if type(price) != int or type(price) != float:\n raise ValueError(\"Not a number!\")\n '''\n if price < 0:\n return False\n elif price + self._balance > self._limit:\n return False\n else:\n self._balance += price\n return True", "def price_check(cash, price, shares):\n affordable = (cash - (price * shares)) > 0\n\n if affordable:\n return affordable\n\n else:\n return False", "def make_payment(self, cost):\n self.process_coins()\n if self.money_received >= cost:\n change = round(self.money_received - cost, 2)\n print(f\"Here is {self.CURRENCY}{change} in change.\")\n self.profit += cost\n self.money_received = 0\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n self.money_received = 0\n return False", "def charge(self, other):\n if self.flag:\n self.credit += other\n return \"{} Tomans has been added to your card credit and now the credit of your card is {} Tomans\".format(\n other, self.credit)\n else:\n return \"Sorry, your card has expired.\"", "def check_limit(self):\n self.ensure_one()\n partner = self.partner_id\n moveline_obj = self.env['account.move.line']\n movelines = moveline_obj.\\\n search([('partner_id', '=', partner.id),\n ('account_id.user_type_id.type', 'in',\n ['receivable', 'payable']),\n ('full_reconcile_id', '=', False)])\n\n debit, credit = 0.0, 0.0\n today_dt = datetime.strftime(datetime.now().date(), DF)\n for line in movelines:\n if line.date_maturity < today_dt:\n credit += line.debit\n debit += line.credit\n\n if (credit - debit + self.amount_total) > partner.credit_limit:\n # Consider partners who are under a company.\n if partner.over_credit or (partner.parent_id and partner.parent_id.over_credit):\n partner.write({\n 'credit_limit': credit - debit + self.amount_total})\n return True\n else:\n msg = '%s Can not confirm Sale Order,Total mature due Amount ' \\\n '%s as on %s !\\nCheck Partner Accounts or Credit ' \\\n 'Limits !' % (partner.over_credit,credit - debit, today_dt)\n raise UserError(_('Credit Over Limits !\\n' + msg))\n else:\n return True", "def process_payment(money_received, drink_cost):\n if money_received >= drink_cost:\n change = round(money_received - drink_cost, 2)\n print(f\"Here is ${change} in change.\")\n global profit\n profit += drink_cost\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n return False", "def pay(self, amount):\n if amount > self.balance:\n print(f\"Not enough balance! 
Only ${self.balance} left.\")\n return False\n self.balance -= amount\n return True", "def use(self):\n if self.price_of_trip == 0:\n print(\"Sorry your card has been used\")\n else:\n self.price_of_trip -= self.price_of_trip\n print(\"Done\")", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def is_sufficient(money_received, price):\n if price <= money_received:\n change = round(money_received - price, 2)\n print(f\"Here is your {option}.Enjoy!\\nHere us £{change} in change\")\n global profit\n profit += price\n return True\n else:\n print(f\"Sorry not enough money\")\n return False", "def check_price(self, price_diff):\n chance = exp(price_diff / self.T)\n\n if price_diff < 0 and not chance > random():\n return True\n \n return False", "def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")", "def use(self):\n if self.flag:\n if self.credit < self.price_of_trip:\n return \"Your credit is not enough, please increase your credit\"\n else:\n self.credit -= self.price_of_trip\n return \"Done\"\n else:\n return \"Sorry, your card has expired.\"", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def charge_customer(customer, amount):\n stripe.api_key = Config.STRIPE_SECRET_KEY\n\n if not customer.cards:\n return False # This situation is impossible, but anyway\n try:\n charge = stripe.Charge.create(\n amount=int(amount * 100),\n currency='AUD',\n customer=customer.stripe_customer_id,\n source=customer.cards[-1].stripe_card_id,\n description='Payment for donations.'\n )\n except Exception as e:\n print(e.args[0])\n return False\n\n if charge.status == 'succeeded':\n return True\n return False", "def recharge(self, amount):\n self.action.recharge(self.cardUid, amount)\n self.start()", "def can_accept_credit(self, value):\n return value >= 0", "def payment_approval(self, house_cost: (int, float)):\n if self.money_available >= house_cost: # Person has enough available money to make a deal with Realtor\n self.money_available -= house_cost\n print(f'Payment from {self.name} was approved')\n return True\n print(f'{self.name} doesn\\'t have enough money to buy this house')\n return False", "def check_costs(self):\r\n if self.cost > self.owner.player.char_ob.currency:\r\n self.add_error(\r\n \"celebration_tier\",\r\n \"You cannot afford to pay the cost of %s.\" % self.cost,\r\n )", "def check_risk(self, action, amount=None):\n if amount is None:\n # amount not specified, so determines max amount to trade\n if action == 'buy':\n amount = int((self.upper_bound-self.owned_value)/self.price) # A unit is 1 dollar here? 
TODO:\n elif action == 'sell':\n amount = int((self.lower_bound-self.owned_value)/self.price)\n else:\n raise ValueError(f\"action should be buy or sell, got {action}\")\n if action == 'buy':\n if self.owned_value + amount <= self.upper_bound:\n # Allowed to buy up to upper bound\n return True, amount\n else:\n # Trying to buy too much\n print(\"Trade not allowed, attempting to increase total amount to more than upper bound.\")\n return False, amount\n elif action == 'sell':\n if self.owned_value + amount >= self.lower_bound:\n # Allowed to buy down to lower_bound\n return True, amount\n else:\n print(\"Trade not allowed, attempting to increase debt to more than lower bound.\")\n return False, amount", "def calc_price(self):\n price = self.price\n action = self.action\n mortage = 5 # here set mortage multiplier \n\n if action == 'RESIDENTIAL_SALE':\n return price * 12 * mortage\n\n\n if price >= 10000:\n return price * 0.7\n elif price < 10000 & price >= 5000:\n return price * 0.55\n elif price < 5000 & price >= 2800:\n return price * 0.475\n else:\n return price * 0.4", "def check_price():\n global NUMBER_OF_TOTAL_COINS, BEVERAGE_PRICE\n\n if NUMBER_OF_TOTAL_COINS == BEVERAGE_PRICE:\n return True\n elif NUMBER_OF_TOTAL_COINS < BEVERAGE_PRICE:\n return False\n else:\n return \"FATAL\"", "def price(self, value):\n self.price_ = max(value, 0)\n\n if self.price_ == 0:\n self.mark_as_paid()", "def cc_charge(self, battery_instance=None, inverter_instance=None, start_timestamp=None, timeout_seconds = 0):\n #TODO\n #1. Place inverter in charge mode.\n #2. Every 2 seconds check for battery data and timeout.\n inverter_instance.charge()\n log_test_case.info('Issued charge mode to inverter on port %s.', inverter_instance.com_port)\n while (time.time()-start_timestamp)>timeout_seconds:\n if battery_instance.pack_variables['is_not_safe_level_1']:\n log_test_case.info('Reached level 1 limits during charging on battery on port: %s.', battery_instance.com_port)\n break\n \n time.sleep(2)\n \n inverter_instance.rest()\n battery_instance.clear_level_1_error_flag()\n log_test_case.info('CC charge mode on inverter on port %s finished.', inverter_instance.com_port)\n return True", "def charge_credit_card(amount,save_to_cim=False):\n\n # Create a merchantAuthenticationType object with authentication details\n # retrieved from the constants file\n merchantAuth = apicontractsv1.merchantAuthenticationType()\n merchantAuth.name = CONSTANTS.apiLoginId\n merchantAuth.transactionKey = CONSTANTS.transactionKey\n\n\n # Create the payment data for a credit card\n creditCard = apicontractsv1.creditCardType()\n card_types = ['visa','discover','mastercard','jcb']\n creditCard.cardNumber = fake.credit_card_number(card_type=random.choice(card_types))\n creditCard.expirationDate = fake.credit_card_expire()\n creditCard.cardCode = fake.credit_card_security_code()\n\n # Add the payment data to a paymentType object\n payment = apicontractsv1.paymentType()\n payment.creditCard = creditCard\n\n # Create order information\n order = apicontractsv1.orderType()\n order.invoiceNumber = str(random.randint(1000,3000))\n order.description = fake.bs()\n\n # Set the customer's Bill To address\n customerAddress = apicontractsv1.customerAddressType()\n customerAddress.firstName = fake.first_name()\n customerAddress.lastName = fake.last_name()\n customerAddress.company = fake.bs()\n customerAddress.address = fake.street_address()\n customerAddress.city = fake.city()\n customerAddress.state = fake.address().split()[-1].split()[0]\n 
customerAddress.zip = fake.postalcode_in_state()\n customerAddress.country = fake.country()\n customerAddress.phoneNumber = fake.phone_number()\n\n\n # Set the customer's identifying information\n customerData = apicontractsv1.customerDataType()\n customerData.type = \"individual\"\n customerData.id = fake.upc_e()\n customerData.email = fake.email()\n\n # Add values for transaction settings\n duplicateWindowSetting = apicontractsv1.settingType()\n duplicateWindowSetting.settingName = \"duplicateWindow\"\n duplicateWindowSetting.settingValue = \"600\"\n settings = apicontractsv1.ArrayOfSetting()\n settings.setting.append(duplicateWindowSetting)\n\n # setup individual line items\n random_num = random.randint(2000,5000)\n line_item_1 = apicontractsv1.lineItemType()\n line_item_1.itemId = str(random.randint(1,9))\n line_item_1.name = \"first\"\n line_item_1.description = fake.catch_phrase()\n line_item_1.quantity = str(random.randint(1,9))\n line_item_1.unitPrice = \"12.95\"\n line_item_2 = apicontractsv1.lineItemType()\n line_item_2.itemId = str(random.randint(1,9))\n line_item_2.name = \"second\"\n line_item_2.description = fake.catch_phrase()\n line_item_2.quantity = str(random.randint(1,9))\n line_item_2.unitPrice = \"7.95\"\n line_item_3 = apicontractsv1.lineItemType()\n line_item_3.itemId = str(random.randint(1,9))\n line_item_3.name = \"third\"\n line_item_3.description = fake.catch_phrase()\n line_item_3.quantity = str(random.randint(1,9))\n line_item_3.unitPrice = \"100.00\"\n\n\n # build the array of line items\n line_items = apicontractsv1.ArrayOfLineItem()\n line_items.lineItem.append(line_item_1)\n line_items.lineItem.append(line_item_2)\n line_items.lineItem.append(line_item_3)\n\n # Create a transactionRequestType object and add the previous objects to it.\n transactionrequest = apicontractsv1.transactionRequestType()\n transactionrequest.transactionType = \"authCaptureTransaction\"\n transactionrequest.amount = amount\n transactionrequest.payment = payment\n transactionrequest.order = order\n transactionrequest.billTo = customerAddress\n transactionrequest.customer = customerData\n transactionrequest.transactionSettings = settings\n transactionrequest.lineItems = line_items\n\n # Assemble the complete transaction request\n createtransactionrequest = apicontractsv1.createTransactionRequest()\n createtransactionrequest.merchantAuthentication = merchantAuth\n createtransactionrequest.refId = \"1234-3432\"\n createtransactionrequest.transactionRequest = transactionrequest\n # Create the controller\n createtransactioncontroller = createTransactionController(\n createtransactionrequest)\n createtransactioncontroller.execute()\n\n response = createtransactioncontroller.getresponse()\n\n if response is not None:\n # Check to see if the API request was successfully received and acted upon\n if response.messages.resultCode == \"Ok\":\n # Since the API request was successful, look for a transaction response\n # and parse it to display the results of authorizing the card\n if hasattr(response.transactionResponse, 'messages') is True:\n print(\n 'Successfully created transaction with Transaction ID: %s'\n % response.transactionResponse.transId)\n if save_to_cim:\n # create CIM profile\n cim_create.append(response.transactionResponse.transId)\n create_customer_profile_from_transaction(str(cim_create[0]))\n print('Transaction Response Code: %s' %\n response.transactionResponse.responseCode)\n print('Message Code: %s' %\n response.transactionResponse.messages.message[0].code)\n 
print('Description: %s' % response.transactionResponse.\n messages.message[0].description)\n else:\n print('Failed Transaction.')\n if hasattr(response.transactionResponse, 'errors') is True:\n print('Error Code: %s' % str(response.transactionResponse.\n errors.error[0].errorCode))\n print(\n 'Error message: %s' %\n response.transactionResponse.errors.error[0].errorText)\n # Or, print errors if the API request wasn't successful\n else:\n print('Failed Transaction.')\n if hasattr(response, 'transactionResponse') is True and hasattr(\n response.transactionResponse, 'errors') is True:\n print('Error Code: %s' % str(\n response.transactionResponse.errors.error[0].errorCode))\n print('Error message: %s' %\n response.transactionResponse.errors.error[0].errorText)\n else:\n print('Error Code: %s' %\n response.messages.message[0]['code'].text)\n print('Error message: %s' %\n response.messages.message[0]['text'].text)\n else:\n print('Null Response.')\n\n return response", "def amount_to_charge(opportunity):\n amount = float(opportunity.amount)\n if opportunity.agreed_to_pay_fees:\n total = (amount + 0.30) / (1 - 0.022)\n else:\n total = amount\n return quantize(total)", "def limit_chase(self, oq, max_chase=3.0, failsafe=False, double_check=False):\n ret = self.send_order(oq=oq, ot='limit', price=None)\n order_id = ret[0]['orderID']\n last_price = ret[0]['price']\n side = ret[0]['side']\n max_chase_buy = float(last_price) + float(max_chase)\n max_chase_sell = float(last_price) - float(max_chase)\n avg = last_price\n time.sleep(1)\n self.logger.info(\n f'Chasing {side} order {order_id}, order_price: {avg}, last_price: {last_price}, '\n f'current price: {last_price} max chase: {max_chase_buy}')\n count = 0\n while True:\n count += 1\n o = self.ws_orders(order_id)\n if o:\n if side == 'Buy':\n if double_check:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n _price = quote['buy']\n else:\n _price = self.ws.get_ticker()['buy']\n if float(_price) <= float(max_chase_buy):\n if float(last_price) < float(_price):\n self.logger.info(f'Amending order {order_id} to price {_price}')\n ret = self.client.Order.Order_amend(orderID=order_id, price=_price).result()\n self.logger.info(ret)\n last_price = _price\n else:\n self.logger.debug(f'Sleeping, order_price: {last_price}, current price: {_price}')\n if double_check:\n time.sleep(0.5)\n\n else:\n if failsafe:\n self.logger.info(f'Order {order_id} exceeded max chase. Placing a market order.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n self.send_order(oq, 'market', text='OrderChase Market Failsafe')\n else:\n self.logger.info(f'Price {_price} exceeded max chase {max_chase_buy}, giving up.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n break\n elif side == 'Sell':\n if double_check:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n _price = quote['sell']\n else:\n _price = self.ws.get_ticker()['sell']\n if float(_price) >= float(max_chase_sell):\n if float(last_price) > float(_price):\n self.logger.info(f'Amending order {order_id} to price {_price} ')\n ret = self.client.Order.Order_amend(orderID=order_id, price=_price).result()\n self.logger.info(ret)\n last_price = _price\n else:\n self.logger.debug(f'Sleeping, order_price: {last_price}, current price: {_price}')\n if double_check:\n time.sleep(0.5)\n\n else:\n if failsafe:\n self.logger.info(f'Order {order_id} exceeded max chase. 
Placing a market order.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n self.send_order(oq, 'market', text='OrderChase Market Failsafe')\n else:\n self.logger.info(f'Price {_price} exceeded max chase {max_chase_buy}, giving up.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n break\n else:\n time.sleep(0.5)\n if o:\n self.logger.info(f'{side} Order manually Canceled!')\n self.logger.info('Order Filled')\n break", "def charge_charity_plan(charity):\n stripe.api_key = Config.STRIPE_SECRET_KEY_FOR_PLAN\n price = charity.plan.price\n cards = charity.cards\n if price > 0:\n if len(cards) > 0:\n try:\n payment = stripe.Charge.create(\n amount=int(price * 100),\n currency='AUD',\n # source=charity.stripe_custom_account_id,\n customer=charity.stripe_customer_id,\n source=cards[-1].stripe_card_id,\n description=f'Payment for {charity.plan.name} plan.'\n )\n except stripe.error.StripeError as e:\n current_app.logger.error(f\"charge_charity_plan. Stripe error: {e.json_body['error']['message']}\")\n return False\n if payment.status == 'succeeded':\n charity.last_plan_payment = datetime.now()\n db.session.commit()\n return True\n return False\n charity.last_plan_payment = datetime.now()\n db.session.commit()\n return True" ]
[ "0.8322736", "0.8296711", "0.7829243", "0.6570418", "0.6408199", "0.612218", "0.61032504", "0.60590565", "0.6037275", "0.60188115", "0.5950383", "0.59281176", "0.59213865", "0.5874455", "0.58660394", "0.5860026", "0.5825693", "0.5783239", "0.57501924", "0.5738126", "0.57249475", "0.5714982", "0.5703037", "0.56896293", "0.56810635", "0.5623873", "0.56220025", "0.5594615", "0.5582282", "0.5580885" ]
0.8432956
0
Process customer payment that reduces balance
def make_payment(self,amount): self._balance-=amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_payment(self, payment):\n self._balance -= payment", "def make_payment(self, amount):\n if not isinstance(amount, (int, float)):\n raise TypeError('Amount must be numeric')\n self._balance -= amount", "def make_payment(self, amount):\n if not isinstance(amount, (int, float)):\n raise TypeError()\n if amount < 0: raise ValueError()\n self._balance -= amount", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def process_payment(money_received, drink_cost):\n if money_received >= drink_cost:\n change = round(money_received - drink_cost, 2)\n print(f\"Here is ${change} in change.\")\n global profit\n profit += drink_cost\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n return False", "def deposit(account, amount):\n pass", "def withdraw(self, amount):\n self.balance -= amount\n if self.balance < 10:\n self.balance -= 5\n self.fees += 5", "def OperateAccount(self, user_id, amount_money):\n user_data = self.db_manager.GetData(user_id)\n user_data = self._parsetouserDTO(user_data)\n old_balance = user_data.GetAmountMoney()\n new_balance = int(old_balance) + int(amount_money)\n if new_balance >= 0:\n user_data.SetAmountMoney(new_balance)\n self.db_manager.UpdateData(user_id, user_data.GetAmountMoney())\n return JsonSerializer.SerializeObject(user_data)\n else:\n return \"{\\\"ERROR\\\":\\\"Operation denied insufficient money\\\"}\"", "def cash_deposit(name, bank_id, password):\n amount = int(raw_input(\"Enter Amount to Deposit:\"))\n for i in range(0, len(MY_MEMBER)):\n if MY_MEMBER[i].Name == name and \\\n MY_MEMBER[i].Password == password and \\\n MY_MEMBER[i].BankID == bank_id:\n old_balance = MY_MEMBER[i].balance\n MY_MEMBER[i].balance += amount\n new_balance = MY_MEMBER[i].balance\n print\"*************************\"\n print\"****Depositing Cash******\"\n print\"your Old Bank balance: %r\" % old_balance\n print\"Amount Deposited: %r\" % amount\n print\"your New Bank balance: %r\" % new_balance\n print\"*************************\"\n what_to_do(name, bank_id, password)", "def deposit(self, deposit_money):\r\n self.balance += deposit_money", "def process_month(self):\n if self.balance > 0:\n # if positive balance, convert APR to monthly multiplicative factor\n monthly_factor = pow(1 + self.apr, 1 / 12)\n self.balance *= monthly_factor", "def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount", "def __init__(self, customer, bank, account, limit, bank_bal = 0):\n\n self._customer = customer\n self._bank = bank\n self._account = account\n self._limit = limit\n self._balance = bank_bal # To store customer spendings.", "def deposit(self, amount):\n self.balance += amount", "def deposit(self, amount):\n self.balance += amount", "def handle_balance_update(self, form):\n\n # Update balances of old and new accounts\n account_object: Account = form.cleaned_data.get('account', None)\n if account_object:\n if account_object == self.data_previous_account:\n \"\"\"\n Case 1: New account is same as previous account\n \"\"\"\n # Find difference between new and old balances, and deduct the difference from account\n balance_diff = form.cleaned_data.get('amount', None) - self.data_previous_amount\n 
account_object.balance -= balance_diff\n account_object.save()\n else:\n \"\"\"\n Case 2: New account is not the same as previous account\n \"\"\"\n # Add old amount to the previous account\n self.data_previous_account.balance += self.data_previous_amount\n self.data_previous_account.save()\n\n # Remove new amount from new account\n account_object.balance -= self.object.amount\n account_object.save()\n elif self.data_previous_account:\n \"\"\"\n Case 3:\n Previous account exists but was removed from expense; \n no account listed on submitted form\n \"\"\"\n # Add old amount to previous account\n self.data_previous_account.balance += self.data_previous_amount\n self.data_previous_account.save()", "def final_step_customer(Xaction_type, Xcredit_type, Xcredit_file, Xusers_account):\n ####################################################\n if Xaction_type == \"deposit\" and Xcredit_type == \"savings\":\n #deposit the money into the account.\n amt_entered = amount_entered()\n Xusers_account.deposit_savings(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_sav_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"deposit\" and Xcredit_type == \"current\":\n #deposit the money into the account.\n amt_entered = amount_entered()\n Xusers_account.deposit_current(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_cur_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"withdraw\" and Xcredit_type == \"savings\":\n amt_entered = amount_entered()\n #check if funds is sufficient\n if amt_entered > Xusers_account.get_sav_bal():\n print(\"Insufficient funds.\")\n else: #withdraw the money from the account.\n Xusers_account.withdraw_savings(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_sav_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"withdraw\" and Xcredit_type == \"current\":\n amt_entered = amount_entered()\n #check if funds is sufficient\n if amt_entered > Xusers_account.get_cur_bal():\n print(\"Insufficient funds.\")\n else: #withdraw the money from the account.\n Xusers_account.withdraw_current(amt_entered)\n #add record to the file.\n add_this_line = \"\" + date.today().strftime(\"%m-%d-%Y\") +\"\\\\t\" + Xaction_type + \"\\\\t\" + str(amt_entered) + \"\\\\t\" + str(Xusers_account.get_cur_bal())\n #append line to file\n append_new_line(Xcredit_file, add_this_line)\n\n if Xaction_type == \"balance\" and Xcredit_type == \"savings\":\n print(\"savings total is #\" + f'{users_account.get_sav_bal():,}')\n\n if Xaction_type == \"balance\" and Xcredit_type == \"current\":\n print(\"current total is #\" + f'{users_account.get_cur_bal():,}')\n\n if Xaction_type == \"history\" and Xcredit_type == \"savings\":\n #print necessary information from the file\n print_history(Xcredit_file)\n\n if Xaction_type == \"history\" and Xcredit_type == \"current\":\n #print necessary information from the file\n print_history(Xcredit_file)", "def withdraw(self, amount):\n self.balance -= amount", "def test_payment(self):\n debit_jobs([(self.job, 
A(480), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(480),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def make_payment(self, cost):\n self.process_coins()\n if self.money_received >= cost:\n change = round(self.money_received - cost, 2)\n print(f\"Here is {self.CURRENCY}{change} in change.\")\n self.profit += cost\n self.money_received = 0\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n self.money_received = 0\n return False", "def withdrawal(self, amount):\n if self.balance - amount < self.minimum_balance:\n print \"This would take you below your minimum balance.\"\n return\n else:\n self.balance -= amount\n print \"Please take your cash.\"\n print \"Your balance is now $%d.\" % self.balance\n self.transactions.append((\"Withdrawal\", amount))", "def update_balance(self):\n if self.calculated_balance < 0:\n raise AccountBalanceError('calculated_balance on account {} is below 0'.format(self))\n self.balance = self.calculated_balance\n self.save()", "def pay_off_fully(balance, annualInterestRate):\n\n #variable assignment\n currentBalance = balance\n monthlyInterestRate = annualInterestRate/12", "def Credit(self):\n self.Deposit()\n self.balance += self.amount\n print \"balance credited\"\n print \" Total balance =\",self.balance\n return self.balance", "def calculate(self):\r\n if self.__calculation_type == self.__DIFFERENTIATED_PAY:\r\n for month in range(1, self.__principal_term+1):\r\n self.__differentiated_pay.append(\r\n ceil(\r\n (self.__credit_principal/self.__principal_term)\r\n + self.__credit_interest*(self.__credit_principal\r\n - (self.__credit_principal\r\n * (month-1))\r\n / self.__principal_term)\r\n )\r\n )\r\n self.__overpayment = sum(self.__differentiated_pay) - self.__credit_principal\r\n\r\n for i, dp in enumerate(self.__differentiated_pay, 1):\r\n print(f'Month {i}: paid out {dp}')\r\n print()\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n elif self.__calculation_type == self.__ANNUITY:\r\n if self.__user_choice == self.__SEEK_ANNUITY_MONTHLY:\r\n self.__annuity_monthly = ceil(\r\n self.__credit_principal * ((self.__credit_interest\r\n * pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = (self.__annuity_monthly * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n print(f'Your annuity payment = {self.__annuity_monthly}!')\r\n\r\n elif self.__user_choice == self.__SEEK_TERM:\r\n self.__principal_term = ceil(\r\n log(self.__annuity_monthly / (self.__annuity_monthly\r\n - (self.__credit_interest\r\n * self.__credit_principal))\r\n , 1+self.__credit_interest)\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n years = self.__principal_term // 12\r\n months = self.__principal_term % 12\r\n\r\n print(f'You need {years} year{\"s\" if self.__principal_term > 1 else \"\"}'\r\n f'{\" and \" + str(months) + \" months\" if months > 0 else \"\"}'\r\n f' to repay this credit!')\r\n\r\n elif self.__user_choice == self.__SEEK_CREDIT_PRINCIPAL:\r\n self.__credit_principal = ceil(\r\n self.__annuity_monthly\r\n / ((self.__credit_interest\r\n * pow(1+self.__credit_interest, self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest, self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = 
ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal)\r\n\r\n print(f'Your credit principal = {self.__credit_principal}!')\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n else:\r\n print('Incorrect parameters')\r\n self.usage()", "def _balance_update(self):\n return_rate = self.df.loc[self.currentStep, \"return_Close\"]\n self.buy_amount += return_rate * self.buy_amount\n self.sell_amount -= return_rate * self.sell_amount", "def pay(self, amount):\n if amount > self.balance:\n print(f\"Not enough balance! Only ${self.balance} left.\")\n return False\n self.balance -= amount\n return True", "def finalize(state, coinbase):\n delta = int(state.config['COLLATOR_REWARD'])\n state.delta_balance(coinbase, delta)", "def test_adjusted_payment_still_below_invoice(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(580), # debited (600) + adjustment (-20) = invoiced (580)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "async def balance(self, ctx: commands.Context, user: discord.Member = None):\r\n if user is None:\r\n user = ctx.author\r\n\r\n bal = await bank.get_balance(user)\r\n currency = await bank.get_currency_name(ctx.guild)\r\n max_bal = await bank.get_max_balance(ctx.guild)\r\n if bal > max_bal:\r\n bal = max_bal\r\n await bank.set_balance(user, bal)\r\n await ctx.send(\r\n _(\"{user}'s balance is {num} {currency}\").format(\r\n user=user.display_name, num=humanize_number(bal), currency=currency\r\n )\r\n )" ]
[ "0.66964227", "0.6128591", "0.61197674", "0.5983254", "0.5962717", "0.59586847", "0.5945208", "0.59426755", "0.59344083", "0.59324026", "0.5862457", "0.58423245", "0.5832974", "0.58243793", "0.58243793", "0.58215094", "0.582141", "0.5816743", "0.58033234", "0.5802297", "0.5776027", "0.5766413", "0.57551765", "0.5743205", "0.57422805", "0.5722566", "0.5703514", "0.56864107", "0.5672723", "0.56633234" ]
0.6793116
0
Generate random bytes to use as csrf secret
def gen_csrf_secret(): return Random.new().read(csrf_secret_len)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_password():\n return urlsafe_b64encode(urandom(32)).decode('utf-8')", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def _generateSecretKey():\n return ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(20))", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def generate_csrf_token() -> int:\r\n ...", "def generate_key(self)->bytes:\n return os.urandom(32)", "def gen_secret() -> str:\n r = random.randrange(0, 255) # INSECURE, just for demo\n r = hex(r)[2:]\n if len(r) == 1:\n return f'0{r}'\n return r", "def _create_shared_secret():\n\n randint = random.SystemRandom().randint\n bits = load_config(\"instavpn.json\")[\"shared_secret_bits\"]\n return urlsafe_b64encode(\"\".join(chr(randint(0, 255)) for _ in xrange(bits/8)))", "def gen_sig():\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def randkey():\n return binascii.b2a_hex(os.urandom(15))", "def get_random_secret_key():\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n return get_random_string(50, chars)", "def _get_encode_random(self):\n return os.urandom(16).encode('hex')", "def random_bytes(self, length: int) -> bytes:\n return token_bytes(length)", "def create_temporary_secret():\n return uuid.uuid4().hex", "def generate_token():\n return uuid4()", "def get_request_authentication():\n return os.urandom(16)", "def generate_nonce():\n return uuid4().hex", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def generate_secret_key():\n return b64encode(Fernet.generate_key()).decode('utf-8')", "def gensalt():\n return hexlify(os.urandom(24)).decode()", "def generate_key():\n return get_random_bytes(KEY_SIZE)", "def generate_key():\n\tkey = [ randint(0,255) for i in range(16) ]\n\treturn bytes( key )", "def create_challenge():\n\treturn os.urandom(12)", "def create_csrf_token(salt=''):\n\tif not salt:\n\t\tsalt = Random.new().read(csrf_salt_len).encode('hex')\n\th = SHA256.new()\n\th.update(get_csrf_secret() + salt)\n\treturn h.hexdigest() + salt", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']", "def _oauth_nonce_generate(self):\n\t\traw_data = random.getrandbits(32 * 8)\n\t\traw_str = ''\n\t\tfor i in range(32):\n\t\t\tnew_part = raw_data % 256\n\t\t\traw_data /= 256\n\t\t\traw_str += chr(new_part)\n\t\n\t\tencoded = base64.b64encode(raw_str) \n\t\treturn encoded.rstrip('=').replace('+', 'A').replace('/', 'B')", "def _generate_token_value():\n return secrets.token_urlsafe()", "def create_secret_code():\n characters = string.ascii_uppercase + string.digits\n size = 6\n return ''.join(random.choice(characters) for _ in range(size))", "def make_token():\n return secrets.token_urlsafe(36)" ]
[ "0.751621", "0.7423662", "0.7333086", "0.7329716", "0.7236352", "0.7232258", "0.71953297", "0.71489805", "0.7082189", "0.70771956", "0.7053713", "0.70158905", "0.70001644", "0.6955148", "0.69448227", "0.6937748", "0.69271857", "0.692591", "0.69196445", "0.6904558", "0.6870434", "0.68677014", "0.68434286", "0.6815161", "0.68140215", "0.6792783", "0.67678404", "0.67356676", "0.67184037", "0.67014724" ]
0.87860125
0
Read csrf secret from session if it exists; otherwise generate it and store in session
def get_csrf_secret(): sess = managers.request_manager.get_request().session() secret = sess.get(csrf_secret_sess_var_name, None) if not secret: secret = gen_csrf_secret() sess[csrf_secret_sess_var_name] = secret return secret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_csrf_secret():\n\treturn Random.new().read(csrf_secret_len)", "def getcsrf(session):\n session.get(\"http://anichart.net\")", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']", "def gen_csrfkey(force, randomness):\n\n def gen_randomkey(length):\n \"\"\"Generate random key, given a number of characters\"\"\"\n chars = string.letters + string.digits + string.punctuation\n return ''.join([choice(chars) for _ in xrange(int(str(length)))])\n\n csrf_key = gen_randomkey(randomness)\n session_key = gen_randomkey(randomness)\n\n file_name = '%s/secret_keys.py' % app4name\n file_template = Template('''# CSRF and Session keys\n\nCSRF_SECRET_KEY = '$csrf_key'\nSESSION_KEY = '$session_key'\n''')\n\n output = file_template.safe_substitute(dict(\n csrf_key=csrf_key, session_key=session_key\n ))\n\n if (os.path.exists(file_name)) and (force is False):\n print \"Warning: secret_keys.py file exists. Use '-f' flag to force overwrite.\"\n else:\n f = open(file_name, 'wb')\n f.write(output)\n f.close()", "def retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token", "def get_csrf(session):\n login = session.get(KONFUZIO_HOST)\n csrf_token = login.cookies['csrftoken']\n return csrf_token", "def checkcsrf(func):\n @functools.wraps(func)\n @sessiondecorator\n def wrapper(*args, session = None, **kw):\n if \"X-CSRF-TOKEN\" not in session.cookies:\n getcsrf(session)\n return func(*args,session = session, **kw)\n return wrapper", "def get_session_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.session_secret", "def _GetXsrfKey():\n client = memcache.Client()\n xsrf_key = client.get('xsrf_key')\n if not xsrf_key:\n config = models.GetApplicationConfiguration()\n xsrf_key = config.xsrf_key\n client.set('xsrf_key', xsrf_key)\n return xsrf_key", "def generate_csrf_token() -> int:\r\n ...", "def on_GET_request_setup_csrf_cookie(ev) -> None:\n request = ev.request\n if request.method != \"GET\":\n # Skip if not GET. 
If could detect static requests, would skip too\n return\n token = request.session.get_csrf_token()\n # print(request.session.session_id, token)\n if request.cookies.get(\"XSRF-TOKEN\") != token:\n # Set the Secure flag on the cookie only when serving on https.\n secure: bool = request.registry.settings.get(\n \"scheme_domain_port\", \"\"\n ).startswith(\"https\")\n ev.response.set_cookie(\n COOKIE_NAME,\n token,\n overwrite=True,\n secure=secure,\n httponly=False, # The client reads the cookie to send header\n samesite=\"strict\",\n )", "def get_xsrf_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.xsrf_secret", "def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)", "def extract_csrf(self, url):\r\n\r\n with requests.Session() as client:\r\n client.get(url) \r\n csrf = client.cookies['csrftoken']\r\n return csrf", "def check_csrf(f):\n\n @wraps(f)\n @require_login\n def wrapper(*args, **kwds):\n if \"token\" not in session:\n raise PicoException(\n \"Internal server error\",\n data={\"debug\": \"CSRF token not found in session\"},\n )\n submitted_token = request.headers.get(\"X-CSRF-Token\", None)\n if submitted_token is None:\n raise PicoException(\"CSRF token not included in request\", 403)\n if session[\"token\"] != submitted_token:\n raise PicoException(\"CSRF token is not correct\", 403)\n return f(*args, **kwds)\n\n return wrapper", "def get_csrf(self):\n rv = self.app.get('/')\n soup = BeautifulSoup(rv.data, 'html.parser')\n tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})\n return tag['value']", "def init_base_cookie(self):\n url = 'https://ceq.nkust.edu.tw/'\n try:\n res = self.main_session.get(url=url)\n if res.status_code == 200:\n soup = BeautifulSoup(res.text, 'html.parser')\n\n self.csrf_key = soup.find(\n 'input', {'name': '__RequestVerificationToken'}).get('value')\n if self.csrf_key != \"\":\n return True\n except:\n return False\n return False", "def add_csrf_validation(event):\n if event.request.method == 'POST':\n token = event.request.POST.get('_csrf')\n if token is None or token != event.request.session.get_csrf_token():\n headers = forget(event.request) # force a log out\n raise HTTPForbidden('CSRF token is missing or invalid',\n headers=headers)", "def _fetch_csrf(self) -> str:\n login_page = self._session.get(\"https://www.redpocket.com/login\")\n csrf_element = re.search(\n r'<input type=\"hidden\" name=\"csrf\" value=\"([\\w|-]+)\">', login_page.text\n )\n\n if csrf_element:\n csrf = csrf_element.group(1)\n self._logger.debug(\"Using CSRF: %s\", csrf)\n return csrf\n\n raise RedPocketException(\"Failed to get CSRF token from login page!\")", "def get_csrf_token(url,cookie):\r\n\r\n session = requests.Session()\r\n headers = {\"Origin\":url,\r\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\r\n \"Upgrade-Insecure-Requests\":\"1\",\r\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0\",\r\n \"Connection\":\"close\",\r\n \"Referer\":url + \"/admin/\",\r\n \"Accept-Language\":\"es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3\",\r\n \"Accept-Encoding\":\"gzip, deflate\"}\r\n cookies = {\"BLUDIT-KEY\":cookie}\r\n response = session.get(url + \"/admin/dashboard\",\r\n headers=headers,\r\n cookies=cookies\r\n )\r\n csrf_token = response.text.split('var tokenCSRF = \"')[1].split('\"')[0]\r\n\r\n 
print(\"csrf_token: \" + csrf_token)\r\n return csrf_token", "def _get_csrf(self):\n\n csrf_token_header_name = \"X-CsrfToken\"\n if csrf_token_header_name not in self.headers:\n home_head_response = requests.head(self.BASE_URL)\n self.cookies.update(home_head_response.cookies)\n csrf_token = self.cookies[\"csrftoken\"]\n csrf_header = {csrf_token_header_name: csrf_token}\n self.headers.update(csrf_header)", "def generate_csrf_token(app_key, app_secret, user_key, user_secret):\n # We authenticate the user using the keys\n auth = OAuth1(app_key, app_secret, user_key, user_secret)\n\n # Get token\n token_request = requests.get('https://commons.wikimedia.org/w/api.php', params={\n 'action': 'query',\n 'meta': 'tokens',\n 'format': 'json',\n }, auth=auth)\n token_request.raise_for_status()\n\n # We get the CSRF token from the result to be used in editing\n CSRF_TOKEN = token_request.json()['query']['tokens']['csrftoken']\n return CSRF_TOKEN, auth", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def valid_session():\n def wrapper(f):\n @wraps(f)\n def decorated_view(*args, **kwargs):\n\n secret_key = current_app.config['SESSION_PASSWORD']\n session_token = request.cookies.get(SESSION_COOKIE_NAME, None)\n if not session_token or not session_token_valid(secret_key, session_token):\n # .. when there are views\n # -- url_for('authentication_views.user_login')\n login_url = request.host_url[:-1] + \"/login/\"\n assert request.url.startswith(request.host_url)\n next_hop_path = request.url[len(request.host_url):]\n next_hop = urlencode({'next': next_hop_path})\n return redirect(login_url + \"?\" + next_hop)\n\n return f(*args, **kwargs)\n return decorated_view\n return wrapper", "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! 
Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")", "def get_initial_token():\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(EDX_HOMEPAGE)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''", "def _server_cookie_secret() -> str:\n return secrets.token_hex()", "def _shib_get_token(self): # pragma: no cover\n\n shibCookie = None\n for cookie in self._session.cookies:\n if \"shibsession\" in cookie.name:\n shibCookie = cookie\n break\n\n if not shibCookie:\n warnings.warn(\"No session token found.\", AuthenticationWarning)\n\n return shibCookie", "def test_csrf_token_session_rotation(self):\n\n csrf_client = Client(enforce_csrf_checks=True)\n csrf_client.login(username='archen', password='mytestpassword')\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token1 = \"{0}\".format(response.context['csrf_token'])\n\n csrf_client.logout()\n csrf_client.login(username='archen', password='mytestpassword')\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token2 = \"{0}\".format(response.context['csrf_token'])\n\n self.assertNotEqual(token1, token2, msg='CSRF Token is not rotated per session')", "def rotate_token(request: http.Request):\n if hasattr(request, '_csrf_hook'):\n request._csrf_hook.rotate_token()" ]
[ "0.7415861", "0.74125195", "0.72343606", "0.69048315", "0.686525", "0.6681955", "0.65113086", "0.64043695", "0.6368639", "0.6332918", "0.6305956", "0.625523", "0.6235409", "0.6150814", "0.6095577", "0.60392517", "0.6010801", "0.59956753", "0.59555525", "0.59422135", "0.59069234", "0.5896854", "0.584515", "0.57109857", "0.56914985", "0.5688088", "0.5679138", "0.5660914", "0.56388474", "0.5632423" ]
0.7971118
0
Generate csrf token based on existing/new csrf secret and provided/new salt
def create_csrf_token(salt=''): if not salt: salt = Random.new().read(csrf_salt_len).encode('hex') h = SHA256.new() h.update(get_csrf_secret() + salt) return h.hexdigest() + salt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_csrf_secret():\n\treturn Random.new().read(csrf_secret_len)", "def generate_csrf_token() -> int:\r\n ...", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']", "def get_csrf_secret():\n\tsess = managers.request_manager.get_request().session()\n\tsecret = sess.get(csrf_secret_sess_var_name, None)\n\tif not secret:\n\t\tsecret = gen_csrf_secret()\n\t\tsess[csrf_secret_sess_var_name] = secret\n\treturn secret", "def get_csrf_token(self):\n h = hashlib.new('sha256')\n h.update(self.__current_authentication_token())\n return h.hexdigest()", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()", "def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()", "def make_token():\n return secrets.token_urlsafe(36)", "def _generate_token_value():\n return secrets.token_urlsafe()", "def gen_csrfkey(force, randomness):\n\n def gen_randomkey(length):\n \"\"\"Generate random key, given a number of characters\"\"\"\n chars = string.letters + string.digits + string.punctuation\n return ''.join([choice(chars) for _ in xrange(int(str(length)))])\n\n csrf_key = gen_randomkey(randomness)\n session_key = gen_randomkey(randomness)\n\n file_name = '%s/secret_keys.py' % app4name\n file_template = Template('''# CSRF and Session keys\n\nCSRF_SECRET_KEY = '$csrf_key'\nSESSION_KEY = '$session_key'\n''')\n\n output = file_template.safe_substitute(dict(\n csrf_key=csrf_key, session_key=session_key\n ))\n\n if (os.path.exists(file_name)) and (force is False):\n print \"Warning: secret_keys.py file exists. 
Use '-f' flag to force overwrite.\"\n else:\n f = open(file_name, 'wb')\n f.write(output)\n f.close()", "def generate_csrf_token(app_key, app_secret, user_key, user_secret):\n # We authenticate the user using the keys\n auth = OAuth1(app_key, app_secret, user_key, user_secret)\n\n # Get token\n token_request = requests.get('https://commons.wikimedia.org/w/api.php', params={\n 'action': 'query',\n 'meta': 'tokens',\n 'format': 'json',\n }, auth=auth)\n token_request.raise_for_status()\n\n # We get the CSRF token from the result to be used in editing\n CSRF_TOKEN = token_request.json()['query']['tokens']['csrftoken']\n return CSRF_TOKEN, auth", "def generate_state_token():\n chars = (ascii_letters + digits)\n rand = SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(len(chars)))\n return hmac.new(\n config.SECRET_KEY.encode('utf-8'),\n random_string.encode('utf-8'),\n hashlib.sha256\n ).hexdigest()", "def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))", "def generate_token(ip: Text):\n return pbkdf2_sha256.encrypt(salt + ip)", "def generate_token(ip: Text):\n return pbkdf2_sha256.encrypt(salt + ip)", "def retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token", "def test_gen_and_verify_good_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n self.assertTrue(tool.verify_token(token, 12345, 'test_action'))", "def get_xsrf_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.xsrf_secret", "def get_xsrf_token(self, offset=0):\n if not self.xsrf_secret:\n self.xsrf_secret = os.urandom(8)\n self.put()\n m = md5.new(self.xsrf_secret)\n email_str = self.lower_email\n if isinstance(email_str, unicode):\n email_str = email_str.encode('utf-8')\n m.update(self.lower_email)\n when = int(time.time()) // 3600 + offset\n m.update(str(when))\n return m.hexdigest()", "def long_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def generate_token(payload: Any, secret: str | List[str]) -> str:\n return url_encode_full_stops(URLSafeTimedSerializer(secret).dumps(payload, \"token\"))", "def long_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def short_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()[::2]", "def short_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()[::2]", "def new_token(*args, **kwargs):\n return uuid.uuid4().hex", "def gen_site_secret(self, request, site_id=None, salt='', **kw):\n if site_id is None:\n site_id = self.gen_site_id(request)\n if site_id is None:\n return ''\n \"\"\" Generate site + uid specific secret \"\"\"\n secret_base = site_id + salt\n return 
sha1(secret_base).hexdigest()", "def get_salt():\n return os.urandom(32)", "def _server_cookie_secret() -> str:\n return secrets.token_hex()" ]
[ "0.768297", "0.7293141", "0.70243514", "0.67706597", "0.6689844", "0.6654554", "0.6647972", "0.66283864", "0.6550067", "0.6501651", "0.64808244", "0.6338237", "0.6312519", "0.6307513", "0.6307513", "0.6238732", "0.62366146", "0.6235569", "0.6225915", "0.6221798", "0.6175029", "0.6172401", "0.61631954", "0.60913724", "0.6085538", "0.60345185", "0.59842396", "0.59575176", "0.59305733", "0.59235656" ]
0.80260617
0
Verify csrf token against csrf secret from the session; if token is not provided it's read from request arguments
def verify_csrf_token(token=''): if not token: token = managers.request_manager.get_request().arguments().arguments().get(csrf_token_arg_name, "") if token: token = token[0] if len(token) != 2 * digest_size + 2 * csrf_salt_len: debug('Incorrect csrf token length') raise VDOM_csrf_exception() salt = token[2*digest_size:] if token != create_csrf_token(salt): debug('Incorrect csrf token value') raise VDOM_csrf_exception()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")", "def validate_csrf_token(event):\n request = event.request\n if request.is_xhr or request.method.upper() in ('POST', 'PUT', 'DELETE'):\n pyramid.session.check_csrf_token(request, token='XSRF_TOKEN',\n header='X-XSRF-TOKEN', raises=True)", "def check_csrf(f):\n\n @wraps(f)\n @require_login\n def wrapper(*args, **kwds):\n if \"token\" not in session:\n raise PicoException(\n \"Internal server error\",\n data={\"debug\": \"CSRF token not found in session\"},\n )\n submitted_token = request.headers.get(\"X-CSRF-Token\", None)\n if submitted_token is None:\n raise PicoException(\"CSRF token not included in request\", 403)\n if session[\"token\"] != submitted_token:\n raise PicoException(\"CSRF token is not correct\", 403)\n return f(*args, **kwds)\n\n return wrapper", "def checkcsrf(func):\n @functools.wraps(func)\n @sessiondecorator\n def wrapper(*args, session = None, **kw):\n if \"X-CSRF-TOKEN\" not in session.cookies:\n getcsrf(session)\n return func(*args,session = session, **kw)\n return wrapper", "def check_csrf(self):\n if (self.HTTP_X_CSRF_TOKEN in os.environ and\n self.is_csrf_token(os.environ[self.HTTP_X_CSRF_TOKEN])):\n pass\n else:\n common.render_error('Invalid CSRF token.')", "def add_csrf_validation(event):\n if event.request.method == 'POST':\n token = event.request.POST.get('_csrf')\n if token is None or token != event.request.session.get_csrf_token():\n headers = forget(event.request) # force a log out\n raise HTTPForbidden('CSRF token is missing or invalid',\n headers=headers)", "def getcsrf(session):\n session.get(\"http://anichart.net\")", "def _request_csrf_token(self, params):\n if params.get(\"action\") == \"query\":\n if params.get(\"meta\"):\n if \"tokens\" not in params[\"meta\"].split(\"|\"):\n params[\"meta\"] += \"|tokens\"\n else:\n params[\"meta\"] = \"tokens\"\n if params.get(\"type\"):\n if \"csrf\" not in params[\"type\"].split(\"|\"):\n params[\"type\"] += \"|csrf\"", "def trusted(req):\n # Get the CRSF token from the user session.\n session = req.environ.get('rex.session', {})\n session_csrf_token = session.get('_csrf_token')\n # Get the token value from the request.\n request_csrf_token = req.environ.get('HTTP_X_CSRF_TOKEN') or \\\n req.params.get('_csrf_token')\n # Check if the values coincide.\n if not session_csrf_token or not request_csrf_token:\n return False\n is_equal = True\n for ch1, ch2 in itertools.zip_longest(session_csrf_token,\n request_csrf_token):\n is_equal &= (ch1 == ch2)\n return is_equal", "def get_csrf(session):\n login = session.get(KONFUZIO_HOST)\n csrf_token = login.cookies['csrftoken']\n return csrf_token", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def has_csrf_token(self,content,url,is_input=True):\n\t\tif content:\n\t\t\tprotected = False\n\t\t\tcontent = 
content.strip()\n\t\t\tfor token in self.tokens:\n\t\t\t\ttoken = token.lower().strip()\n\t\t\t\tif token in content:\n\t\t\t\t\tprotected = True\n\t\t\t\n\t\t\tif not protected:\n\t\t\t\tif is_input:\n\t\t\t\t\tvul = \"inputs at \"+url+ \" is missing csrf token\"\n\t\t\t\t\tif vul not in self.vuln_inputs:\n\t\t\t\t\t\tself.vuln_inputs.append(vul)\n\t\t\t\telse:\n\t\t\t\t\tvul = \"the url \"+url+\" parameters is missing csrf token\"\n\t\t\t\t\tif vul not in self.vuln_urls:\n\t\t\t\t\t\tself.vuln_urls.append(vul)", "def retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token", "def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)", "def extract_csrf(self, url):\r\n\r\n with requests.Session() as client:\r\n client.get(url) \r\n csrf = client.cookies['csrftoken']\r\n return csrf", "def get_csrf_token_from_response(self, response):\n return re.search(CSRF_REGEX, response.body).group(1)", "def validate_against_csrf(event, Validator=CSRFValidator):\n \n request = event.request\n settings = request.registry.settings\n \n # Only validate if enabled.\n if not settings.get('csrf.validate', True):\n return\n \n # Ignore specified routes.\n matched_route = request.matched_route\n ignore_routes = settings.get('csrf.ignore_routes', None)\n if matched_route and ignore_routes:\n if matched_route.name in ignore_routes.split():\n return\n \n # Ignore specified paths.\n ignore_paths = settings.get('csrf.ignore_paths', None)\n if ignore_paths:\n for path in ignore_paths.split():\n if request.path.startswith(path):\n return\n \n session_token = request.session.get_csrf_token()\n try:\n Validator(session_token).validate(request)\n except CSRFError:\n raise HTTPUnauthorized", "def get_csrf(self):\n rv = self.app.get('/')\n soup = BeautifulSoup(rv.data, 'html.parser')\n tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})\n return tag['value']", "def test_csrf():\n\n # The authenticate method must not be altered for this test to be valid.\n assert (\n SessionAuthentication.authenticate\n is CsrfExemptSessionAuthentication.authenticate\n )\n\n # The `enforce_csrf` method should just pass with any request.\n assert CsrfExemptSessionAuthentication().enforce_csrf(\"foo\") is None", "def get_csrf_token(self):\n return get_csrf_token(self.REQUEST)", "def verify_token(self, token):\n return False", "def validate_token():\n global vault_token\n global vault_token_time\n\n if vault_token is None:\n return False\n\n return datetime.datetime.now() < vault_token_time", "def is_csrf_token(self, candidate_csrf_token):\n valid_token = bytearray(self.get_csrf_token())\n candidate = bytearray(candidate_csrf_token)\n return constant_time_equals(valid_token, candidate)", "async def validate_token(self, token):", "def on_GET_request_setup_csrf_cookie(ev) -> None:\n request = ev.request\n if request.method != \"GET\":\n # Skip if not GET. 
If could detect static requests, would skip too\n return\n token = request.session.get_csrf_token()\n # print(request.session.session_id, token)\n if request.cookies.get(\"XSRF-TOKEN\") != token:\n # Set the Secure flag on the cookie only when serving on https.\n secure: bool = request.registry.settings.get(\n \"scheme_domain_port\", \"\"\n ).startswith(\"https\")\n ev.response.set_cookie(\n COOKIE_NAME,\n token,\n overwrite=True,\n secure=secure,\n httponly=False, # The client reads the cookie to send header\n samesite=\"strict\",\n )", "def get_token(request: http.Request) -> str:\n if hasattr(request, '_csrf_hook'):\n return request._csrf_hook.get_token()", "def inbound(request):\n\n try:\n csrf_token = request.headers.cookie.get('csrf_token')\n csrf_token = '' if csrf_token is None else csrf_token.value\n csrf_token = _sanitize_token(csrf_token)\n # Use same token next time\n request.context['csrf_token'] = csrf_token\n except KeyError:\n csrf_token = None\n # Generate token and store it in the request, so it's\n # available to the view.\n request.context['csrf_token'] = _get_new_csrf_key()\n\n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n if _is_secure(request):\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = request.headers.get('Referer')\n if referer is None:\n raise Response(403, REASON_NO_REFERER)\n\n # Note that get_host() includes the port.\n good_referer = 'https://%s/' % _get_host(request)\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n raise Response(403, reason)\n\n if csrf_token is None:\n # No CSRF cookie. 
For POST requests, we insist on a CSRF cookie,\n # and in this way we can avoid all CSRF attacks, including login\n # CSRF.\n raise Response(403, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.line.method == \"POST\":\n request_csrf_token = request.body.get('csrf_token', '')\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n raise Response(403, REASON_BAD_TOKEN)", "def get_csrf_secret():\n\tsess = managers.request_manager.get_request().session()\n\tsecret = sess.get(csrf_secret_sess_var_name, None)\n\tif not secret:\n\t\tsecret = gen_csrf_secret()\n\t\tsess[csrf_secret_sess_var_name] = secret\n\treturn secret", "def validate_token(self, payload, headers, request):\n token = headers.get(self.TOKEN_NAME, \"\")\n\n # no token\n if self.verify == VerificationMethod.NONE:\n # do nothing as no method was chosen\n pass\n\n # static token\n elif self.verify == VerificationMethod.TOKEN:\n if not compare_digest(token, self.token):\n raise PermissionDenied(self.MESSAGE_TOKEN_ERROR)\n\n # hmac token\n elif self.verify == VerificationMethod.HMAC:\n digest = hmac.new(self.secret.encode('utf-8'), request.body, hashlib.sha256).digest()\n computed_hmac = base64.b64encode(digest)\n if not hmac.compare_digest(computed_hmac, token.encode('utf-8')):\n raise PermissionDenied(self.MESSAGE_TOKEN_ERROR)\n\n return True", "def csrf_token(context):\r\n csrf_token = context.get('csrf_token', '')\r\n if csrf_token == 'NOTPROVIDED':\r\n return ''\r\n return (u'<div style=\"display:none\"><input type=\"hidden\"'\r\n ' name=\"csrfmiddlewaretoken\" value=\"%s\" /></div>' % (csrf_token))" ]
[ "0.75995755", "0.73856825", "0.73174137", "0.71832883", "0.7028827", "0.6969976", "0.69217205", "0.68938845", "0.68345594", "0.68069303", "0.66359556", "0.66284615", "0.6612116", "0.644829", "0.63917625", "0.6361629", "0.6360583", "0.63481784", "0.6308884", "0.6281791", "0.6273652", "0.6272894", "0.62668514", "0.6259172", "0.6225697", "0.6222117", "0.6212538", "0.6201275", "0.619299", "0.6192089" ]
0.79013884
0
list starter arguments that must be applied conditionally based on version
def get_version_specific_arguments(self, version: str): result = [] semversion = semver.VersionInfo.parse(version) # Extended database names were introduced in 3.9.0 if self.supports_extended_names: result += ["--args.all.database.extended-names-databases=true"] # Telemetry was introduced in 3.11.0 if (semversion.major == 3 and semversion.minor >= 11) or (semversion.major > 3): result += ["--all.server.telemetrics-api=false"] # Column cache if ( self.cfg.enterprise and semver.compare(version, "3.9.5") >= 0 and semver.compare(version, "3.10.0") != 0 and semver.compare(version, "3.10.1") != 0 ): result += ["--args.all.arangosearch.columns-cache-limit=10000"] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_prelim_opts_args(application):\n opts, args = application.parse_preliminary_options(\n ['--foo', '--verbose', 'src', 'setup.py', '--statistics', '--version'])\n\n assert opts.verbose\n assert args == ['--foo', 'src', 'setup.py', '--statistics', '--version']", "def full_args():\n return setup_args()", "def build_args(self, project_update, private_data_dir, passwords):\n args = []\n if getattr(settings, 'PROJECT_UPDATE_VVV', False):\n args.append('-vvv')\n if project_update.job_tags:\n args.extend(['-t', project_update.job_tags])\n return args", "def _get_installation_args(self, install_optional, production_only, force, frozen_lockfile):\n raise NotImplementedError", "def list_opts():\n return [('ironic_lib', utils_opts)]", "def test_pre_cli_list_version(run):\n out, err = run(dork.cli.the_predork_cli, [], *(\"\", \"-l\", \"-v\"))\n assert \"test.yml\" in out and dork.__version__ in out, \\\n \"Failed run the dork.cli.the_predork_cli method: {err}\"\\\n .format(err=err)", "def getargs(ver='%prog 0.0'):\n parser = argparse.ArgumentParser(\n description=open(__file__).read().split(\"'''\")[1],\n formatter_class=argparse.RawDescriptionHelpFormatter) \n #@todo: OptionParser is depreciated in Python 3.2. \n #Need to move to the new style of parser. \n parser.add_argument(\"--fasta_a_name\", \n required=True,\n help = \"REQUIRED: The name of the fasta file a.\")\n parser.add_argument(\"--fasta_b_name\", \n required=True,\n help = \"REQUIRED: The name of the fasta file a.\")\n parser.add_argument(\"--set_operation\", \n default=\"U\",\n help = \"The operation you want to do. \")\n \n args = parser.parse_args()\n \n fasta_a_name = args.fasta_a_name\n fasta_b_name = args.fasta_b_name\n set_operation = args.set_operation\n \n return fasta_a_name,fasta_b_name,set_operation", "def _test_argv(self, verbose, extra_argv):\r\n #self.package_path = os.path.abspath(self.package_path)\r\n argv = [__file__, self.package_path]\r\n argv += ['--verbosity', str(verbose)]\r\n if extra_argv:\r\n argv += extra_argv\r\n return argv", "def GetMissingArguments(self):\n return []", "def get_additional_args(self):\n additional = \"\"\n if not self.workflow.cleanup_scripts:\n additional += \" --skip-script-cleanup \"\n if self.workflow.shadow_prefix:\n additional += \" --shadow-prefix {} \".format(self.workflow.shadow_prefix)\n if self.workflow.use_conda:\n additional += \" --use-conda \"\n if self.workflow.conda_prefix:\n additional += \" --conda-prefix {} \".format(self.workflow.conda_prefix)\n if self.workflow.use_singularity:\n additional += \" --use-singularity \"\n if self.workflow.singularity_prefix:\n additional += \" --singularity-prefix {} \".format(\n self.workflow.singularity_prefix\n )\n if self.workflow.singularity_args:\n additional += ' --singularity-args \"{}\"'.format(\n self.workflow.singularity_args\n )\n\n if self.workflow.use_env_modules:\n additional += \" --use-envmodules\"\n\n return additional", "def add_arguments(cls):\n return [\n (('--yes',), dict(action='store_true', help='clean .git repo')),\n (('--variable', '-s'),\n dict(nargs='+', help='set extra variable,format is name:value')),\n (('--skip-builtin',),\n dict(action='store_true', help='skip replace builtin variable')),\n\n (('--dir',), dict(nargs='?', default=os.getcwd(),\n help='set working directory')),\n (('--debug',), dict(action='store_true', help='open debug mode')),\n (('--dry-run',), dict(action='store_true',\n help='print command instead execute it')),\n (('--verbose', '-v'), dict(action='count')),\n ]", "def 
setup_cl_args(cls, parser):\n\n parser.add_argument(\n \"spec\", \n nargs=\"?\", \n default=\"\",\n help=\"Print info for this ptask spec. First checks relative to \" + \\\n \"the currently set ptask. If no match is found, checks \" + \\\n \"relative to the project root.\",\n )\n\n parser.add_argument(\n \"-v\", \"--versions\",\n dest=\"versions\",\n nargs=\"*\",\n default=[],\n help=\"Show subscriptions for the supplied verisons. Default \" + \\\n \"is current. A list of integers can be supplied for \" + \\\n \"specific versions, or 'all' for all versions.\"\n )", "def extra_args(self):\n return []", "def _get_arguments(self, rargs):\r\n\r\n args = []\r\n i = 0\r\n count = len(rargs)\r\n while i < count and not self._is_opt(rargs[i]):\r\n args.append(rargs[i])\r\n i += 1\r\n\r\n return args", "def arg_list():\n arg_list = [\n ['-d', '--domain', 'Specify the domain you are using'],\n ['-t', '--template-path', 'Specify template path'],\n ['-s', '--secrets-path', 'Specify template path'],\n ['-p', '--project', 'Specify a project name'],\n ['-c', '--cloud-platform', 'Specify the platform used'],\n ['-so', '--secrets-only', 'Generate secrets only'],\n ['-db', '--database-host', 'Specify the database host'],\n ['-dbc', '--database-connection-name', 'Specify the database connection name (GCP)'],\n ['-sbn', '--storage-bucket-name', 'Specify storage bucket name'],\n ['-sb', '--storage-backend', 'Specify storage backend s3/gcp/filesystem'],\n ['--acm', '--aws-cert-arn', 'Specify AWS ACM'],\n ['--sg-id', '--aws-alg-sg-id', 'Specify AWS SG ID'],\n ['--sentry', '--senty-dsn', 'Specify Sentry DSN'],\n ['-e', '--environment', 'Specify environment'],\n ['-g', '--gather', 'enable Gather yes or no'],\n ['--cm', '--cert-manager', 'Using cert manager?'],\n ['-m', '--modules', 'Aether modules i.e odk,ui,sync'],\n ['-r', '--redis-url', 'Redis endpoint for CouchDB sync'],\n ['-cdb', '--couchdb-url', 'Redis endpoint for CouchDB sync'],\n ['-gc', '--google-client-id', ' Google client ID for CouchDB sync']\n ]\n return arg_list", "def get_cli_arguments(self):\n pass", "def getPositionalArgs():", "def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args", "def _get_add_package_args(self, package, type_option, version_option):\n raise NotImplementedError()", "def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))", "def add_arguments(parser):\n parser.add_argument('-e', '--environment', help='Environment name', required=True)\n parser.add_argument('-w', '--dont-wait', help='Skip waiting for the init to finish', action='store_true')\n parser.add_argument('-l', '--version-label', help='Version label', required=False)", "def common_args(revision=None, branch=None, ssh_username=None, ssh_key=None):\n args = []\n if ssh_username or ssh_key:\n opt = ['-e', 'ssh']\n if ssh_username:\n opt[1] += ' -l %s' % ssh_username\n if ssh_key:\n opt[1] += ' -i %s' % ssh_key\n args.extend(opt)\n if revision:\n args.extend(['-r', revision])\n elif branch:\n if hg_ver() >= (1, 6, 0):\n args.extend(['-b', branch])\n return args", "def required_options():\n return [\n 'projects',\n 'old_milestone_names',\n 'new_milestone_name',\n 'statuses',\n 'bugs_importance',\n 'maximum'\n ]", "def add_version_args(repo_root, build_num, args):\n try:\n semver = semantic_version.Version(args['ZAZU_BUILD_VERSION'])\n except KeyError:\n semver = make_semver(repo_root, build_num)\n 
args['ZAZU_BUILD_VERSION'] = str(semver)\n args[\"ZAZU_BUILD_NUMBER\"] = str(build_num)\n args['ZAZU_BUILD_VERSION_PEP440'] = pep440_from_semver(semver)", "def optargs(args):\n parser = OptionParser()\n parser.add_option(\"-a\", \"--abandon\", dest=\"abandon_current\", default=False, action=\"store_true\",\n help=\"Abandon outstanding changes when updating to migration\")\n parser.add_option(\"-d\", \"--dry\", dest=\"dry_run\", default=False, action=\"store_true\",\n help=\"Just update the revision number, don't perform updates\")\n (options, args) = parser.parse_args(args)\n return (options, args)", "def get_extras_require() -> Dict[str, List[str]]:\n extras = {\n \"testing\": [\n \"pytest==6.1.2\",\n \"pytest-cov==2.10.1\",\n ],\n \"linting\": [\n \"pylint==2.6.0\",\n \"flake8==3.8.4\",\n \"black>=20.8b1\",\n \"darglint==1.5.5\",\n \"mypy==0.790\",\n # \"data-science-types>=0.2.20\", # pandas, numpy, matplotlib\n ],\n }\n extras[\"all\"] = [item for group in extras.values() for item in group]\n return extras", "def requirement_args(argv, want_paths=False, want_other=False):\n was_r = False\n for arg in argv:\n # Allow for requirements files named \"-r\", don't freak out if there's a\n # trailing \"-r\", etc.\n if was_r:\n if want_paths:\n yield arg\n was_r = False\n elif arg in ['-r', '--requirement']:\n was_r = True\n else:\n if want_other:\n yield arg", "def help_args():\n pass", "def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))", "def test_arg_version(run_nait) -> None: # type: ignore\n expected = nanaimo.version.__version__\n assert run_nait(['--version']).stdout.decode('utf-8').startswith(expected)" ]
[ "0.6652257", "0.6436489", "0.64220834", "0.624852", "0.601735", "0.59371823", "0.5928165", "0.5906079", "0.59029114", "0.5887085", "0.58200306", "0.58132803", "0.57993174", "0.5784199", "0.57690525", "0.5752837", "0.573312", "0.57010204", "0.56947035", "0.5688802", "0.5674345", "0.56628543", "0.5662229", "0.5662091", "0.5643051", "0.5636418", "0.5619789", "0.5614484", "0.5605948", "0.56015736" ]
0.74655694
0
get the list of dbservers managed by this starter
def get_dbservers(self): ret = [] for i in self.all_instances: if i.is_dbserver(): ret.append(i) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_servers(self):\n\t\treturn self.__servers", "def get_all_servers(self) -> List[Server]:\n pass", "def servers(self):\n return self._servers", "def get_databases(self):\n pass", "def databases(self):\n return self._databases", "def list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def discover_servers():\n servers = set()\n for p in glob.glob1(SPDK_SERVER_APP_DIR, \"*\"):\n m = SERVERS_PATTERN.match(p)\n if m:\n servers.add(m.group())\n return list(servers)", "def get_databases ():\n return _dbobjects[:]", "def list_servers(self, all_tenants=False):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + \\\n self.project_info[\"project_id\"] + \"/servers/detail\"\n if all_tenants:\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + self.project_info[\n \"project_id\"] + \"/servers/detail?all_tenants=1\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from server while listing servers.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"List servers Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Servers List :%s \" % output)\n return output[\"servers\"]", "def get_servers(self) -> dict:\n uri = f\"{self.uri}/servers\"\n\n response = self.request(uri=uri)\n return response.json()", "def list_databases(self):\n r = self.__get_response(settings.LST_DBS)\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetDnsServers', self.handle))", "def get_reachable_servers(self) -> List[Server]:\n pass", "def get_servers(self):\n url = '%s/servers/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['servers']\n else:\n LOG.error('Get servers failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def get_dns_servers(self):\n self.__not_implemented()", "def list_dbs(self):\n return self.get('_all_dbs').json()", "def mmo_mongos_servers(self, mmo_connection):\n mongos_servers = []\n c = mmo_connection[\"config\"].mongos.find({}, { \"_id\": 1 } )\n for doc in c:\n hostname, port = doc[\"_id\"].split(\":\")\n mongos_servers.append({ \"hostname\": hostname, \"port\": int(port) })\n return mongos_servers", "def list_servers():\n (code, message) = rest_api.list_servers(request)\n if (code == 200):\n return message\n else:\n abort(code)", "def list_databases(self) -> List[Dict]:\n self._check_connection(check_db=False)\n all_data = self.get_databases()\n all_dbs = []\n for data in all_data:\n all_dbs.append(data[\"system:resource_name\"][\"@value\"])\n return all_dbs", "def get_all(self):\n\n servers = self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgNet_GetDnsServers', self.handle))", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevNet_GetDnsServers', self.handle))", "def mmo_config_servers(self, mmo_connection):\n config_servers = []\n c = 
mmo_connection[\"admin\"].command(\"getCmdLineOpts\")[\"parsed\"][\"sharding\"][\"configDB\"]\n for item in c.split(\",\"):\n hostname, port = item.split(\":\")\n if \"/\" in hostname: # cfg Replset server\n hostname = hostname.partition(\"/\")[2]\n config_servers.append( { \"hostname\": hostname, \"port\": int(port) } )\n return config_servers", "def list_databases(self):\n end_point = '/'.join([self.host, 'api', 'databases', ''])\n resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})\n if resp.status_code != 200:\n raise ClientError('Encountered error getting list of databases: {}'.format(resp.json()))\n return resp.json()", "def run(self):\n self._list_servers()", "def get_servers():\n all_servers = []\n start = 0\n size = 100\n\n while True:\n params = {\n 'start': start,\n 'size': size,\n 'names': 1,\n 'cdata': 1\n }\n\n xml_content = _call(\n servers_base_url + 'get_server_list.php',\n parser='xml',\n params=params\n )\n\n servers = [Server.load(server_node) for server_node in xml_content.xpath('/result/server')]\n\n if not servers:\n break\n\n all_servers.extend(servers)\n\n if servers[-1].is_last:\n break\n\n start += size\n\n _set_servers_location(all_servers)\n _set_server_event(all_servers)\n\n all_servers.sort(\n key=lambda s: s.players.current,\n reverse=True\n )\n\n return all_servers", "def server_names(self):\n return self._server_names", "def list_server(self, feed_id=None):\n resources = self.list_resource(feed_id=feed_id, resource_type_id='WildFly Server')\n resources.extend(self.list_resource(\n feed_id=feed_id,\n resource_type_id='Domain WildFly Server'))\n servers = []\n if resources:\n for resource in resources:\n resource_data = self.get_config_data(\n feed_id=resource.path.feed_id,\n resource_id=self._get_resource_id(resource.path.resource_id))\n server_data = resource_data.value\n servers.append(Server(resource.id, resource.name, resource.path, server_data))\n return servers", "def all_dbs(self):\n return self.cloudant_client.all_dbs()", "def get_schemas(self):\n result = self.sql(\"SHOW DATABASES\").execute()\n return [row[0] for row in result.fetch_all()]" ]
[ "0.7709002", "0.73207456", "0.7205934", "0.72003657", "0.70308435", "0.701856", "0.70004505", "0.69486785", "0.6863328", "0.6820925", "0.6803907", "0.6767398", "0.67585117", "0.67491573", "0.6748919", "0.6736712", "0.67257047", "0.6720556", "0.6672018", "0.6666036", "0.6558561", "0.6557067", "0.6550705", "0.6550474", "0.6539223", "0.65242845", "0.6523582", "0.6475993", "0.64628273", "0.64537567" ]
0.85010105
0
get the list of agents managed by this starter
def get_agents(self):
    ret = []
    for i in self.all_instances:
        if i.instance_type == InstanceType.AGENT:
            ret.append(i)
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manager_agents(self):\n return self.get(\"manager_agents\")", "def get_agents(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.AgentList(self._results, runtime=self._runtime)", "def list(self):\n response = self._client.get('scanners/1/agents')\n return AgentList.from_json(response.text)", "def agents(self):\n return AgentManager(session=self._session)", "def test_list_agents(self):\n admin_resource_id = self.agent['id']\n with (self.override_role_and_validate_list(\n admin_resource_id=admin_resource_id)) as ctx:\n ctx.resources = self.agents_client.list_agents(\n id=admin_resource_id)[\"agents\"]", "def compute_agents(self):\n path = '/os-agents'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack compute agents: %s' % truncate(res))\n return res[0]['agents']", "def oc(self, stimulusID):\r\n global stimulusAPI\r\n try:\r\n pageList = stimulusAPI.getStimulusScope(stimulusID)\r\n agentSet = set([])\r\n for page in pageList:\r\n localAgentList = stimulusAPI.getAllAgentsWithViewOfSpecifiedPage(page)\r\n localAgentSet = set(localAgentList)\r\n agentSet.update(localAgentSet)\r\n agentList = list(agentSet)\r\n return agentList\r\n except Exceptions.InvalidStimulusProcessingType as e:\r\n raise e\r\n except Exceptions.ScriptError as e:\r\n raise e\r\n #self.execute(stimulusID)\r\n except Exception as e:\r\n raise Exceptions.ScriptError(e)", "def station_agents(self):\n return self.get(\"station_agents\")", "def list_agents(self, platform_uuid):\n return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')", "def customer_agents(self):\n return self.get(\"customer_agents\")", "def list_agents(self):\n\n agents = self.vip.rpc.call(CONTROL, \"list_agents\").get(timeout=5)\n versions = self.vip.rpc.call(CONTROL, \"agent_versions\").get(timeout=5)\n status_running = self.status_agents()\n uuid_to_status = {}\n # proc_info has a list of [startproc, endprox]\n for a in agents:\n pinfo = None\n is_running = False\n for uuid, name, proc_info in status_running:\n if a['uuid'] == uuid:\n is_running = proc_info[0] > 0 and proc_info[1] == None\n pinfo = proc_info\n break\n\n uuid_to_status[a['uuid']] = {\n 'is_running': is_running,\n 'version': versions[a['uuid']][1],\n 'process_id': None,\n 'error_code': None,\n 'permissions': {\n 'can_stop': is_running,\n 'can_start': not is_running,\n 'can_restart': True,\n 'can_remove': True\n }\n }\n\n if pinfo:\n uuid_to_status[a['uuid']]['process_id'] = proc_info[0]\n uuid_to_status[a['uuid']]['error_code'] = proc_info[1]\n\n if 'volttroncentral' in a['name'] or \\\n 'vcplatform' in a['name']:\n uuid_to_status[a['uuid']]['permissions']['can_stop'] = False\n uuid_to_status[a['uuid']]['permissions']['can_remove'] = False\n\n # The default agent is stopped health looks like this.\n uuid_to_status[a['uuid']]['health'] = {\n 'status': 'UNKNOWN',\n 'context': None,\n 'last_updated': None\n }\n\n if is_running:\n identity = self.vip.rpc.call(CONTROL, 'agent_vip_identity',\n a['uuid']).get(timeout=30)\n try:\n status = self.vip.rpc.call(identity,\n 'health.get_status').get(\n timeout=5)\n uuid_to_status[a['uuid']]['health'] = status\n except gevent.Timeout:\n _log.error(\"Couldn't get health from {} uuid: {}\".format(\n identity, a['uuid']\n ))\n except Unreachable:\n _log.error(\n \"Couldn't reach agent identity {} uuid: {}\".format(\n identity, a['uuid']\n ))\n for a in agents:\n if a['uuid'] in 
uuid_to_status.keys():\n _log.debug('UPDATING STATUS OF: {}'.format(a['uuid']))\n a.update(uuid_to_status[a['uuid']])\n return agents", "def transport_agents(self):\n return self.get(\"transport_agents\")", "def create_agents() -> List[InsuranceAgent]:\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(\n personal_info={\n AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(),\n KIDS_COUNT: FAKE.random_int(min=0, max=12),\n CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)),\n INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(),\n AVAILABLE: True,\n },\n call_acceptance_criteria=[\n {\n \"person_attribute\": AGE,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=120,\n ),\n },\n {\n \"person_attribute\": INCOME,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=1000000,\n ),\n },\n {\n \"person_attribute\": KIDS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": CARS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": INSURANCE_OPERATION,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": random.choice((RENT, BUY)),\n },\n ],\n )\n agents.append(insurance_agent)\n return agents", "def agent_arns(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"agent_arns\")", "def test_get_agents( self ):\n\n with self.app.app_context():\n url = '/donation/agents'\n\n # Ensure a GET with no saved agents returns 0.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), 0 )\n\n # Create some agents to retrieve.\n agent_models = []\n agent_jsons = get_agent_jsons()\n for agent_json in agent_jsons:\n agent_model = AgentSchema().load( agent_json ).data\n agent_models.append( agent_model )\n database.session.bulk_save_objects( agent_models )\n database.session.commit()\n\n # Ensure GET returns all agents.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( len( json.loads( response.data.decode( 'utf-8' ) ) ), len( agent_jsons ) )", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "def get_possible_absentees(self) -> List[QualifiedAgent]:\n wum: WorklistUpdateManagerApi = self._service_provider.get_service(WorklistUpdateManagerApi)\n return self._rem_iter_handler.consume(\n wum.get_possible_absentees(),\n \"agents\",\n PossAbsRemoteIteratorApi,\n PossAbsRemoteIteratorApi.poss_abs_get_next,\n )", "def _get_agents(self, instance_uuid, groups):\n _log.debug('_get_agents')\n connected_to_pa = self._platform_connections[instance_uuid]\n\n agents = connected_to_pa.agent.vip.rpc.call(\n 'platform.agent', 'list_agents').get(timeout=30)\n\n for a in agents:\n if 'admin' in groups:\n if \"platformagent\" in a['name'] or \\\n \"volttroncentral\" in a['name']:\n a['vc_can_start'] = False\n 
a['vc_can_stop'] = False\n a['vc_can_restart'] = True\n else:\n a['vc_can_start'] = True\n a['vc_can_stop'] = True\n a['vc_can_restart'] = True\n else:\n # Handle the permissions that are not admin.\n a['vc_can_start'] = False\n a['vc_can_stop'] = False\n a['vc_can_restart'] = False\n\n _log.debug('Agents returned: {}'.format(agents))\n return agents", "def agents_status(self):\n return self._get('agents/status')", "def get_all_l3_agents(self, plugin, context):\n with context.session.begin(subtransactions=True):\n query = context.session.query(agents_db.Agent)\n query = query.filter(\n agents_db.Agent.topic == 'l3_agent')\n query = (query.filter_by(admin_state_up=True))\n\n return [l3_agent\n for l3_agent in query\n if (agentschedulers_db.AgentSchedulerDbMixin.\n is_eligible_agent(True, l3_agent))]", "def _get_partner_agent(self):\n obj_partner = self.env['res.partner']\n args = [('parent_id', '=', False)]\n context = self._context or {}\n res = []\n\n if context.get('type') in ('out_invoice',):\n args.append(('wh_src_agent', '=', True))\n partner_ids = obj_partner.search(args)\n if partner_ids:\n partner_brw = obj_partner.browse(\n partner_ids)\n res = [item.id for item in partner_brw]\n return res", "def load_agents(self, agents):\n self.agents = agents", "def run(self, agent_args=None):\n agent_args = agent_args or {}\n self.neutron.list_agents(**agent_args)", "def test_list_l3_agents_on_router(self):\n with self.override_role():\n # NOTE: It is not empty list since it's a special case where\n # policy.enforce is called from the controller.\n self.ntp_client.list_l3_agents_hosting_router(self.router['id'])", "def get_agent(self):\n servers = self.get_agents()\n assert servers, \"starter: have no instances!\"\n return servers[0]", "def display_agents(self):\n for agent in self.scheduler.agents:\n id_ = agent.id_\n p = agent.mobility.current\n x, y = to_geometry(p[0]), to_geometry(p[1])\n r = to_geometry(agent.range_)\n print('define agent{} ellipse 4 4 white {} {}'.format(id_, x, y))\n print('define agentr{0} ellipse {1} {1} white {2} {3}'.format(\n id_, r, x, y))\n self.change_agent_status(agent)", "def getAllAgents(self):\n agent_dict ={}\n for member in self.membership.listMembers():\n if member.has_role('Agent'):\n agent_id = member.getUserName()\n agent_dict[agent_id]={}\n agent_dict[agent_id]['email'] = member.getProperty('email')\n agent_dict[agent_id]['areas'] = self.__wrapAreas(member.getProperty('areas'))\n agent_dict[agent_id]['fullname'] = member.getProperty('fullname')\n \n return agent_dict" ]
[ "0.77033234", "0.7416487", "0.7300729", "0.7252941", "0.72281355", "0.7117161", "0.70339304", "0.6690089", "0.666582", "0.66602963", "0.6639476", "0.6470405", "0.6329325", "0.62738615", "0.6230014", "0.61977404", "0.61977404", "0.61977404", "0.61977404", "0.6167947", "0.613414", "0.6121148", "0.61186093", "0.60759854", "0.60475534", "0.60275054", "0.6003428", "0.59731674", "0.59704596", "0.5945911" ]
0.76709455
1
get the first frontendhost of this starter
def get_frontend(self):
    servers = self.get_frontends()
    assert servers, "starter: don't have instances!"
    return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFrontend(self):\n return self.header['FRONTEND']", "def head_host(self) -> str:\n return self.head_args.host if self.head_args else None", "def get_host(self):\r\n return self.host", "def getHost():", "def getHost():", "def get_host(self):\n return self.host", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def master_host(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_host\")", "def nscaweb_host(self):\n return self.__get_option('nscaweb_host')", "def host():\n return platform.node()", "def get_homepage(resource):\n return resource.playlist.consumer_site.domain", "def get_host(request):\n return request.META[\"HTTP_HOST\"].split(\":\")[0]", "def fallback_host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"fallback_host\")", "def fallback_host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"fallback_host\")", "def get_local_hypervisor(self):\n # Look up hypervisors available filtered by my hostname\n host = self.get_my_hostname()\n hyp = self.get_all_hypervisor_ids(filter_by_host=host)\n if hyp:\n return hyp[0]", "def getHost(self):\n host = self.url[self.host_head:self.host_tail]\n return host", "def host(self) -> str:\n return self.first_pod_args.host", "def getHostHead(self):\n return self.host_head", "def getHost(self):\n\n\t\treturn HOST", "def home(environ, start_response):\n http_host, host_url = determine_host(environ)\n if http_host == host_url:\n http_host = 'frontpage.' + http_host\n return serve_space(environ, start_response, http_host)", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")", "def getHost(self): #$NON-NLS-1$\r", "def host(self) -> \"IStageHost\":\n return self._values.get(\"host\")", "def host(self) :\n\t\ttry :\n\t\t\treturn self._host\n\t\texcept Exception as e:\n\t\t\traise e", "def getHost(self):\n return self._host", "def master_host(self) -> str:\n raise NotImplementedError", "def getBackend(self):\n return self.header['BACKEND']" ]
[ "0.6992638", "0.6583565", "0.65169436", "0.6433285", "0.6433285", "0.6352371", "0.63019204", "0.63019204", "0.63019204", "0.62645936", "0.6246942", "0.62244636", "0.61690867", "0.6142307", "0.6142307", "0.61053056", "0.61049336", "0.6103162", "0.60861087", "0.60844123", "0.608103", "0.6038698", "0.6038698", "0.6038698", "0.60303277", "0.5974593", "0.596966", "0.5962287", "0.59497374", "0.5947828" ]
0.7496468
0
get the first dbserver of this starter
def get_dbserver(self):
    servers = self.get_dbservers()
    assert servers, "starter: don't have instances!"
    return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_stored_primary_server_name(db):\n if \"last_primary_server\" in db.collection_names():\n stored_primary_server = db.last_primary_server.find_one()[\"server\"]\n else:\n stored_primary_server = None\n\n return stored_primary_server", "def get_sync_master(self):\n servers = self.get_sync_masters()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def get_db(server_id):\n DATABASE = \"DATABASE\" + str(server_id)\n print(DATABASE)\n tracktop = _app_ctx_stack.top\n if not hasattr(tracktop, 'track_db0') and server_id == 0:\n tracktop.track_db0 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db0.row_factory = sqlite3.Row\n if not hasattr(tracktop, 'track_db1') and server_id == 1:\n tracktop.track_db1 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db1.row_factory = sqlite3.Row\n if not hasattr(tracktop, 'track_db2') and server_id == 2:\n tracktop.track_db2 = sqlite3.connect(app.config[DATABASE], detect_types=sqlite3.PARSE_DECLTYPES)\n tracktop.track_db2.row_factory = sqlite3.Row\n\n if server_id == 0:\n return tracktop.track_db0\n elif server_id == 1:\n return tracktop.track_db1\n else:\n return tracktop.track_db2", "def current_server():\n if not _current_server:\n create_server()\n return _current_server", "def get_server():\n pass", "def get_server(self):\n\n pass", "def get_db():\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client.seattle\n return db", "def get_server(self):\n return self.__server", "def horizon_server(horizon_servers):\n return horizon_servers[0]", "def get_server(name):\n if name in SERVERZ:\n return SERVERZ[name]\n\n server = server_from_config(name)\n return start_server_thread(server)", "def get_server(index=-1):\n #returns a random server\n keys=__servers.keys()\n if index<0 or index>len(keys):key=random.choice(keys)\n else:key=keys[index]\n return (key,__servers[key])", "def find_server(message, db):\n db_list = sql.database_list()\n if db in db_list:\n server = db_list[db]\n message.reply(Strings['DATABASE_SERVER'].format(db, server))\n else:\n message.reply(Strings['DATABASE_UNKNOWN'].format(db))", "def get_primary_db(force_new=False):\n defaults = get_defaults()\n if 'primary' in defaults.keys():\n primary_host = defaults['primary']\n else:\n raise IndraDatabaseError(\"No primary host available in defaults file.\")\n\n global __PRIMARY_DB\n if __PRIMARY_DB is None or force_new:\n __PRIMARY_DB = DatabaseManager(primary_host, label='primary')\n __PRIMARY_DB.grab_session()\n return __PRIMARY_DB", "def get_db():\n top = flask._app_ctx_stack.top\n if not hasattr(top, 'shelve'):\n top.shelve = MODEL\n\n return top.shelve", "def get_server(self, server):\n return self._get(_server.Server, server)", "def get_db():\n client = MongoClient(\"mongodb://admin:therightfit@ds125555.\" +\n \"mlab.com:25555/the_right_fit\")\n db_object = client['the_right_fit']\n return db_object", "def get_db(self):\n self.logger.info('in get_db()')\n try:\n return self.client[self.db_name]\n except Exception as e:\n self.logger.error(f'Error occurred while getting client {e}')", "def get_db_server_name(self):\n if self.db_config_file.key_exists(\"server_name\"):\n return self.db_config_file_value(\"server_name\").strip('\"')\n return self.get_system_id()", "def get_frontend(self):\n servers = self.get_frontends()\n assert servers, \"starter: don't have instances!\"\n return servers[0]", "def get_server(self, id):\n\t\treturn 
self.__servers.get_server(id)", "def server(self):\n return self.the_server", "def get_db():\n if ( g.get( 'db' ) is None ):\n g.db = connect_db()\n\n return g.db.connect()", "def get_single_db_name():\n expected_db_name = os.environ.get(\"MONGO_DB\")\n if not expected_db_name and is_testing():\n expected_db_name = f\"Test-{time.time_ns() // 1000000}\"\n\n return expected_db_name", "def get_dbs_obj(self):\n dbs_xml = self.get_DatabaseAndServer_XML()\n return self.get_DatabaseAndServer_obj(dbs_xml)", "def get_agent(self):\n servers = self.get_agents()\n assert servers, \"starter: have no instances!\"\n return servers[0]", "def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]", "def get_db():\n if \"db\" not in g:\n host = current_app.config[\"HOST\"]\n dbname = current_app.config[\"DATABASE\"]\n #params = \"host='{}' dbname='{}' user=root\".format(host, dbname)\n params = \"dbname='{}' user=root\".format(dbname)\n g.db = psycopg2.connect(params)\n # 'g.db' corresponsds to a DB conn\n return g.db", "def db_host(self) -> Optional[str]:\n return pulumi.get(self, \"db_host\")", "def get_db():\n\n if not hasattr(g, 'mongo_db'):\n client = MongoClient(C.MONGODB_DATABASE_URI)\n g.db = client.test\n return g.db", "def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db" ]
[ "0.7175073", "0.68898433", "0.6632575", "0.6597116", "0.65586126", "0.6534296", "0.6407233", "0.63766783", "0.63378835", "0.6261345", "0.6257781", "0.61878526", "0.6167295", "0.6121482", "0.61021817", "0.61021364", "0.6087449", "0.6070265", "0.6051888", "0.599604", "0.59832543", "0.59824157", "0.5965016", "0.59570634", "0.5956557", "0.5927891", "0.5926108", "0.5922536", "0.59124905", "0.59088445" ]
0.8454486
0
get the first agent of this starter
def get_agent(self):
    servers = self.get_agents()
    assert servers, "starter: have no instances!"
    return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def agent(self):\n return self.__agent", "def agent(self) -> Entity:\n return self.__agent", "def getfirstbot(self):\n\n return self.bots[0]", "def get_first(self):\n raise NotImplementedError(\"get_first: You should have implemented this method!\")", "def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"", "def get_effective_agent(self):\n raise Unimplemented()", "def _get_solver_agent(self):\n # Determine selectable agent(s)\n sctx = self.context.solver\n\n alist = sctx.agent\n if alist is None:\n # Return empty solver agent\n return CpoSolverAgent(self, sctx.params, sctx)\n elif not (is_string(alist) or is_array(alist)):\n raise CpoException(\"Agent identifier in config.context.solver.agent should be a string or a list of strings.\")\n\n # Create agent\n if is_string(alist):\n aname = alist\n agent = self._create_solver_agent(alist)\n else:\n # Search first available agent in the list\n agent = None\n aname = None\n errors = []\n for aname in alist:\n try:\n agent = self._create_solver_agent(aname)\n break\n except Exception as e:\n errors.append((aname, str(e)))\n # Agent not found\n errstr = ', '.join(a + \": \" + str(e) for (a, e) in errors)\n raise CpoException(\"Agent creation error: \" + errstr)\n\n # Log solver agent\n sctx.log(1, \"Solve model '\", self.model.get_name(), \"' with agent '\", aname, \"'\")\n agent.process_infos[CpoProcessInfos.SOLVER_AGENT] = aname\n return agent", "def choose(self):\n # pick agent A\n keys = list(self._agents.keys())\n keyA = random.choice(keys)\n agentA = self.model.schedule.agents[keyA]\n\n # pick pick agent B\n keyB = random.choice(agentA.neighbors)\n agentB = self.model.schedule.agents[keyB]\n\n return agentA, agentB", "def _deads_step_first(self) -> AgentID:\n _deads_order = [\n agent\n for agent in self.agents\n if (self.terminations[agent] or self.truncations[agent])\n ]\n if _deads_order:\n self._skip_agent_selection = self.agent_selection\n self.agent_selection = _deads_order[0]\n return self.agent_selection", "def agent_class(self):\r\n return self._agent_class", "def getFirstWorker(self):\n return self.entries[0]", "def get_first_incident_node(self):\n return self.first_incident_node # return the first incident node", "def whoGoesFirst(self):\n\t\treturn random.randint(1, 2)", "def getAgentID(self):\n\t\treturn self.agentID", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def start(self):\n return self._args[0]", "def retrieve(cls: Type[T], agent_id: int, datastore: Datastore) -> T:\n agent = cls.optionally_retrieve(agent_id, datastore)\n if agent is None:\n raise NotFound\n return agent", "def take_min(self):\n return self.get_first()", "def agent_init(self):\n pass", "def get_agent(self, agent_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"agents\", \"agent_id\", agent_id)", "def get_first_step(self):\n return self.get_step_by_index(0)", "def first(self) -> Task:\n return self._tasks[0]", "def agent_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_id\")", "def first(self):", "def get_worker_from_agent(agent: Agent):\n return agent.mephisto_agent.get_worker()", "def reserve_next_agent_id(self):\n query = \"SELECT NEXTVAL(pg_get_serial_sequence('agents', 'agent_id'))\"\n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n return 
cur.fetchone()[0]", "def getFirst(self, t):\n index = self._findFirst(t)\n if index >= 0:\n return self.jobs[index]\n else:\n return None", "def first(self, trace):\n return trace[0]", "def systems_manager_agent(self) -> Optional['outputs.ImageRecipeSystemsManagerAgent']:\n return pulumi.get(self, \"systems_manager_agent\")", "def __init__(self, agent):\n self.agent = agent" ]
[ "0.66546506", "0.6309816", "0.61794835", "0.5938089", "0.5912412", "0.5894221", "0.58619547", "0.58138776", "0.57892704", "0.57603174", "0.5742176", "0.57214665", "0.5696438", "0.568106", "0.5669796", "0.5664195", "0.5583705", "0.55532354", "0.55385774", "0.5535078", "0.5532041", "0.5520238", "0.5484903", "0.5462891", "0.5456885", "0.5450928", "0.5431759", "0.54299176", "0.54169655", "0.53836733" ]
0.76360667
0
get the first arangosync master of this starter
def get_sync_master(self):
    servers = self.get_sync_masters()
    assert servers, "starter: don't have instances!"
    return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def master(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"master\")", "def master(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"master\")", "def master(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"master\")", "def getMaster(self, base_path, filename='picloud.json'):\n\t\tmaster = None\n\t\tfor l in listdir(base_path) :\n\t\t\tpath = base_path + \"/\" + l\n\t\t\tmaster = self.checkIs('master', path, '', filename)\n\t\t\tif master != None :\n\t\t\t\treturn master\n\n\t\treturn None", "def FindMasterUsingChubby(ver):\n return core_utils.GetGSAMaster(ver, install_utilities.is_test(ver))", "def master_id(self):\r\n return self._arm.master_id", "def master(self):\n return self._master", "def master(self):\n\n return self._master", "def master(self):\n return self.remappers[self._master_name]", "def getMain(self):\n\n if self.__projects:\n return self.__projects[0]\n else:\n return None", "def GetActiveMaster(slavename=None, default=None):\n master_class_name = os.getenv('TESTING_MASTER')\n if master_class_name:\n return master_class_name\n\n master_class_name = os.getenv('INFRA_BUILDBOT_MASTER_CLASS_NAME')\n if master_class_name:\n return master_class_name\n\n slavename = slavename or GetActiveSlavename()\n for slave in GetAllSlaves():\n if slavename == EntryToSlaveName(slave):\n return slave['master']\n return default", "def master():\n env.branch = 'master'", "def master():\n env.branch = 'master'", "def current_master_version(self) -> str:\n return pulumi.get(self, \"current_master_version\")", "def stallone_master(machine: Machine) -> StalloneMaster:\n return StalloneMaster(machine, name=\"stallone\", add_to_default_env=True)", "def get_master(self):\n\n def watcher(watched_event):\n if watched_event.type and watched_event.path:\n msg = \"child changed, try to get master again.type %s, state %s, path %s.\" % (\n watched_event.type, watched_event.state, watched_event.path)\n logger.info(\"[ %s(%s) ] %s\" % (self.path, \"master\" if self.is_master else \"slave\", msg))\n self.workers = self.get_workers()\n logger.debug(\"watcher call get_master start\")\n self.get_master()\n logger.debug(\"watcher call get_master end\")\n\n try:\n children = self.zk.get_children(self.LEADERSHIP_PATH, watcher)\n except:\n logger.error(traceback.format_exc())\n return\n\n # self register\n infos = []\n for child in children:\n data, stat = self.zk.get(self.LEADERSHIP_PATH + \"/\" + child)\n infos.append(data)\n\n # make sure leadship and services exists\n if self.info not in infos or \\\n not self.zk.exists(self.SERVICE_PATH + \"/\" + self.info):\n logger.debug(\"get_master call register start\")\n self.register_leadership()\n self.register_service()\n logger.debug(\"get_master call register end\")\n\n children.sort()\n logger.debug(\"%s's children: %s\" % (self.LEADERSHIP_PATH, children))\n # check if I'm master\n self.master = children[:self.MASTER_NUM]\n if self.path in self.master:\n self.is_master = True\n logger.info(\"[ %s(%s) ] %s\" % (self.path, \"master\" if self.is_master else \"slave\", \"I am master!\"))\n # get slave status and assign undone task to them\n online_workers = self.get_workers()\n self.assign_task(online_workers)\n self.workers = online_workers", "def is_master(self):\n return self._is_master", "def organization_master_id(self) -> int:\n return pulumi.get(self, \"organization_master_id\")", "def master_mix(self):\n return composition_module.ReagentComposition(\n self._get_attr('master_mix_id'))", "def 
SyncClockMaster(self):\n if self.force_auto_sync:\n self.get('SyncClockMaster')\n return self._SyncClockMaster", "def get_host_master_id(self):\r\n return self._handler.get_host_master_id()", "def master_account(self):\n return self._master_account", "def get_master_url(self, identifier) -> None:\n # TODO(victorhc): Implement the following method to fetch the cluster\n # master_url from Dataproc.\n return '.'.join([\n self.cluster_metadata.project_id,\n self.cluster_metadata.region,\n self.cluster_metadata.cluster_name\n ])", "def is_master(self):\n return MPControl.is_master", "def choose_master(searchname):\n masters = get_masters()\n masternames = []\n master_lookup = {}\n for mn, path in masters:\n master = {}\n master['mastername'] = mn\n master_lookup[mn] = path\n masternames.append(master)\n\n candidates = [mn for mn in masternames if mn['mastername'] == searchname]\n\n errstring = 'string \\'%s\\' matches' % searchname\n master = only_get_one(candidates, 'mastername', errstring)\n if not master:\n return None\n\n return master_lookup[master]", "async def sync_master(self):\n if not [entity for entity in self._casatunes_entities() if entity.is_client]:\n await self.coordinator.data.zone_master(self.zone_master, False)\n await self.coordinator.async_refresh()\n _LOGGER.debug(\"%s zone is no longer master.\", self.zone_master)", "def zone_master(self) -> None:\n for zone in self.coordinator.data.zones:\n if zone.MasterMode and zone.SharedRoomID == self.zone.SharedRoomID:\n return zone.ZoneID", "def get_master_key():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><masterkey-properties></masterkey-properties></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def get_sync_master_port(self):\n self.sync_master_port = None\n pos = None\n sm_port_text = \"Starting syncmaster on port\"\n sw_text = \"syncworker up and running\"\n worker_count = 0\n logging.info(\"detecting sync master port\")\n while worker_count < 3 and self.is_instance_running():\n progress(\"%\")\n lfs = self.get_log_file()\n npos = lfs.find(sw_text, pos)\n if npos >= 0:\n worker_count += 1\n pos = npos + len(sw_text)\n else:\n time.sleep(1)\n lfs = self.get_log_file()\n pos = lfs.find(sm_port_text)\n pos = lfs.find(sm_port_text, pos + len(sm_port_text))\n pos = lfs.find(sm_port_text, pos + len(sm_port_text))\n if pos >= 0:\n pos = pos + len(sm_port_text) + 1\n self.sync_master_port = int(lfs[pos : pos + 4])\n return self.sync_master_port", "def get_master_offer(self):\n return Offer.objects.get(is_master=True)" ]
[ "0.6829638", "0.6829638", "0.6791485", "0.6693696", "0.6673793", "0.6628041", "0.657018", "0.63935375", "0.62487674", "0.61431384", "0.6016861", "0.5940178", "0.5940178", "0.58904886", "0.5833857", "0.5826365", "0.57743245", "0.5721033", "0.57085377", "0.5699552", "0.56985486", "0.5694681", "0.5671085", "0.5630063", "0.56257045", "0.5607661", "0.5592494", "0.55874914", "0.5582253", "0.5556315" ]
0.72852707
0
get the essentials of all instances controlled by this starter
def get_instance_essentials(self):
    ret = []
    for instance in self.all_instances:
        ret.append(instance.get_essentials())
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEssentialList(self):\n return self.essentials", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def get_instances(cls):\n raise NotImplementedError", "def get_instance_classes():\n return Base_Instance.instance_classes", "def get_dev_examples(self):\n raise NotImplementedError()", "def get_sweeps(self):\n return self.master.get_sweeps()", "def get_instance_classes():\n return Base_Instance.get_instance_classes()", "def show_instances():\n return get_instances()", "def all_experiments():\n elo_explain_experiments()\n alpha_beta_experiments()\n mtcs_experiments()", "def get_techniques_used_by_tools():\n global techniques_used_by_tools\n\n if not techniques_used_by_tools:\n techniques_used_by_tools = rsh.techniques_used_by_tools(get_srcs())\n \n return techniques_used_by_tools", "def instances(self):\n return self.get('instances')", "def get_fully_solved_instances(self, db):\n numInstances = db.session.query(db.Instance).options(joinedload_all('properties')) \\\n .filter(db.Instance.experiments.contains(self)).distinct().count()\n if numInstances == 0: return 0\n num_jobs_per_instance = db.session.query(db.ExperimentResult) \\\n .filter_by(experiment=self).count() / numInstances\n instances = []\n for i in self.instances:\n if db.session.query(db.ExperimentResult) \\\n .filter(db.ExperimentResult.resultCode.like('1%')) \\\n .filter_by(experiment=self, instance=i, status=1) \\\n .count() == num_jobs_per_instance:\n instances.append(i)\n return instances", "def list_instances(self):\n # list instances\n self._list_instances()", "def instances(cls):\n # clean garbage collected pkgs out of __instances\n cls.__instances[:] = [wkref for wkref in cls.__instances\n if wkref() is not None]\n # return instance references in a tuple\n pkgs = [wkref() for wkref in cls.__instances]\n return tuple(pkgs)", "def all_present_experiments(self):\n return _yield_subdir_names(self.exp_configs)", "def examples(self):\n return self._examples", "def instances(self):\r\n # It would be more efficient to do this with filters now\r\n # but not all services that implement EC2 API support filters.\r\n instances = []\r\n rs = self.connection.get_all_instances()\r\n for reservation in rs:\r\n uses_group = [g.name for g in reservation.groups if g.name == self.name]\r\n if uses_group:\r\n instances.extend(reservation.instances)\r\n return instances", "def experiments_init(self):\n pass", "def getXeprInstances():\n apilib = _loadapilib()\n instances = _findInst(apilib)\n return dict([(p, t) for p, t in instances])", "def get_instances(self):\n connection = self.connection\n\n instances = []\n\n connection.row_factory = sqlite3.Row\n cur = connection.cursor()\n cur.execute(\"SELECT * FROM INSTANCES\")\n rows = cur.fetchall()\n columns = [str(i[0]).lower() for i in cur.description]\n for row in rows:\n object = dict(zip(columns, row))\n instances.append(object)\n\n instancesNoneDict = {}\n\n for instance in instances:\n if instance['harvesterid'] not in instancesNoneDict:\n instancesNoneDict[instance['harvesterid']] = {}\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': 
instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n elif instance['harvesterid'] in instancesNoneDict:\n if instance['harvesterhost'] not in instancesNoneDict[instance['harvesterid']]:\n instancesNoneDict[instance['harvesterid']][instance['harvesterhost']] = {\n 'availability': instance['availability'], 'errorsdesc': instance['errorsdesc'],\n 'contacts': instance['contacts'].split(','),\n 'active': instance['active'], 'notificated': instance['notificated']}\n if 'none' in instancesNoneDict[instance['harvesterid']]:\n del instancesNoneDict[instance['harvesterid']]['none']\n return instancesNoneDict", "def get_examples(self, env):\n return self.fam.c_get_examples(self, env)", "def get_used_instances(self, instance):\n\n instances = list()\n\n for el in self.net_root.iter('block'):\n inst = el.attrib['instance']\n if instance in inst:\n if len(el.getchildren()) != 0:\n instances.append(get_root_cluster(el).attrib['name'])\n\n return instances", "def instances_used(self):\n return None", "def get_all_exclusives(self):\r\n if self.exclusives is None:\r\n self._propagate_exclusives()\r\n return self.exclusives", "def get_all_instance(self):\n\t\tself.batch_h = Variable(torch.from_numpy(self.config.batch_h)).cuda()\n\t\tself.batch_t = Variable(torch.from_numpy(self.config.batch_t)).cuda()\n\t\tself.batch_r = Variable(torch.from_numpy(self.config.batch_r)).cuda()\n\t\treturn self.batch_h, self.batch_t, self.batch_r", "def requires(self):\n\n return [\n SpecimenLevelExperimentCleaner(),\n MouseSpecimenCrossRef(),\n EmbryoSpecimenCrossRef(),\n ImpressExtractor(),\n ]", "def _entrypoint_iterator(self):\n return self._entry_points", "def detect_instances(self):\n lh.subsection(\"Instance Detection for {0.name}\".format(self))\n jwt = self.get_jwt_header()\n self.all_instances = []\n logging.debug(\"waiting for frontend\")\n logfiles = set() # logfiles that can be used for debugging\n\n # the more instances we expect to spawn the more patient:\n tries = 10 * self.expect_instance_count\n\n # Wait for forntend to become alive.\n all_instances_up = False\n while not all_instances_up and tries:\n self.all_instances = []\n detected_instances = []\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n for root, dirs, files in os.walk(self.basedir):\n for onefile in files:\n # logging.debug(\"f: \" + root + os.path.sep + onefile)\n if onefile.endswith(\"log\"):\n logfiles.add(str(Path(root) / onefile))\n\n for name in dirs:\n # logging.debug(\"d: \" + root + os.path.sep + name)\n match = None\n instance_class = None\n if name.startswith(\"sync\"):\n match = re.match(r\"(syncmaster|syncworker)(\\d*)\", name)\n instance_class = SyncInstance\n else:\n match = re.match(\n r\"(agent|coordinator|dbserver|resilientsingle|single)(\\d*)\",\n name,\n )\n instance_class = ArangodInstance\n # directory = self.basedir / name\n if match and len(match.group(2)) > 0:\n # we may see a `local-slave-*` directory inbetween,\n # hence we need to choose the current directory not\n # the starter toplevel dir for this:\n instance = instance_class(\n match.group(1),\n match.group(2),\n self.cfg.localhost,\n self.cfg.publicip,\n Path(root) / name,\n self.passvoid,\n self.cfg.ssl,\n self.cfg.version,\n self.enterprise,\n jwt=jwt,\n )\n instance.wait_for_logfile(tries)\n instance.detect_pid(\n ppid=self.instance.pid,\n full_binary_path=self.cfg.real_sbin_dir,\n offset=0,\n )\n detected_instances.append(instance.instance_type)\n 
self.all_instances.append(instance)\n\n print(self.expect_instances)\n detected_instances.sort()\n print(detected_instances)\n attach(str(self.expect_instances), \"Expected instances\")\n attach(str(detected_instances), \"Detected instances\")\n if (self.expect_instances != detected_instances) or (not self.get_frontends()):\n tries -= 1\n time.sleep(5)\n else:\n all_instances_up = True\n\n if not self.get_frontends():\n print()\n logging.error(\"STARTER FAILED TO SPAWN ARANGOD\")\n self.show_all_instances()\n logging.error(\"can not continue without frontend instance\")\n logging.error(\"please check logs in\" + str(self.basedir))\n for logf in logfiles:\n logging.debug(logf)\n message = \"if that does not help try to delete: \" + str(self.basedir)\n logging.error(message)\n raise Exception(message)\n self.show_all_instances()", "def get_all_cur_site_insts():\n return models.Curation_SiteInstance.objects.all()", "def noise_application_instances(self):\n # Add some \"noise\" application instances to the DB for every test, to\n # make the tests more realistic.\n factories.ApplicationInstance.create_batch(size=3)" ]
[ "0.64952594", "0.61609066", "0.6024103", "0.57624537", "0.5747617", "0.5703971", "0.56971", "0.5676145", "0.56370413", "0.5501353", "0.547217", "0.54002124", "0.53892356", "0.53773415", "0.5376807", "0.53593886", "0.5353485", "0.5344247", "0.5336788", "0.5326446", "0.53020716", "0.5293048", "0.5292328", "0.52779114", "0.52238935", "0.5206132", "0.52019435", "0.52008635", "0.51954514", "0.5182392" ]
0.8199698
0
print all instances of this starter to the user
def show_all_instances(self):
    if not self.all_instances:
        logging.error("%s: no instances detected", self.name)
        return
    instances = ""
    for instance in self.all_instances:
        instances += " - {0.name} (pid: {0.pid})".format(instance)
    logging.info("arangod instances for starter: %s - %s", self.name, instances)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_out():\n pass", "def print_results(self):\n pass", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n print(\"categories: \" + str(self.categories))\n print(\"content: \" + str(self.content))", "def show(self):\n i = 0\n print()\n for task in self.tasks:\n print(\"\\t\", i + 1, \". \", task.name, \"(\", task.priority, \")\")\n i += 1", "def help_show(self):\n print(\"print an instance based on the class name and id\")", "def printResults(self):\n for tweet in self.tweets:\n print(tweet)\n print(\"---------------------\\n\")", "def printOutput(self):\n pass", "def print_schedule(self):\n for entry in self.entries:\n print(entry.get_entry_string())", "def printStations(self):\n print(\"Bus numero \" + str(self._num) + \" :\")\n for i in range(len(self._stations)) :\n print(self._stations[i])\n print('\\n')", "def do_show(self, args):\n temp = args.split()\n\n if len(temp) == 0:\n print(\"** class name missing **\")\n return\n elif temp[0] not in self.myclasses:\n print(\"** class doesn't exist **\")\n return\n elif len(temp) < 2:\n print('** instance id missing **')\n return\n else:\n all_objs = storage.all()\n for i in all_objs.keys():\n if i == \"{}.{}\".format(temp[0], temp[1]):\n print(all_objs[i])\n return\n print('** no instance found **')", "def out(self):\n print(self.__class__.__name__)\n for prime in self.primes:\n print(prime)", "def out(self) -> None:\n print(self.__class__.__name__)\n for prime in self._primes:\n print(prime)", "def _display_examples(self):\n\n print(self._usage)\n print(self._examples)", "def display(self):\n print(self)", "def output(self):\n print \"Name:\", self.name\n print \"City:\", self.city\n print \"Country:\", self.country\n print \"Number of Reviews:\", len(self.sentiments)\n print \"Old Reviews (Stars):\", self.stars_avg\n print \"Old Reviews (%):\", self.stars_avg/5\n print \"New Rating (Stars)\", self.new_rating*5\n print \"New Rating (%):\", self.new_rating", "def print_list(self):\r\n print(\"Displaying each metric:\")\r\n print(\"======\")\r\n for metric in self.metrics:\r\n metric.whoami()\r\n print(\"======\")\r\n print(self.metrics)\r\n print(\"END\")\r\n print()", "def printall():\n print listAll()", "def __str__(self):\n #{{{ Nicely print of elements in class.\n\n if config.verbose: print \"Stations():\"\n\n for st in self.stachan_cache.keys():\n chans = self.stachan_cache[st].keys()\n print \"\\t%s: %s\" % (st,chans)", "def print_list(self):\r\n pass", "def out(self):\r\n print(self.__class__.__name__)\r\n for prime in self.primes:\r\n print(prime)", "def printStories(self):\n\t\tself.printHeader()\n\t\tfor i in range(self.firstStoryToShow, self.lastStoryToShow):\n\t\t\tself.outputStory(self.stories[i], self.showDomains, self.showFullTitles, self.collapseOldStories)\n\t\t\n\t\tif self.karmaChange:\n\t\t\tprint self.hnUserName + \"'s karma has changed since the last refresh.\"", "def print(self):\r\n self.print_avec_separateur()", "def printIns(self, stream):\n print(' ', str(self), file=stream)", "def print(self):\n for word in self.words:\n print(word)", "def printCars(self):\n for car in self.cars:\n self.logger.debug(car)", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Test Case ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-JIRA URL:\", self.JIRA_URL, sep='')", 
"def printSchedule():\r\n print(\"{0:^45}\".format(\"Your Schedule:\\n\"))\r\n print(\" Day Class Time\")\r\n if(len(classes) == 0):\r\n print(\"\\nThere are no classes\\n\")\r\n return\r\n for class_ in classes:\r\n print(class_.scheduleString())\r\n print()", "def show(self):\n\n print(self._walk(self, depth=1))", "def print(self) -> None:\n\n print(\"Name: {}\".format(self.name))\n print(\"Input Queue: {}\".format(self.input_queue))\n print(\"Output Queue: {}\".format(self.output_queue))\n print(\"Restart Required: {}\".format(str(self.restart_required)))\n print(\"Number of Processes: {}\".format(str(self.num_processes)))\n print(\"Process Job: {}\".format(self.process_job.__name__))\n print(\"Timeout Duration: {}\".format(str(self.timeout_duration)))\n self.print_process_list()", "def print(self):\n\n print(self)" ]
[ "0.67142516", "0.6560021", "0.636873", "0.63599074", "0.6321213", "0.62887406", "0.6230574", "0.62254745", "0.6211867", "0.621094", "0.6190211", "0.6186281", "0.61838835", "0.61825544", "0.61808974", "0.6177476", "0.6169501", "0.6163866", "0.61381304", "0.61379516", "0.6135463", "0.6132603", "0.6111109", "0.6088436", "0.6058275", "0.60241723", "0.60188633", "0.6014221", "0.60008454", "0.6000054" ]
0.7568982
0
retrieve token from the JWT secret file which is cached for the future use
def get_jwt_token_from_secret_file(self, filename):
    # pylint: disable=consider-iterating-dictionary
    if filename in self.jwt_tokens.keys():
        # token for that file was checked already.
        return self.jwt_tokens[filename]
    cmd = [
        self.cfg.bin_dir / "arangodb",
        "auth",
        "header",
        "--auth.jwt-secret",
        str(filename),
    ]
    print(cmd)
    jwt_proc = psutil.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    logging.info("JWT starter has PID:" + str(jwt_proc.pid))
    (header, err) = jwt_proc.communicate()
    jwt_proc.wait()
    if len(str(err)) > 3:
        raise Exception("error invoking the starter " "to generate the jwt header token! " + str(err))
    if len(str(header).split(" ")) != 3:
        raise Exception("failed to parse the output" " of the header command: " + str(header))
    self.jwt_tokens[filename] = str(header).split(" ")[2].split("\\")[0]
    return self.jwt_tokens[filename]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peek_app_token():\n if not os.path.exists(_token_storage_path):\n return None\n\n try:\n with open(_token_storage_path) as secret_file:\n return json.loads(secret_file.read())\n\n except Exception as exc:\n log.error(f'Could not read secret file.\\n{exc}')\n traceback.print_exc(file=sys.stderr)", "def get_token():\n if os.path.exists(AUTH_TOKEN_PATH):\n with open(str(AUTH_TOKEN_PATH), 'r') as TokenObj:\n try:\n data = TokenObj.read()\n except (OSError, IOError) as e:\n echo(e)\n data = json.loads(data)\n token = data[\"token\"]\n return token\n else:\n echo(\"\\nYour token file doesn't exists.\")\n echo(\"\\nIt should be present at ~/.evalai/token.json\\n\")\n return None", "def get_token_from_secret_file(secret_file_path):\n try:\n with open(secret_file_path, \"r\") as f:\n return f.readline()\n except FileNotFoundError:\n raise BaseSpaceDownloadError(\"Secret file not found\")\n except PermissionError:\n raise BaseSpaceDownloadError(\"No permissions to read secret file\")", "def getCachedToken( self ):\n if ( os.path.exists( TOKEN_PATH )):\n return open( TOKEN_PATH ).read()\n else :\n return None", "def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken", "def __get_authentication_token(self):\n cache = load_json(self._tokenPath)\n return cache[\"authentication_token\"]", "def get_live_token():\n token_file = open(os.path.dirname(__file__) + TOKEN_FILE_PATH, \"r\")\n keyword = \"GITLAB_API_SECRET\"\n for tokens in token_file:\n token = tokens.split(\"\\n\")\n for token_key in token:\n if keyword in token_key:\n gitlab_token = token_key.split(\"\\\"\")[1]\n token_file.close()\n return gitlab_token", "def __current_authentication_token(self):\n if os.path.isfile(self.token_filename):\n with open(self.token_filename, 'r') as f:\n (stored_token, expires) = f.read().split(' ')\n t = time.time()\n if int(expires) > t:\n return stored_token\n return None", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "def get_token():\n req = request.get_json()\n username = str(req['username'])\n password = str(req['password'])\n if User.username_password_match(username, password):\n expiration_date = datetime.datetime.utcnow() + \\\n datetime.timedelta(seconds=100)\n token = jwt.encode({'exp': expiration_date}, app.config['SECRET_KEY'], algorithm='HS256')\n return token\n return Response('', 401, mimetype='application/json')", "def get_token(filename='config.ini'):\n cp = ConfigParser()\n cp.read(filename)\n token = cp.get('githubapi', 'token')\n return token", "def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def fetch_token():\n bucket = os.environ[\"SPOTIFY_BUCKET_NAME\"]\n path = os.getenv(\"SPOTIFY_BUCKET_PATH\", \"\")\n logger.info(\"Reading Spotify OAuth token from s3://%s/%s/token.json.\" %\n (bucket, path))\n s3 = boto3.client('s3')\n content_object = s3.get_object(Bucket=bucket, Key=\"%s/token.json\" % path)\n file_content = content_object['Body'].read().decode('utf-8')\n token = json.loads(file_content)\n return token", "def get_token(alias, reg_code, privKey):\n data = json.dumps({\n \"namespace\": alias,\n \"reg_code\": reg_code\n })\n url = endpoint('auth')\n r = requests.post(url,data=data) \n token_str = (r.__dict__['_content']).decode()\n r_token_obj = json.loads(token_str)\n token_cipher = ast.literal_eval( 
r_token_obj[\"token\"] )\n token_obj = dict()\n token_obj = {\n \"authToken\": decrypt_message( privKey, token_cipher),\n \"expiration_minutes\": r_token_obj[\"expiration_minutes\"],\n \"expiration\": str(datetime.datetime.now() + datetime.timedelta(minutes=r_token_obj[\"expiration_minutes\"]))\n }\n expiration = token_obj[\"expiration\"]\n expiration = parser.parse(expiration)\n if datetime.datetime.now() > expiration:\n print(\"Token has expired\")\n else:\n c = expiration - datetime.datetime.now()\n valid_minutes = str(divmod(c.total_seconds(), 60)[0])\n return token_obj[\"authToken\"]", "def load_token(token):\n \n #The Token itself was generated by User.get_auth_token. So it is up to \n #us to known the format of the token data itself. \n \n #The Token was encrypted using itsdangerous.URLSafeTimedSerializer which \n #allows us to have a max_age on the token itself. When the cookie is stored\n #on the users computer it also has a exipry date, but could be changed by\n #the user, so this feature allows us to enforce the exipry date of the token\n #server side and not rely on the users cookie to exipre. \n max_age = REMEMBER_COOKIE_DURATION.total_seconds()\n \n #Decrypt the Security Token, data = [username, hashpass]\n data = login_serializer.loads(token, max_age=max_age)\n \n #Find the User\n user = load_user(data[0])\n \n #Check Password and return user or None\n if user and data[1] == user.password:\n return user\n return None", "def get_token():\n url = settings.GENERATE_TOKEN_URL\n headers = {\"Authorization\": \"Basic {}\".format(settings.MPESA_APP_AUTHTOKEN)}\n response = get(url, headers)\n return response.json()", "def retrieve_token():\n try:\n deserialized_message = json.loads(peek_app_token())\n\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n if expires_at and check_expired_time(expires_at):\n return deserialized_message.get('token')\n else: # Token expired, refresh it\n refresh_token()\n\n deserialized_message = peek_app_token()\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n try:\n assert(expires_at and check_expired_time(expires_at))\n return deserialized_message.get('token')\n except:\n raise # When all else fails\n\n except Exception as exc:\n log.error(f'Could not refresh token.\\n{exc}')\n traceback.print_exc(file=sys.stderr)\n\n return None", "async def jwt_secret() -> Optional[str]:\n if not jwt_secret_config:\n raise RuntimeError(\"jwt_secret_config not set in auth\")\n if hasattr(jwt_secret_config, \"get_secret_value\"):\n return jwt_secret_config.get_secret_value()\n else:\n return jwt_secret_config", "def get_jwt() -> str:\n LOGGER.debug(\"Retrieving JWT...\")\n\n args = {\n \"url\": \"{0}/auth\".format(CONFIG['dojot']['url']),\n \"data\": json.dumps({\n \"username\": CONFIG['dojot']['user'],\n \"passwd\": CONFIG['dojot']['passwd'],\n }),\n \"headers\": {\n \"Content-Type\": \"application/json\"\n },\n }\n\n res = DojotAPI.call_api(requests.post, args)\n\n LOGGER.debug(\".. 
retrieved JWT\")\n return res[\"jwt\"]", "def getJWTtoken(self):\n\n token = False\n try:\n res = self.s.get(self.url + 'tokens/jwt', auth=(self.username, self.password), verify=False)\n res.raise_for_status()\n except:\n logger.error(res)\n raise\n token = vsdModels.Token(**res.json())\n try:\n payload = jwt.decode(token.tokenValue, verify=False)\n\n except jwt.InvalidTokenError as e:\n logger.error('token invalid, try using Basic Auth{0}'.format(e))\n raise\n\n return token", "def get_stored_token():\n try:\n parser = SafeConfigParser()\n parser.read(OAUTH_FILE)\n user = parser.get('auth', 'user')\n token = parser.get('auth', 'token')\n token_date_str = parser.get('auth', 'token_date')\n except ConfigParser.Error as e:\n return None, None\n\n if user and token and token_date_str:\n date1 = datetime.datetime.strptime(token_date_str, '%Y-%m-%d').date()\n date2 = datetime.date.today()\n if (date2 - date1).days > OAUTH_EXP_DAYS:\n user, token = None, None\n\n return user, token", "def _get_token(self):\n return user.get_token()", "def get_jwt_header(self):\n if self.jwt_header:\n return self.jwt_header\n self.jwt_header = self.get_jwt_token_from_secret_file(str(self.jwtfile))\n return self.jwt_header", "def deserialize_tokens():\n\ttry:\n\t\twith open(config.TOKENPATH, \"r+\") as f:\n\t\t\tcontext = f.read()\n\t\t\tres = eval(context)\n\t\t\t# load into memory\n\t\t\treturn res[\"access_token\"], res[\"refresh_token\"]\n\texcept:\n\t\t# unexcept token format\n\t\tfrom common import ApplicationException\n\t\traise ApplicationException(\"authorization file is broken, please run init\")", "def retrieve_token(filename):\n with open(filename, 'r') as f:\n token = f.readline()\n\n return token", "def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])", "def load_token():\n try:\n ifile = open('access.cfg', 'r')\n token_string = ifile.read()\n ifile.close()\n return tweepy.oauth.OAuthToken.from_string(token_string)\n\n except IOError:\n print (\"Error: Unable to load credentials, please authenticate.\")\n return None\n\n except tweepy.TweepError:\n print (\"Error: Unable to parse credentials, please remove access.cfg \"\n \"file and try running app again.\")", "async def token(request: Request):\n return get_token()", "def get_token(path = os.getcwd()):\n\n path += \"\\\\.env\"\n load_dotenv(path)\n token = os.environ.get(\"token\")\n return token", "def get_auth_token():\n auth_token_value = memcache.get('authtoken')\n if not auth_token_value:\n entity = Token.get_by_key_name(key_names = 'authtoken')\n if entity:\n auth_token_value= entity.value\n memcache.set('authtoken', auth_token_value)\n else:\n auth_token_value = None\n return auth_token_value" ]
[ "0.77592856", "0.7595998", "0.75481737", "0.7526818", "0.69495", "0.69054925", "0.6903875", "0.6800849", "0.6775595", "0.6755065", "0.6746886", "0.6740278", "0.67240804", "0.6659499", "0.66390276", "0.6621728", "0.6608063", "0.659404", "0.6589575", "0.65768105", "0.6572895", "0.65722185", "0.656959", "0.6553014", "0.65402555", "0.65393645", "0.653337", "0.6527102", "0.6505145", "0.6501232" ]
0.8024519
0
return jwt header from current installation
def get_jwt_header(self): if self.jwt_header: return self.jwt_header self.jwt_header = self.get_jwt_token_from_secret_file(str(self.jwtfile)) return self.jwt_header
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authentication_header():\n with open(KEY_FILE, \"r\") as file:\n header = json.load(file)\n return header", "def get_authorization_header(self):\n return {\"Authorization\": \"Bearer {}\".format(self.get_jwt())}", "def get_jwt(self, request):\n auth_header_prefix = self.auth_header_prefix\n try:\n authorization = request.authorization\n except ValueError:\n return None\n if authorization is None:\n return None\n authtype, token = authorization\n if authtype.lower() != auth_header_prefix.lower():\n return None\n return token", "def get_auth_header(self):\n if not self.verify():\n return None\n\n auth_val = self.encode_auth_header_val()\n if not auth_val:\n return None\n\n return {'Authorization': auth_val.replace('\\n', '')}", "def header_token(token):\n return {'Authorization': '{0} {1}'.format('JWT', token)}", "def get_api_header(token):\n return {\n 'Authorization': 'Token ' + str(token)}", "def jwt_header(cert):\n header = {}\n header[\"alg\"] = \"RS256\"\n header[\"x5c\"] = cert\n return header", "def get_jwt_value(self, request):\n auth = get_authorization_header(request).split()\n auth_header_prefix = settings.JWT_AUTH_HEADER_PREFIX.lower()\n if not auth:\n if settings.JWT_AUTH_COOKIE:\n return request.COOKIES.get(settings.JWT_AUTH_COOKIE)\n return None\n # compare JWT_AUTH_HEADER_PREFIX and extractd token refiex \"should be like WWW-athenticate\"\n if smart_text(auth[0].lower()) != auth_header_prefix:\n return None\n if len(auth) == 1:\n msg = _('Invalid Authorization header. No credentials provided.')\n raise exceptions.AuthenticationFailed(msg)\n elif len(auth) > 2:\n msg = _('Invalid Authorization header. Credentials string '\n 'should not contain spaces.')\n raise exceptions.AuthenticationFailed(msg)\n #the auth list should have only 2 element which are:\n # JWT_AUTH_HEADER_PREFIX and the token\n #return the actual token inside the header\n return auth[1]", "def get_headers(self):\n headers = self.headers\n\n if self.jwt_secret:\n current = int(time.time())\n params = {'exp': current + self.jwt_token_length}\n token = jwt.encode(params, self.jwt_secret, algorithm='HS256')\n headers = {\n **headers,\n 'Authorization': 'Bearer {}'.format(token.decode('utf-8')),\n }\n\n return headers", "def get_headers(self):\n return {\n 'Authorization': 'JWT {}'.format(self.token)\n }", "def _get_request_header() -> Dict:\n metas, envs = get_full_version()\n\n header = {\n **{f'jinameta-{k}': str(v) for k, v in metas.items()},\n **envs,\n }\n return header", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token", "def get_jwt():\n\n try:\n scheme, token = request.headers['Authorization'].split()\n assert scheme.lower() == 'basic'\n return base64.b64decode(token).decode(\"UTF-8\")\n except (KeyError, ValueError, AssertionError):\n raise Forbidden('Invalid Bearer Token.')", "def get_auth_header(self) -> Mapping[str, Any]:\n return {}", "def _get_authorization_header(self):\n return f\"token {self._context.get_github_token()}\"", "def auth_header(self):\n return self._auth_header", "def build_header(self):\n authstring = \"Bearer \" + self.auth_token\n header = {\n \"Authorization\": authstring,\n \"Content-Type\": \"application/json\",\n \"User-Agent\": self.user_agent,\n 
\"Accept-Encoding\": \"gzip\"\n }\n return header", "def auth_header_value(self):\n return f\"token {self.API_TOKEN}\"", "def get_authenticate_header(self):\n pass", "def build_header(token: str = None):\n return {\n \"Content-Type\": \"application/json\",\n \"X-Auth-Token\": token or get_project_token(),\n }", "async def jwt_header(\n Authorization: Optional[str] = Header(None),\n) -> Optional[RawAuth]:\n if not Authorization:\n return None\n\n parts = Authorization.split()\n if parts[0].lower() != \"bearer\":\n log.debug(\"Authorization header Failed, lacked bearer\")\n return None\n if len(parts) != 2:\n log.debug(\"Authorization header Failed, not 2 parts\")\n return None\n else:\n log.debug(\"Got header:Authorization with a JWT\")\n log.debug(\"jwt_header(): %s\", Authorization)\n return RawAuth(\n rawjwt=parts[1],\n rawheader=Authorization,\n via=\"header\",\n key=\"Authorization\",\n )", "def get_token_auth_header():\n # Get authorization form request header\n auth = request.headers.get('Authorization', None)\n # Check if authorization header exists\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is MISSING!'\n }, abort(401))\n # If bearer token, then first part of string = 'bearer'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\"'\n }, abort(401))\n # Authorization header string length must be 2\n elif len(parts) != 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be a BEARER token'\n }, abort(401))\n\n token = parts[1]\n return token", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n print(auth)\n\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n \n parts = auth.split()\n \n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def extract_bearer_token(request):\n return request.headers['Authorization'].split(\" \")[-1].strip()", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n elif auth.split()[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n elif len(auth.split()) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be include type and token.'\n }, 401)\n elif len(auth.split()) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be Bearer token.'\n }, 401)\n else:\n token = auth.split()[1]\n return token", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected'}, 401)\n\n parts = 
auth.split()\n\n if parts[0].lower() != 'bearer':\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header must start with Bearer'}, 401)\n\n if len(parts) < 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Token not found after Bearer'}, 401)\n\n if len(parts) > 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header is an invalid token structure'}, 401)\n\n return parts[1]", "def get_authorization_header(client, user):\n # obtain authorization token\n response = client.post(\n reverse('token-obtain'),\n data={'username': user.username, 'password': user.raw_password},\n content_type='application/json'\n )\n token = response.json()['access']\n return {'HTTP_AUTHORIZATION': f'Bearer {token}'}", "def authenticate_header(self, request):\n return '{0} realm=\"{1}\"'.format(settings.JWT_AUTH_HEADER_PREFIX,\n self.www_authenticate_realm)", "def get_token(self):\n self.register_user(self.user_data)\n result = self.login_user(self.login_data)\n header_access_token = json.loads(result.data.decode())['header_access_token']\n return header_access_token", "def jwt_token_verify(auth_header):\n # Hug do not extract Bearer prefix\n auth_token, payload = parse_header(auth_header)\n return payload" ]
[ "0.7406444", "0.7215993", "0.7196089", "0.7186894", "0.7012713", "0.6979103", "0.6942568", "0.69397306", "0.69034165", "0.6893624", "0.6858153", "0.68385124", "0.6749719", "0.6743572", "0.6729134", "0.6724721", "0.6700563", "0.6690566", "0.6663717", "0.66617864", "0.663666", "0.6626147", "0.6617149", "0.6606582", "0.6585572", "0.65675414", "0.6566705", "0.6564615", "0.65532583", "0.65476197" ]
0.8041909
0
set the passvoid to the managed instance
def set_passvoid(self, passvoid, write_to_server=True): if write_to_server: print("Provisioning passvoid " + passvoid) self.arangosh.js_set_passvoid("root", passvoid) self.passvoidfile.write_text(passvoid, encoding="utf-8") self.passvoid = passvoid for i in self.all_instances: if i.is_frontend(): i.set_passvoid(passvoid) self.cfg.passvoid = passvoid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def instance(self, instance):\n self._instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def instance(self, instance):\n\n self._instance = instance", "def _attach_to_instance(self, instance):\n self._instance = instance", "def target_instance(self, target_instance):\n self._target_instance = target_instance", "def SetActiveObject(self):", "def set_state( self ):", "def post_save_access_attempt(self, instance, **kwargs):", "def vm(self, vm):\n\n self._vm = vm", "def set_passthrough(self, bPassthrough):\n\t\tcall_sdk_function('PrlVmDev_SetPassthrough', self.handle, bPassthrough)", "def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.p = pdict.pop('p')", "def populate_instance(self, mapper, selectcontext, row, instance, **flags):\n instance.TEST = \"hello world\"\n return EXT_CONTINUE", "def passer(self, value=None):\n pass", "def post_execute(self):", "def on_assign(self):", "def put(self, **kwargs):\n logging.debug(\"In put() for FTDDeviceHAPairs class.\")\n # Attempting to \"Deploy\" during Device registration causes issues.\n self.fmc.autodeploy = False\n return super().put(**kwargs)", "def passive(self,target):\r\n target.temp[\"cannot_act\"][\"temp\"] = True", "def register_instance(self, instance):\n self.instance = instance", "def forward_pass(self):", "def __setstate__(self, state):\n return None", "def put(self):\n self._val = True", "def transfer(self):\n pass", "def __setstate__(self, state):\n\n self.set(DER = state)", "def set_turn_holder(active_entity: EntityID):\n store.turn_holder = active_entity", "def __setstate__(self, d):\n\t\tself.__dict__ = d", "def __setstate__(self, state):\n\n self.list = state" ]
[ "0.6201083", "0.614531", "0.614531", "0.614531", "0.614531", "0.614531", "0.6030137", "0.5798542", "0.57683265", "0.5758265", "0.5750186", "0.57295257", "0.567436", "0.56572455", "0.56152976", "0.5603632", "0.5594542", "0.5575087", "0.55718654", "0.55426687", "0.54812384", "0.542333", "0.53874207", "0.536859", "0.5360508", "0.5359031", "0.5348714", "0.53438604", "0.5343683", "0.5327227" ]
0.6298621
0
get the passvoid to the managed instance
def get_passvoid(self): return self.passvoid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n pass", "def get(self):\n pass", "def get(self):\n pass", "def get(self):\n pass", "def object(self):", "def retrieve(self):\n pass", "def context(self) -> Any:\n ...", "def context(self) -> CONTEXT:", "def get_transfer(self):\n return self._transfer", "def _get_state(self):", "def get(self):\n return", "def get(self):\n return", "def forward_pass(self):", "def get_object_to_run(self):", "def get():", "def get():", "def get_result(self, state):\n pass", "def _get_instance(self):", "def _get_instance(self):", "def get(self):\n return None", "def __call__(self):\n return self.referee()", "def post_execute(self):", "def target(self):", "def __get__(self, instance, owner):\n return self.xyz", "def getvalue(self):\n ...", "def getvalue(self):\n ...", "def _get(self):\n return None", "def get(self):\r\n raise NotImplementedError", "def call(self):", "def transfer(self):\n pass" ]
[ "0.59444445", "0.59444445", "0.59444445", "0.59444445", "0.58766246", "0.5858545", "0.5811761", "0.5790257", "0.57571363", "0.57146895", "0.5700108", "0.5700108", "0.5633125", "0.5611686", "0.55938387", "0.55938387", "0.5585385", "0.55678827", "0.55678827", "0.55592895", "0.55553067", "0.5553812", "0.5548197", "0.55450654", "0.5542865", "0.5542865", "0.5534726", "0.55094063", "0.54976785", "0.54966134" ]
0.6621574
0
make all managed instances plus the starter itself crash.
def crash_instances(self): try: if self.instance.status() == psutil.STATUS_RUNNING or self.instance.status() == psutil.STATUS_SLEEPING: print("generating coredump for " + str(self.instance)) gcore = psutil.Popen(["gcore", str(self.instance.pid)], cwd=self.basedir) print("launched GCORE with PID:" + str(gcore.pid)) gcore.wait() self.kill_instance() else: print("NOT generating coredump for " + str(self.instance)) except psutil.NoSuchProcess: logging.info("instance already dead: " + str(self.instance)) for instance in self.all_instances: instance.crash_instance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n cause_a_bunch_of_exceptions_to_happen()", "def detect_fatal_errors(self):\n for instance in self.all_instances:\n instance.detect_fatal_errors()", "def test_too_many_cores(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n instance_ids1 = []\n instance_ids2 = []\n for index in xrange(FLAGS.max_cores):\n instance_id = self._create_instance()\n compute1.run_instance(self.context, instance_id)\n instance_ids1.append(instance_id)\n instance_id = self._create_instance()\n compute2.run_instance(self.context, instance_id)\n instance_ids2.append(instance_id)\n instance_id = self._create_instance()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_run_instance,\n self.context,\n instance_id)\n db.instance_destroy(self.context, instance_id)\n for instance_id in instance_ids1:\n compute1.terminate_instance(self.context, instance_id)\n for instance_id in instance_ids2:\n compute2.terminate_instance(self.context, instance_id)\n compute1.kill()\n compute2.kill()", "def cleanup_resources(self, restart=False):", "def terminate_preemptible_instances(self, context, instances):\n # NOTE(aloga): we should not delete them directly, but probably send\n # them a signal so that the user is able to save her work.\n elevated = context.elevated()\n for instance in instances:\n LOG.info(_LI(\"Deleting %(uuid)s\") % {\"uuid\": instance[\"uuid\"]})\n instance = self.compute_api.get(elevated,\n instance[\"uuid\"],\n want_objects=True)\n self.compute_api.delete(elevated, instance)", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def check_vm_errors(st):\n\n global api, owned_instances\n owned_instances_changed = False\n\n logging.info(\"Check VMs in error state...\")\n\n # Get all instances in \"error\" state\n try:\n all_instances = api.get_all_instances()\n\n # Clean up list from nonexisting instances\n new_owned_instances = []\n for o in owned_instances:\n keep = False\n for a in all_instances:\n if o == a.id:\n keep = True\n break\n if keep:\n new_owned_instances.append(o)\n else:\n logging.debug(\"Unknown owned instance removed: %s\" % o)\n owned_instances_changed = True\n if owned_instances_changed:\n owned_instances = new_owned_instances\n\n # Only the ones in error state (generator)\n error_instances = ( x for x in all_instances if x.status(token_id=api.keystone.token_id) == 'error' and x.id in owned_instances )\n\n except Exception as e:\n logging.error(\"Can't get list of owned instances in error: %s\" % e)\n error_instances = []\n\n # Print them\n n_vms_to_restart = 0\n for ei in error_instances:\n\n # Operations to do if a VM is in error:\n # 1. Terminate it\n # 2. Remove it from the managed list\n # 3. Decrement VMs allegedly running\n # 3. Cancel event restoring VMs allegedly running\n # 4. Run new instances (ignoring errors)\n # 5. 
Increase VMs allegedly running\n\n # Terminate VM in error\n try:\n ei.terminate(token_id=api.keystone.token_id)\n logging.debug(\"Shutdown via API of %s in error state succeeded\" % ei.id)\n except Exception as e:\n logging.error(\"Shutdown via API failed for %s in error state: %s\" % (ei.id, e))\n continue\n\n # Remove from \"owned\" list\n owned_instances.remove(ei.id)\n owned_instances_changed = True\n\n # Change VMs allegedly running\n change_vms_allegedly_running(st, -1)\n\n # Remove event for the current instance\n st['event_queue'][:] = [ x for x in st['event_queue'] if x['action'] != 'change_vms_allegedly_running' or x['params'][1] != ei.id ]\n\n # Restart that number of VMs\n n_vms_to_restart = n_vms_to_restart + 1\n\n # Attempt to run replacement VMs (no retry in this case!)\n if n_vms_to_restart > 0:\n list_ok = scale_up( n_vms_to_restart, valid_hostnames=st['workers_status'].keys() )\n for inst in list_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n if len(list_ok) == n_vms_to_restart:\n logging.debug(\"Successfully requested all the new replacement VMs: %s\" % ','.join(list_ok))\n else:\n logging.debug(\"Cannot request all the replacement VMs: only %d/%d succeeded (%s)\" % (len(list_ok), n_vms_to_restart, ','.join(list_ok)))\n\n # Save to disk\n if owned_instances_changed:\n save_owned_instances()\n\n # Re-run this command in X seconds\n return {\n 'action': 'check_vm_errors',\n 'when': time.time() + cf['elastiq']['check_vms_in_error_every_s']\n }", "def _fail_on_bad_torque_start(self):\n for bundle in self._model.batch_get_bundles(state=State.WAITING_FOR_WORKER_STARTUP, bundle_type='run'):\n failure_message = self._read_torque_error_log(bundle.metadata.job_handle)\n if failure_message is None and time.time() - bundle.metadata.last_updated > 20 * 60:\n failure_message = 'Worker failed to start. 
You may have requested too many resources.'\n if failure_message is not None:\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})", "def cleanup_all(cls):\n for i in tuple(cls.instances):\n i.cleanup()", "def rescue(self, instance):\n pass", "def _cleanup_running_deleted_instances(self, context):\n action = CONF.running_deleted_instance_action\n\n if action == \"noop\":\n return\n\n # NOTE(sirp): admin contexts don't ordinarily return deleted records\n with utils.temporary_mutation(context, read_deleted=\"yes\"):\n for instance in self._running_deleted_instances(context):\n if action == \"log\":\n LOG.warning(_LW(\"Detected instance with name label \"\n \"'%s' which is marked as \"\n \"DELETED but still present on host.\"),\n instance.name, instance=instance)\n\n elif action == 'shutdown':\n LOG.info(_LI(\"Powering off instance with name label \"\n \"'%s' which is marked as \"\n \"DELETED but still present on host.\"),\n instance.name, instance=instance)\n try:\n try:\n # disable starting the instance\n self.driver.set_bootable(instance, False)\n except NotImplementedError:\n LOG.debug(\"set_bootable is not implemented \"\n \"for the current driver\")\n # and power it off\n self.driver.power_off(instance)\n except Exception:\n msg = _LW(\"Failed to power off instance\")\n LOG.warn(msg, instance=instance, exc_info=True)\n\n elif action == 'reap':\n LOG.info(_LI(\"Destroying instance with name label \"\n \"'%s' which is marked as \"\n \"DELETED but still present on host.\"),\n instance.name, instance=instance)\n bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(\n context, instance.uuid, use_slave=True)\n self.instance_events.clear_events_for_instance(instance)\n try:\n self._shutdown_instance(context, instance, bdms,\n notify=False)\n self._cleanup_volumes(context, instance.uuid, bdms)\n except Exception as e:\n LOG.warning(_LW(\"Periodic cleanup failed to delete \"\n \"instance: %s\"),\n e, instance=instance)\n else:\n raise Exception(_(\"Unrecognized value '%s'\"\n \" for CONF.running_deleted_\"\n \"instance_action\") % action)", "def testFailure():\n run(\"chariot-me\") #Start management-engine without initial deplflag\n egress()", "def terminate_instance(self, keep_instances=False):\n\n lh.subsubsection(\"terminating instances for: \" + str(self.name))\n logging.info(\n \"StarterManager: Terminating starter instance: %s\", str(self.default_starter_args + self.arguments)\n )\n\n logging.info(\"This should terminate all child processes\")\n self.instance.terminate()\n logging.info(\"StarterManager: waiting for process to exit\")\n exit_code = self.instance.wait()\n self.add_logfile_to_report()\n # workaround BTS-815: starter exits 15 on the wintendo:\n if IS_WINDOWS and exit_code == 15:\n exit_code = 0\n\n if exit_code != 0:\n raise Exception(\"Starter %s exited with %d\" % (self.basedir, exit_code))\n\n old_log = self.basedir / \"arangodb.log.old\"\n logging.info(\n \"StarterManager: done - moving logfile from %s to %s\",\n str(self.log_file),\n str(old_log),\n )\n if old_log.exists():\n old_log.unlink()\n self.log_file.rename(old_log)\n\n for instance in self.all_instances:\n instance.rename_logfile()\n if not instance.detect_gone():\n print(\"Manually terminating instance!\")\n instance.terminate_instance(False)\n\n if keep_instances:\n for i in self.all_instances:\n i.pid = None\n i.ppid = None\n return False\n # Clear instances as they have been 
stopped and the logfiles\n # have been moved.\n ret = False\n for instance in self.all_instances:\n print(\"u\" * 80)\n if instance.search_for_warnings(True):\n ret = True\n self.is_leader = False\n self.all_instances = []\n return ret", "def upgrade_instances(self, which_instances, moreargs, waitpid=True, force_kill_fatal=True):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n i.terminate_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n True,\n )\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n [],\n False,\n )", "def run(self):\n self.create_all_sync_instances()", "def stop_all_instances(self):\n print '# Stopping all the instances'\n number = self.compute.stop_all_instances()\n print '%d instances were stopped' % number", "def fix(self):\n\n pm.delete(self.errorNodes)\n\n self.run()", "def noise_application_instances(self):\n # Add some \"noise\" application instances to the DB for every test, to\n # make the tests more realistic.\n factories.ApplicationInstance.create_batch(size=3)", "def singularity_rm(self):\n Client.instances(self.pid, quiet=self.quiet).stop()", "def test_exception_in_all_worker_process(self):\n pool = ProcessPool(5)\n pool.start(ExceptionGeneratingWorker_5)\n with self.assertRaises(RuntimeError):\n for _ in range(10000):\n pool.ventilate(\"Datanum\")\n time.sleep(.1)", "def cleanup_resources(self, kernel_id, restart=False):", "def kill_all():\n compose_kill_all()", "def create_instances(self):\n disk_d = \"//\"+self.host+\"/d$\"\n mask = r\"^IBM$|^WebSphere.*\"\n root_flag = 0\n # print(os.listdir(disk_d)) #checkpoint\n for item in os.listdir(disk_d):\n searchObj = re.search(mask, item, re.M|re.I)\n if searchObj:\n root_flag = 1\n rootdir=disk_d+\"/\"+searchObj.group()\n # print(rootdir) #checkpoint\n\n if os.path.isdir(rootdir):\n candidates=os.listdir(rootdir)\n # print(candidates) #checkpoint\n for candidate in candidates:\n if os.path.isdir(rootdir+'/'+candidate+'/profiles'):\n user_install_root=rootdir+'/'+candidate\n candidate_instance=Instance(user_install_root)\n candidate_instance.get_profiles()\n if candidate_instance.profiles:\n self.instances.append(candidate_instance)\n # print(candidate_instance.uir+\": \"+str(candidate_instance.profiles)) #checkpoint\n\n if root_flag == 0: print(self.host+\" does not have IBM or WebSphere directory on disk D\")", "def reset():\n Vessel.reset_instances()", "def __exit__(self, exc_type, exc_value, traceback): \n self.shutdown()", "def _gracefully_stop(self):\n pass", "def test_concurrent_instances(self):\n cm = contextlib.ExitStack() # TODO: clean this up\n\n work_dir1 = Path(cm.enter_context(tempfile.TemporaryDirectory())) # TODO: make these delete only if no exception occured\n work_dir2 = Path(cm.enter_context(tempfile.TemporaryDirectory()))\n\n archive = RemotePrometheusArchive.for_tag('latest').download()\n prometheus1: PrometheusInstance = cm.enter_context(PrometheusInstance(archive, work_dir1))\n prometheus2: PrometheusInstance = cm.enter_context(PrometheusInstance(archive, work_dir2))\n\n prometheus1.start()\n\n with self.assertRaisesRegex(Exception, 'certificate verify failed'):\n prometheus2.start()\n\n\n cm.close()", "def test_non_additive_instance_creation(self):\n\n # local imports of code-under-test 
ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n type = 'f1.2xlarge'\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags = {'fsimcluster': 'testcluster'},\n always_expand=False)\n instances.should.have.length_of(1)\n\n # There should be one instance total now, across one reservation\n ec2_client = boto3.client('ec2')\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(1)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(1)", "def shutdown_instances(self):\r\n self.min_size = 0\r\n self.max_size = 0\r\n self.desired_capacity = 0\r\n self.update()", "def stopclean(self):\n raise Exception(\"Not implemented\")" ]
[ "0.6183212", "0.6157061", "0.5949658", "0.5940772", "0.58803564", "0.5872182", "0.58227324", "0.57449096", "0.573384", "0.5643141", "0.5623912", "0.5593848", "0.5592989", "0.559106", "0.55775046", "0.5566891", "0.5562831", "0.555892", "0.55581", "0.5557331", "0.5549696", "0.55376285", "0.5530706", "0.5511063", "0.55059254", "0.5468521", "0.5456432", "0.5443141", "0.54375386", "0.5435014" ]
0.70966077
0
wait for our instance to create a logfile
def wait_for_logfile(self): counter = 0 keep_going = True logging.info("Looking for log file.\n") while keep_going: self.check_that_instance_is_alive() if counter == 20: raise Exception("logfile did not appear: " + str(self.log_file)) counter += 1 logging.info("counter = " + str(counter)) if self.log_file.exists(): logging.info("Found: " + str(self.log_file) + "\n") keep_going = False time.sleep(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_log_file_created(self, mock_parsing_handler, mock_api_handler, mock_progress):\n\n directory = path.join(path_to_module, \"fake_ngs_data\")\n directory_status = DirectoryStatus(directory)\n log_file = path.join(directory, \"irida-uploader.log\")\n # Check that log file does not exist before starting\n self.assertFalse(path.exists(log_file))\n\n cli_entry._validate_and_upload(directory_status, False)\n\n # Make sure log file is created\n self.assertTrue(path.exists(log_file))", "def test_creation_logfile(self):\n log_file = os.path.join(DATA_DIR, 'sample_log.txt')\n manager = execution.LogManager('MainThread', log_file)\n LOGGER.debug('Log me!')\n manager.close()\n self.assertEqual(count_lines(log_file), 1)\n os.remove(log_file)", "def logManager(self):\n time.sleep(0.1)\n while True:\n try:\n time.sleep(0.2)\n data = self.logQ.get(block=False)\n except Queue.Empty:\n pass\n else:\n try:\n self.log_lock.acquire() \n self.log_file.logEntry(data)\n time.sleep(0.1)\n self.log_lock.release()\n except:\n print '*Unable to write to log file*'", "def wait_for_log(self, hub, success_criteria):\n timeMax = time() + BrowserConstants().CONTAINER_TIMEOUT\n line = 'error'\n while line not in BrowserConstants().CONTAINER_SUCCESS or time() < timeMax:\n for line in hub.logs().decode().split('\\n'):\n if success_criteria in line:\n logging.debug(line)\n return\n\n # TODO handle RemoteDisconnected\n # TODO check for running containers before creation/worker to store running containers", "def log_wait(logfile):\n if xopts['verbose']: print(\"** Watching logfile: %s\" % (logfile))\n with open(logfile, 'r') as tlog:\n stalker = tailer.follow(tlog)\n logline = stalker.next()\n\n return logline", "def init_logfile(self):\n\t\tif os.path.exists(self.logfile):\n\t\t\t# move the logfile aside and compress it\n\t\t\tbz_file = bz2.BZ2File(\"%s.bz2\" % self.logfile,'w')\n\t\t\tlog = open(self.logfile,'r')\n\t\t\tbz_file.writelines(log.readlines())\n\t\t\tlog.close()\n\t\t\tbz_file.close()\n\t\t#print \"Logging output to %s\" % self.logfile\n\t\tdate = dateutil.get_datetime()\n\t\ttime = dateutil.get_datetime(1)\n\t\tnew_file = open(self.logfile,'w')\n\t\tnew_file.write(\"#------------------------- RSYNC LOG -------------------------\\n#\\n\")\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Date',date))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Time',time))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Source',self.source))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Destination',self.destination))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Command',self.command))\n\t\tnew_file.write(\"#%12s: %s\\n\\n\" % ('Logfile',self.logfile))\n\t\tnew_file.close()\n\t\treturn True", "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")", "def __init__(self, abs_path_logfile):\n\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n handler = logging.handlers.TimedRotatingFileHandler(abs_path_logfile, when='D', interval=1)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)", "def 
establish(lvl='INFO', logName=None, logPath=None, backups=0):\n try:\n print 'Script Started. Setting up Logging.'\n\n # Set logging level\n if lvl == 'DEBUG':\n logLevel = logging.DEBUG\n elif lvl == 'INFO':\n logLevel = logging.INFO\n elif lvl == 'WARNING':\n logLevel = logging.WARNING\n elif lvl == 'ERROR':\n logLevel = logging.ERROR\n else:\n print 'Invalid logging level. Choose: ERROR, WARNING, INFO, DEBUG'\n return\n\n # Setup basic logging configuration to standard output stream\n logging.basicConfig(level=logLevel, format=\"%(asctime)s\\t%(levelname)s:\\t%(message)s\")\n \n if logName != None and logName.strip() != '':\n # A logName has been provided so create a log file\n if logPath == None or logPath.strip() == '':\n # If no logPath is provided, use relative path\n logPath = r'.\\\\'\n logPathName = os.path.join(logPath, str(logName).strip())\n # If backups are needed, set the write style (write/append)\n if backups == 0:\n logMode = 'w'\n else:\n logMode = 'a'\n # Setup logging to a file\n fh = logging.handlers.RotatingFileHandler(filename=logPathName, mode=logMode, backupCount=int(backups))\n fh.setLevel(logLevel)\n formatter = logging.Formatter('%(asctime)s\\t%(levelname)s:\\t%(message)s')\n fh.setFormatter(formatter)\n logging.getLogger('').addHandler(fh)\n if os.path.isfile(logPathName):\n fh.doRollover()\n info('STARTING THE SCRIPT: {0}'.format(sys.argv[0]))\n info('Script running on host: {0}'.format(socket.gethostname()))\n info('Script running under the account of: {0}'.format(os.environ.get('USERNAME')))\n info('Log file created at: {0}'.format(logPathName))\n else:\n info('STARTING THE SCRIPT: {0}'.format(sys.argv[0]))\n info('Script running on host: {0}'.format(socket.gethostname()))\n info('Script running under the account of: {0}'.format(os.environ.get('USERNAME')))\n fh = None\n return fh\n except:\n print 'Error Establishing Log: {0}'.format(traceback.format_exc())", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! 
Initialise folder first or reset your configuration!\")", "def on_start(self):\r\n self.log()", "def __enter__(self):\n try:\n run(['logger', 'BVT', 'starting', self.full_description()], \n host=self.dut, timeout=10)\n except SubprocessError:\n print 'INFO: unable to mark test log'\n if not self.record:\n return self\n if self.result_id is None:\n self.mdb = get_autotest()\n terms = {'test_case':self.description or 'to be determined',\n 'automation_user': getpwuid(getuid()).pw_gecos.split(',')[0],\n 'control_pid' : getpid(), 'start_time' : time(),\n 'development_mode' : 0,\n 'command_line':abbreviate(' '.join(sys.argv))}\n if self.dut:\n dutdoc = self.mdb.duts.find_one({'name':self.dut})\n self.dut_id = terms['dut'] = dutdoc['_id']\n terms['dut_name'] = dutdoc['name']\n if 'development_mode' in dutdoc:\n terms['development_mode'] = dutdoc['development_mode']\n self.result_id = self.mdb.results.save(terms)\n if self.job_id is not None:\n self.mdb.jobs.update({'_id':objectid.ObjectId(self.job_id)}, {'$set':{'results_id':self.result_id}})\n if self.build is None and self.dut:\n self.build = get_build(self.dut, timeout=10)\n self.mdb.results.update({'_id':self.result_id}, \n {'$set':{'build':self.build}})\n if self.dut:\n self.mdb.duts.update({'_id':terms['dut']}, {'$set': {\n 'build':self.build,\n 'control_command_line': abbreviate(' '.join(sys.argv)),\n 'result_id' : self.result_id}})\n if self.stdout_filter:\n self.record_queue = Queue()\n self.stream_process = Process(\n target=service_queue, \n args=[self.record_queue, self.result_id, \n self.dut, self.dut_id])\n self.stream_process.start()\n self.stdout_filter.add_callback(self, \n lambda *x: self.record_queue.put(x))\n\n if self.description:\n print 'HEADLINE: starting', self.full_description()\n get_track().updates.save({'result_id':self.result_id,\n 'action':'new result record'})\n return self", "def log_start():\n\n scriptDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n scriptName = os.path.splitext(os.path.basename(__file__))[0]\n log = logging.getLogger('cam_server')\n hdlr = logging.FileHandler(scriptDir+'/logs/'+scriptName+'.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n log.addHandler(hdlr)\n log.setLevel(logging.INFO)\n return log", "def __init__(self, logfile):\n\n self.logfile = logfile\n if self. 
logfile:\n self.file = open(logfile, \"w\")\n self.starttime = time.time()\n self.file.write(\"%.2f %s Starting log\\n\" % (time.time() - self.starttime, time.asctime()))", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def init_log_file(self):\r\n try:\r\n os.makedirs(config[\"server_log_path\"])\r\n except OSError:\r\n if not os.path.isdir(config[\"server_log_path\"]):\r\n raise\r\n server_log_file = logging.FileHandler(\r\n config[\"server_log_path\"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')\r\n server_log_file.setLevel(logging.DEBUG)\r\n server_log_file.setFormatter(file_formatter)\r\n server_log.addHandler(server_log_file)", "def _init_log(self):\n if not os_path_exists(self.log_file):\n self._write('', 'w')", "def init_log():\n os.system('rm -rf /target/testdriver.log || true')\n os.system('touch /target/testdriver.log')\n os.system(f\"chown {uid_gid_output} /target/testdriver.log\")\n os.system('chmod 664 /target/testdriver.log')", "def __init__(self, logfile):\r\n super(PopenWrapper, self).__init__()\r\n self.logfile = logfile", "def _createlog(self):\n\t\tif self.toemail and self.fromemail and self.smtphost:\n\t\t\t# Use the email logger as the first logger, so that when sending the email (in :meth:`EmailLogger.close`) fails, it will still be logged to the log file/stdout/stderr\n\t\t\tself._loggers.append(EmailLogger(self))\n\t\tif self.log2stderr:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stderr, self._formatlogline))\n\t\tif self.log2stdout:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stdout, self._formatlogline))\n\t\tif self.log2file:\n\t\t\t# Create the log file\n\t\t\tlogfilename = ul4c.Template(self.logfilename, \"logfilename\").renders(job=self)\n\t\t\tlogfilename = url.File(logfilename).abs()\n\t\t\tself.logfileurl = str(url.Ssh(misc.sysinfo.user_name, misc.sysinfo.host_fqdn or misc.sysinfo.host_name, logfilename.local()))\n\t\t\tskipurls = [logfilename]\n\t\t\tlogfile = logfilename.open(mode=\"w\", encoding=self.encoding, errors=self.errors)\n\t\t\tif self.loglinkname is not None:\n\t\t\t\t# Create the log link\n\t\t\t\tloglinkname = ul4c.Template(self.loglinkname, \"loglinkname\").renders(job=self)\n\t\t\t\tloglinkname = url.File(loglinkname).abs()\n\t\t\t\tskipurls.append(loglinkname)\n\t\t\t\tlogfilename = logfilename.relative(loglinkname)\n\t\t\t\ttry:\n\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\texcept OSError as exc:\n\t\t\t\t\tif exc.errno == errno.EEXIST:\n\t\t\t\t\t\tloglinkname.remove()\n\t\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\t\t\tself._loggers.append(URLResourceLogger(self, logfile, skipurls, self._formatlogline))", "def openLogfileConnection(self,):\n \n #\n # Imports\n #\n import sys\n import time\n import os\n \n #\n # for logmessages\n # \n tmpLogMessages = []\n \n #\n # check if logfile present open connection or create\n #\n SEAseqPipeLine.logfile = self.analysisPath + '/logfile.txt'\n if os.path.isfile(SEAseqPipeLine.logfile):\n if self.command == 'initiateAnalysis':\n print 'ERROR: the logfile already exists please use another path to initiate the analysis.\\n'\n sys.exit(1)\n else:\n SEAseqPipeLine.logfile = open(SEAseqPipeLine.logfile,'a',1)\n SEAseqPipeLine.logfile.write('----------------\\nConnection to logfile '+SEAseqPipeLine.logfile.name+' opened.\\n')\n return 0\n else:\n tmpLogMessage = 'Creating the logfile 
\"'+SEAseqPipeLine.logfile+'\".\\n'\n tmpLogMessages.append(tmpLogMessage)\n print tmpLogMessage\n SEAseqPipeLine.logfile = open(SEAseqPipeLine.logfile,'w',1)\n \n return tmpLogMessages", "def on_server_start(self):\n self._container = self._docker_client.containers.run(self.docker_image_name, detach=True, **self.docker_params)\n self.signal_ready()\n\n for log_line in self.get_lines():\n try:\n alert_dict = self.parse_line(log_line)\n if alert_dict:\n self.add_alert_to_queue(alert_dict)\n except Exception:\n self.logger.exception(None)", "def wait_for_log(self, regex, timeout=TIMEOUT):\n return self.wait_for_logs([regex], timeout)", "def verify_new_log_created(self, pod_type):\n pod_obj = self.get_pod_obj_based_on_id(pod_type)\n output_cmd = pod_obj.exec_cmd_on_pod(command=\"ls -lh /var/log/ceph\")\n expected_string = (\n self.podtype_id[pod_type][2]\n if pod_type == \"rgw\"\n else f\"{self.podtype_id[pod_type][2]}{self.podtype_id[pod_type][1]}\"\n )\n cnt_logs = len(re.findall(expected_string, output_cmd))\n if cnt_logs != int(self.podtype_id[pod_type][3]) + 1:\n log.info(output_cmd)\n log.error(\n f\"pod_type:{pod_type} cnt_logs_before_fill_log:\"\n f\"{self.podtype_id[pod_type][3]} cnt_logs_after_fill_log:{cnt_logs}\"\n )\n pod_obj.exec_cmd_on_pod(\n command=f\"dd if=/dev/urandom of=/var/log/ceph/{expected_string}.log bs=1M count=560\",\n out_yaml_format=False,\n container_name=\"log-collector\",\n )\n return False\n return True", "def checkLogs():\n run(\"cat /etc/chariot/logs\")#not sure if this is cat-able", "def _logging_subprocess(self):\n\n # Setup logging for logging subprocess\n setproctitle('flowbber - logging manager')\n\n # # Level\n level = self.LEVELS.get(self._verbosity, logging.DEBUG)\n\n # # Format\n if level != logging.DEBUG:\n format_tpl = self.FORMAT\n else:\n format_tpl = self.FORMAT_DEBUG\n formatter = ColoredFormatter(fmt=format_tpl, style='{')\n\n # # Handler\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n # # Configure baisc logging\n logging.basicConfig(handlers=[handler], level=level)\n\n # Start listening for logs and prints\n listener = QueueListener(self._log_queue, handler)\n listener.start()", "def _test():\n try:\n print 'Test for Loging'\n # Establish Logging at the beginning of the script\n fh = establish(lvl='DEBUG', logName='TestLog.txt', logPath='', backups=0)\n\n # Supply log functions with message as a STRING\n info('TEST - Info lvl')\n debug('TEST - Debug lvl')\n warning('TEST - Warning lvl')\n error('TEST - Error lvl')\n exception('TEST - Exception. 
See the exception below this line.')\n info('Would any of this be logged to ArcPy: {0}'.format(_logToArcpyMessagingWindow))\n\n except:\n exception('Error in main function of script')\n print 'ERROR WITH SCRIPT: {0}'.format(traceback.format_exc())\n finally:\n # Ensure to Shut-down the Logging\n info('Script Completed')\n shutdown(fh)\n print 'Test Complete'", "def init_logs():\n\n #Ensure that the directories are made\n make_dirs()\n\n #Create FileHandler logging handler, set it's log level, configure the log storage format,\n # and add the formatter to the root logger\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logging.root.addHandler(fh)\n logging.root.setLevel(logging.INFO)\n\n #Report it to the world!\n logging.info(\"Saving logs to \" + log_file)", "def execute_to_log(cmd, logfile, timeout=-1,\n watch_logs=[\n ('[syslog]', '/var/log/syslog'),\n ('[sqlslo]', '/var/log/mysql/slow-queries.log'),\n ('[sqlerr]', '/var/log/mysql/error.log')\n ],\n heartbeat=True, env=None, cwd=None\n ):\n\n if not os.path.isdir(os.path.dirname(logfile)):\n os.makedirs(os.path.dirname(logfile))\n\n logger = logging.getLogger(logfile)\n log_handler = logging.FileHandler(logfile)\n log_formatter = logging.Formatter('%(asctime)s %(message)s')\n log_handler.setFormatter(log_formatter)\n logger.addHandler(log_handler)\n\n descriptors = {}\n\n for watch_file in watch_logs:\n if not os.path.exists(watch_file[1]):\n logger.warning('Failed to monitor log file %s: file not found'\n % watch_file[1])\n continue\n\n try:\n fd = os.open(watch_file[1], os.O_RDONLY)\n os.lseek(fd, 0, os.SEEK_END)\n descriptors[fd] = {'name': watch_file[0],\n 'poll': select.POLLIN,\n 'lines': ''}\n except Exception as e:\n logger.warning('Failed to monitor log file %s: %s'\n % (watch_file[1], e))\n\n cmd += ' 2>&1'\n start_time = time.time()\n p = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env, cwd=cwd)\n\n descriptors[p.stdout.fileno()] = dict(\n name='[output]',\n poll=(select.POLLIN | select.POLLHUP),\n lines=''\n )\n\n poll_obj = select.poll()\n for fd, descriptor in descriptors.items():\n poll_obj.register(fd, descriptor['poll'])\n\n last_heartbeat = time.time()\n\n def process(fd):\n \"\"\" Write the fd to log \"\"\"\n global last_heartbeat\n descriptors[fd]['lines'] += os.read(fd, 1024 * 1024)\n # Avoid partial lines by only processing input with breaks\n if descriptors[fd]['lines'].find('\\n') != -1:\n elems = descriptors[fd]['lines'].split('\\n')\n # Take all but the partial line\n for l in elems[:-1]:\n if len(l) > 0:\n l = '%s %s' % (descriptors[fd]['name'], l)\n logger.info(l)\n last_heartbeat = time.time()\n # Place the partial line back into lines to be processed\n descriptors[fd]['lines'] = elems[-1]\n\n while p.poll() is None:\n if timeout > 0 and time.time() - start_time > timeout:\n # Append to logfile\n logger.info(\"[timeout]\")\n os.kill(p.pid, 9)\n\n for fd, flag in poll_obj.poll(0):\n process(fd)\n\n if time.time() - last_heartbeat > 30:\n # Append to logfile\n logger.info(\"[heartbeat]\")\n last_heartbeat = time.time()\n\n # Do one last write to get the remaining lines\n for fd, flag in poll_obj.poll(0):\n process(fd)\n\n # Clean up\n for fd, descriptor in descriptors.items():\n poll_obj.unregister(fd)\n os.close(fd)\n try:\n p.kill()\n except OSError:\n pass\n\n logger.info('[script exit code = %d]' % p.returncode)\n 
logger.removeHandler(log_handler)\n log_handler.flush()\n log_handler.close()\n return p.returncode", "def test_creation_no_logfile(self):\n # When we don't give the handler a URI, it creates a NullHandler\n # instance, so we don't save any of the logging messages to the log\n # file.\n manager = execution.LogManager('sample_thread_name')\n manager.close()\n self.assertEqual(manager.logfile_handler.__class__,\n logging.NullHandler)" ]
[ "0.6342506", "0.6339533", "0.632533", "0.62805986", "0.61698097", "0.6142417", "0.60563993", "0.5989396", "0.58973587", "0.58555603", "0.5847372", "0.5842015", "0.5833766", "0.58280855", "0.58130676", "0.5812685", "0.5794679", "0.57772213", "0.5774569", "0.57051474", "0.56672895", "0.56639576", "0.5659014", "0.56439865", "0.56435555", "0.5639434", "0.56275976", "0.5625284", "0.5624935", "0.56205183" ]
0.7679139
0
wait for our instance to bind its TCPports
def wait_for_port_bind(self): if self.starter_port is not None: count = 0 while count < 10: for socket in self.instance.connections(): if socket.status == "LISTEN" and socket.laddr.port == self.starter_port: print("socket found!") return count += 1 time.sleep(1) raise Exception(f"starter didn't bind {self.starter_port} on time!") print("dont know port")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_for_open_ports(self, instance_name=\"\"):\n ports = None\n if instance_name in wellknownports:\n ports = wellknownports[instance_name]\n else:\n elements = self.systemd_name.split(\"@\")\n if elements[0] in wellknownports:\n ports = wellknownports[elements[0]]\n if ports:\n ipautil.wait_for_open_ports('localhost', ports,\n self.api.env.startup_timeout)", "def socket_bind(self):\n try:\n self.socket.bind((self.host, self.port))\n self.socket.listen(5)\n except socket.error as e:\n print(\"Socket binding error: \" + str(e))\n time.sleep(5)\n self.socket_bind()\n return", "def _wait_for_port(self, delay=0.1, attempts=20):\n while attempts > 0:\n s = socket.socket()\n try:\n s.connect((self.host, self.port))\n except Exception:\n time.sleep(delay)\n attempts -= 1\n else:\n return\n finally:\n s.close()\n raise RuntimeError(\"Port %d is not open\" % self.port)", "def bind(self):\n self._conn = socket.socket(socket.AF_INET, self.protocol.value)\n try:\n self._conn.bind((self.host, self.port))\n except OSError as e:\n self.close()\n raise BindError(str(e))\n self._conn.setblocking(False)\n self._conn.listen(100)\n self._selector.register(self._conn, selectors.EVENT_READ, self.accept)\n\n # Event callback.\n self.event_callback[ConnectionEvent.ON_BIND](self._conn)\n\n self._mainloop()", "def __bind(self, args = []):\n \n try: \n\n # Start the local chat server and be ready to receive incoming requests\n localServerPort = self.__agent.startLocalServer()\n\n # Sleep a little bit to allow the new thread to open the listening port\n sleep(0.3)\n \n serverIp, serverPort = self.__cm.getConnectionInfo()\n\n self.__cm.send(p.T_BIND, [serverIp, localServerPort])\n reply = self.__cm.receive()\n \n if (reply.type == p.T_ERR):\n raise Exception, \"Port binding was not succussful!\"\n\n except Exception,e:\n self.__handleError('Bind', e)", "def _wait_for_connection(self, port, *args):\n getLogger(__name__).info(\"Waiting for connection on port {}...\"\n .format(port))\n listener = self._create_new_socket()\n listener.bind((\"\", port))\n listener.listen(1)\n conn, addr = listener.accept()\n\n self._set_socket(conn)\n getLogger(__name__).info(\"Connected to peer at {}:{}\"\n .format(addr[0], addr[1]))", "def test_get_unused_port() -> None:\n available_port = get_unused_port()\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.bind((\"\", available_port))\n assert int(sock.getsockname()[1]) == available_port", "def checkPort(self, port, servicename, hint):\n print (\"Checking remote port %s/tcp (%s)\" % (port, servicename)).ljust(65, '.'),\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect( (self._target,int(port)) )\n s.close()\n print \"[ OK ]\"\n except Exception, err:\n print \"[ Failed ]\"\n print \"\\n***ERROR: %s\" % err\n print \"Port %s/tcp seems to be closed\" % port\n print hint\n sys.exit(0)", "def bind_server(self):\n self.MAIN_CONNECTION.bind((self.HOST, self.PORT))", "def tcp_listening(port):\n return (\n subprocess.call(\n tcp_listening_cmd(port).split(),\n stdin=DEVNULL,\n stdout=DEVNULL,\n stderr=DEVNULL,\n close_fds=True,\n )\n == 0\n )", "def go_online(self, start=50000, tries=10):\n flag = False\n for listen_port in xrange(start, start+tries-1):\n if bind_to_port(self.s, listen_port):\n flag = True\n break\n\n if not flag:\n print \"Couldn't bind to connection port. 
Aborting...\"\n sys.exit()\n\n self.s.listen(25)\n print 'Server is listening at', listen_port\n self.port = listen_port", "def wait_for_port(port: int, host: Text = \"127.0.0.1\", timeout: float = 5.0):\n\n start_time = time.perf_counter()\n\n while True:\n try:\n with socket.create_connection((host, port), timeout=timeout):\n break\n except OSError as ex:\n time.sleep(0.01)\n if time.perf_counter() - start_time >= timeout:\n raise TimeoutError(\n \"Waited too long for the port {} on host {} to start accepting \"\n \"connections.\".format(port, host)\n ) from ex", "def port_in_use(port_num):\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('0.0.0.0', port_num))\n except OSError:\n return True\n else:\n return False", "def setup_for_run(self):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.ip_address, self.port))\n self.server.listen(100)", "def wait_for_container(self):\n i = 0\n while True:\n ip_address = self.btcd_container.attrs[\"NetworkSettings\"][\"IPAddress\"]\n if ip_address.startswith(\"172\"):\n self.rpcconn.ipaddress = ip_address\n break\n self.btcd_container.reload()\n time.sleep(0.5)\n i = i + 1\n if i > 20:\n raise Exception(\"Timeout while starting bitcoind-docker-container!\")", "def start(self):\n\n self.socket.bind((self.ip, self.port))\n self.socket.listen(self.listenNumber)\n self.printLine()\n print(\"start for listening \")", "def startListening(self, port=-1, findFreePort=False):\n res = self.listen(self.__address, port)\n if findFreePort and Preferences.getCooperation(\"TryOtherPorts\"):\n endPort = port + Preferences.getCooperation(\"MaxPortsToTry\")\n while not res and port < endPort:\n port += 1\n res = self.listen(self.__address, port)\n return res, port", "def port_connection(self, sock):\n sock.bind(('', 0)) # Bind to OS-assigned available & random port.\n sock.listen(1)", "def openRtpPort(self):\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tself.rtpSocket_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\t\t\t\tself.rtpSocket_client.bind(('', self.rtpPort))\r\n\t\t\t\tself.rtpSocket_client.settimeout(0.5)\r\n\t\t\t\tself.listenRtp()\r\n\t\t\texcept Exception as err:\r\n\t\t\t\tif (str(err) == \"[Errno 9] Bad file descriptor\"):\r\n\t\t\t\t\tbreak", "def test_connection(self):\n self._bind_to_service()", "def wait_for_port(port, host=\"localhost\", interval=30):\n print('Waiting for database connections to be available...')\n good = False\n while not good:\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host, port))\n good = True\n except socket.error:\n pass\n finally:\n sock.close()\n time.sleep(interval)", "def listen(self):\r\n self.theSocket.listen(0)\r\n print('Socket now listening')\r\n \r\n #wait to accept a connection - blocking call\r\n self.conn, self.addr = self.theSocket.accept()\r\n\r\n print('Connected with ' + self.addr[0] + ':' + str(self.addr[1]))\r\n return self.conn", "def run(self):\n HOST = 'localhost' # Symbolic name meaning all available interfaces\n PORT = 54123 # Arbitrary non-privileged port\n \n \n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n while(self.running):\n s.listen(1)\n conn, addr = s.accept()\n self.listen_to_connection(conn)\n conn.close()\n s.close()", "def setup(self):\n # Bind socket to local host and port\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n 
self.socket.settimeout(2)\n \n try:\n self.socket.bind((HOST, PORT))\n except socket.error:\n return False\n\n if self.running:\n # Start listening on socket\n self.socket.listen(2)\n print \"MapServer: Socket now listening.\"\n\n # Wait to accept a connection - blocking call\n try:\n self.connection, address = self.socket.accept()\n print \"MapServer: Socket connected with \" + address[0] + \":\" + str(address[1])\n self.connection.sendall(str(self.MAP_SIZE_PIXELS)+\"\\n\")\n return True\n except socket.error:\n return False", "def open_listener(self):\n\n try:\n self.listener = Listener((self.host, self.port))\n self.startup_success = True\n log.info(\"listening on '%s', %s\", self.host, self.port)\n except:\n self.startup_success = False\n log.exception(\"Could not bind socket '%s', %s\", self.host, self.port)\n\n self.startup.set()\n return self.startup_success", "def bind_ports(self, ip, ports): #{\n if isinstance(ports, int):\n ports = [ports]\n for p in ports:\n try:\n if p==0:\n port = self.socket.bind_to_random_port(\"tcp://%s\" % ip)\n else:\n self.socket.bind(\"tcp://%s:%i\" % (ip, p))\n port = p\n except zmq.ZMQError:\n # bind raises this if the port is not free\n continue\n except zmq.ZMQBindError:\n # bind_to_random_port raises this if no port could be found\n continue\n else:\n break\n else:\n raise zmq.ZMQBindError('Could not find an available port')\n\n url = 'tcp://%s:%i' % (ip, port)\n self.bound.add(url)\n self._ready = True\n\n return port", "def check_port(self):\r\n\t\treturn(self.connect.is_open)", "def WaitUntilServing(self, timeout=30.0):\n assert self._process, 'server was not started'\n finish_time = time.time() + timeout\n while time.time() < finish_time:\n if self._process.poll() is not None:\n raise Error('server has already exited with return: %r',\n self._process.returncode)\n if self._CanConnect():\n return\n time.sleep(0.2)\n raise Error('server did not start after %f seconds', timeout)", "def check_port_availability(self, hostname, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.logger.debug(\"Attempting to connect to {}:{}\\\n \".format(hostname, port))\n num_retries = 1000\n retry_count = 0\n port_up = False\n while(retry_count < num_retries):\n if(sock.connect_ex((hostname, int(port)))):\n self.logger.debug(\"{} port is up on {}\".format(port, hostname))\n port_up = True\n break\n retry_count += 1\n time.sleep(0.1)\n return port_up", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")" ]
[ "0.7288536", "0.70136964", "0.699281", "0.669659", "0.663772", "0.66184366", "0.6514401", "0.6453551", "0.6413905", "0.63941497", "0.63894254", "0.6319579", "0.6308585", "0.6289045", "0.6239562", "0.6228363", "0.6223435", "0.62091696", "0.62067133", "0.6188757", "0.6175939", "0.6159813", "0.61548287", "0.6138284", "0.6120944", "0.6082442", "0.6073268", "0.6066105", "0.60513693", "0.60339737" ]
0.8042169
0
kill the instance of this starter (it won't kill its managed services)
def kill_instance(self):
    logging.info("StarterManager: Killing: %s", str(self.default_starter_args + self.arguments))
    self.instance.kill()
    try:
        logging.info(str(self.instance.wait(timeout=45)))
        self.add_logfile_to_report()
    except Exception as ex:
        raise Exception("Failed to KILL the starter instance? " + repr(self)) from ex

    logging.info("StarterManager: Instance now dead.")
    self.instance = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill(self):\n # Prevent a weird behavior: when STOPPED and kill() is called, app crashes (FIXME)\n if self.__state is not ServiceState.STOPPED:\n os.kill(int(self.__properties['MainPID']), signal.SIGKILL)\n # Not nice but simple and currently working (FIXME)\n # TODO: Change time.sleep to wait until process of same service but different PID is up and running\n time.sleep(0.5)", "def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')", "def singularity_rm(self):\n Client.instances(self.pid, quiet=self.quiet).stop()", "def kill_vrouter_instance(self):\n # Stop vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"Stopping vrouter pid=\" + str(self.pid))\n if (self.pid > 0):\n try:\n os.kill(self.pid, signal.SIGTERM)\n time.sleep(1)\n except OSError as e:\n self.logger.error(e)", "def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")", "def kill(self):\n\n self.proc.kill()", "def close(self):\n subprocess.call([\"pkill\", \"controller\"])", "def stop_instance(InstanceId=None, Force=None):\n pass", "def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()", "def stop(self):\n self.scion_sh('stop')", "def kill(self):\n self._stop_proc(signal.SIGKILL)", "def stop(self):\n os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)", "def kill_instance(py, accelerator, sig_name):\n acc_client = get_accelerator_client(py, accelerator)\n acc_client.kill_instance(sig_name)", "def kill():\n Log.info(\"Kill tns processes.\")\n if Settings.HOST_OS == OSType.WINDOWS:\n Process.kill(proc_name='node')\n else:\n Process.kill(proc_name='node', proc_cmdline=Settings.Executables.TNS)\n Process.kill_by_commandline(cmdline='webpack.js')", "def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False", "def stop_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"stop %s\" % item.strip())", "def stop():\n if env.latest and not env.python3:\n sudo('/bin/systemctl stop demo-latest.service', shell=False)\n elif env.latest and env.python3:\n sudo('/bin/systemctl stop demo-latest-py3.service', shell=False)\n else:\n # demo site is multi instance, cant do supervisor for now\n with cd(env.directory):\n sudo('./bin/supervisorctl stop all', user=env.deploy_user)", "def stop():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --stop-daemon'])", "def stop(self):\n self.killed = True", "def stop_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"stop\", service_name])", "def kill(self):\n self.send_signal(signal.SIGKILL)", "def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == 
\"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def stop():\n with cd(env.directory):\n sudo('./bin/supervisorctl stop all', user=env.deploy_user)", "def kill(self):\r\n\r\n endpoint = self._get_nailgun_endpoint()\r\n if endpoint:\r\n self._log_kill(endpoint.pid, endpoint.port)\r\n try:\r\n os.kill(endpoint.pid, 9)\r\n except OSError:\r\n pass", "def kill(self):\n self._process.kill()", "def terminate(self):\n self._running = False", "def __del__(self):\n if self.child_pid:\n self.host.Kill(self.child_pid, IperfServer.KILL_STRING)", "def stop(params) -> None:\n check_root()\n stop_streamer(params)\n unload_kernel_module(params)\n stop_microservice(params)", "async def stop(self, now=False):\n alive = await self.remote_signal(15)\n\n try:\n self.stop_ec2_instance(self.ec2_instance_id) # function that uses boto3 to stop an instance based on instance_id\n except Exception as e:\n self.log.error(\"Error in terminating instance\") # easy to save the instance id when you start the instance\n self.log.error(str(e)) # this will print the error on our JupyterHub process' output\n\n self.clear_state()", "async def kill(self, restart: bool = False) -> None:\n pass" ]
[ "0.7303505", "0.7108762", "0.70873225", "0.70490146", "0.69961184", "0.6964912", "0.6952806", "0.69508827", "0.6916327", "0.688939", "0.686323", "0.681936", "0.6806005", "0.6758173", "0.6745181", "0.6741397", "0.6677145", "0.6674688", "0.6668985", "0.6658009", "0.6651729", "0.66455394", "0.6630754", "0.66288483", "0.6611984", "0.66058433", "0.6586204", "0.6570232", "0.65641314", "0.6548713" ]
0.7906417
0
replace the parts of the installation with information after an upgrade
kill the starter processes of the old version
revalidate that the old arangods are still running and alive
replace the starter binary with a new one. this has not yet spawned any children
def replace_binary_for_upgrade(self, new_install_cfg, relaunch=True):
    # On windows the install prefix may change,
    # since we can't overwrite open files:
    old_version = self.cfg.version
    self.default_starter_args = new_install_cfg.default_starter_args.copy()
    self.enterprise = new_install_cfg.enterprise
    self.replace_binary_setup_for_upgrade(new_install_cfg)
    with step("kill the starter processes of the old version"):
        logging.info("StarterManager: Killing my instance [%s]", str(self.instance.pid))
        self.kill_instance()
    with step("revalidate that the old arangods are still running and alive"):
        self.detect_instance_pids_still_alive()
    if relaunch:
        with step("replace the starter binary with a new one," + " this has not yet spawned any children"):
            self.respawn_instance(new_install_cfg.version)
            logging.info("StarterManager: respawned instance as [%s]", str(self.instance.pid))
        self.arangosh = None
        self.detect_arangosh_instances(new_install_cfg, old_version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()", "def upgrade(self):", "def upgrade(self):", "def replace_binary_setup_for_upgrade(self, new_install_cfg):\n # On windows the install prefix may change,\n # since we can't overwrite open files:\n self.cfg.set_directories(new_install_cfg)\n if self.cfg.hot_backup_supported:\n self.hotbackup_args = [\n \"--all.rclone.executable\",\n self.cfg.real_sbin_dir / \"rclone-arangodb\",\n ]", "def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()", "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def upgrade(self,summary_handle,role,rpm_keyword,image_url,dir_installer,exit_flag,mode,summary_var_dict={}):\n if image_url.endswith(\"/\"):\n imageurl_final = image_url\n else:\n imageurl_final = image_url + \"/\"\n\n length = len(imageurl_final.split('/')) -4\n cmd = \"yum clean all\"\n self.sendCmd(cmd,300)\n dir_installer_role = dir_installer + \"/\" + role\n self.changeDirectory(dir_installer_role)\n tmp_var = \"wget%s%s\" %(self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"false\":\n self.download_rpm(summary_handle,length,imageurl_final,role)\n else:\n self.download_rpm(summary_handle,length,imageurl_final,role)\n\n\n num_files = \"ls -lrt *\\.rpm | grep %s-[0-9] | awk \\'{print $NF}\\' | xargs ls -t | tail -n1\" %rpm_keyword\n output = self.sendCmd(num_files).split(\"\\n\")\n for each in output:\n if each.rstrip().endswith(\"rpm\"):\n\n ##### Step added for uninstalling the rpm before installing \n tmpcmd = \"yum -y remove \" + each.rstrip().rstrip(\".rpm\")\n\n\n tmpcmd1 = \"yum -y install \" + each.rstrip()\n tmp_var = \"%s%s%s\" %(tmpcmd1,self,role)\n\n ##### IF loop added for recovery option\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n continue\n\n\n output = self.sendCmd(tmpcmd,600)\n output = self.sendCmd(tmpcmd1,600)\n time.sleep(30)\n output1 = self.sendCmd(\"echo $?\").split(\"\\n\")\n output2 = [item.replace(\"\\r\", \"\") for item in output1]\n if \"0\" not in output2 :\n summary_handle.write(\"%s,%s,%s,fail \\n\" %(tmpcmd1,self,role))\n if exit_flag == \"yes\":\n report.fail(\"Installation failed for %s on node %s having role %s with following error message : \\n %s\" %(each.strip(),self,role,output))\n else:\n logger.info(\"Installation failed for %s on node %s having role %s with following error 
message : \\n %s\" %(each.strip(),self,role,output))\n else:\n summary_handle.write(\"%s,%s,%s,pass \\n\" %(tmpcmd1,self,role))\n logger.info(\"Successful installation of %s on node %s having role %s\" %(each.strip(),self,role))", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")", "def manually_launch_instances_for_upgrade(self, which_instances, moreargs, waitpid=True, kill_instance=False):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n if kill_instance:\n i.kill_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n moreargs,\n waitpid,\n )", "def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")", "def _prepare_self_update(settings):\n\n\t# sanity check: ensure that that this routine only runs once\n\tif portage._bin_path != portage.const.PORTAGE_BIN_PATH:\n\t\treturn\n\n\t# Load lazily referenced portage submodules into memory,\n\t# so imports won't fail during portage upgrade/downgrade.\n\t_preload_elog_modules(settings)\n\tportage.proxy.lazyimport._preload_portage_submodules()\n\n\t# Make the temp directory inside $PORTAGE_TMPDIR/portage, since\n\t# it's common for /tmp and /var/tmp to be mounted with the\n\t# \"noexec\" option (see bug #346899).\n\tbuild_prefix = os.path.join(settings[\"PORTAGE_TMPDIR\"], \"portage\")\n\tportage.util.ensure_dirs(build_prefix)\n\tbase_path_tmp = tempfile.mkdtemp(\n\t\t\"\", \"._portage_reinstall_.\", build_prefix)\n\tportage.process.atexit_register(shutil.rmtree, base_path_tmp)\n\n\torig_bin_path = portage._bin_path\n\tportage._bin_path = os.path.join(base_path_tmp, \"bin\")\n\tshutil.copytree(orig_bin_path, portage._bin_path, symlinks=True)\n\n\torig_pym_path = portage._pym_path\n\tportage._pym_path = os.path.join(base_path_tmp, \"pym\")\n\tshutil.copytree(orig_pym_path, portage._pym_path, symlinks=True)\n\n\tfor dir_path in (base_path_tmp, portage._bin_path, portage._pym_path):\n\t\tos.chmod(dir_path, 0o755)", "def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n lh.log_cmd(\" \".join([str(arg) for arg in args]))\n self.upgradeprocess = psutil.Popen(\n args,\n # stdout=subprocess.PIPE,\n # stdin=subprocess.PIPE,\n # stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n print(\"Upgrade commander has PID:\" + str(self.upgradeprocess.pid))", "def main():\n\n # a quick way to verify the version\n if getscriptversion:\n print('This script is running version: ' + scriptversion)\n exit(0)\n\n # verify that environmental and script requirements are met\n 
requirements()\n\n # pretty the screen up\n clear()\n # do the MD5 checksum\n checkmd5sum()\n if not user or not password:\n getcreds()\n\n # if device_file is provided parse the lines into a list of devices\n if device_file:\n with open(device_file) as line:\n devices = line.readlines()\n devices = [x.strip() for x in devices]\n else:\n devices = args.devices.split(\",\")\n\n for device in devices:\n\n device = Acos(device)\n print('')\n print('')\n print(dev_addr + ' ' + '{:*^100}'.format('Begin upgrade log for ' + dev_addr))\n print(dev_addr + ' ' + '{:*^100}'.format('Performing pre-upgrade checks'))\n\n # check if the device is online before running\n status = device.checkstatus()\n if status == 'FAIL':\n continue\n\n # authenticate to the device\n response = device.axapi_authenticate(user, password)\n if response == 'FAIL':\n continue\n # get the device hostname\n device.get_hostname()\n\n # get the currently running version\n version = device.get_running_ver()\n\n print(dev_addr + ' ' + '{:*^100}'.format(' Performing upgrade'))\n\n # if we are running 4.1.0 we have to use a different upgrade method\n if '4.1.0' in version:\n response = device.gui_upgrade(user, password)\n if response == 'FAIL':\n continue\n # for other versions just use the normal method\n else:\n response = device.upgrade()\n if response == 'FAIL':\n continue\n bootvar = device.get_bootvar()\n\n # if the user has specified they'd like to update the boot variable\n if updatebootvar:\n # why do work that we don't have to\n if partition in bootvar:\n print(dev_addr + ' Bootvar update requested, but not necessary, device already set to boot from ' + partition)\n # if you're not already set to boot from the partition we installed to, update the bootvar\n else:\n device.update_bootvar()\n # if the user wants to reboot to initialize the new code reboot the box\n if reboot:\n device.reboot()\n # if the user wants to speed up the script, then just skip monitoring them\n if dontwaitforreturn:\n print(dev_addr + ' Skipping post-upgrade verification at user request')\n continue\n # otherwise you probably want to make sure the box comes up first\n else:\n device.reboot_monitor()\n if not reboot:\n print(dev_addr + '{:*^100}'.format('NOTICE NOTICE NOTICE'))\n print(dev_addr + 'You have requested the device not reboot, in order to initialize the new code you will need to reboot the device')\n # if you install to a partition the device won't reboot to, we probably want to stop you from shooting yourself in the foot\n elif not partition in bootvar:\n print(dev_addr + '{:*^100}'.format('NOTICE NOTICE NOTICE'))\n print(dev_addr + ' You have chosen to install to the partition that the device does not currently boot from.')\n print(dev_addr + ' If you wish for the device to run the new code upon reboot you need to update the boot variable manually.')\n if reboot:\n print(dev_addr + ' You have also requested a reboot which will not invoke the new code, SKIPPING REBOOT')\n elif reboot:\n device.reboot()\n # if the user wants to speed up the script, then just skip monitoring them\n if dontwaitforreturn:\n print(dev_addr + ' Skipping post-upgrade verification at user request')\n continue\n # otherwise you probably want to make sure the box comes up first\n else:\n device.reboot_monitor()\n # technically we could still use the old AXAPI token, however for sake of code clarity we're going to do a quick log off then back on\n # the alternative would be having to shove the remaining steps below into each of the appropriate loops making this a 
bit more\n # spaghettish than it already is\n else:\n device.axapi_logoff()\n\n print(dev_addr + ' ' + '{:*^100}'.format(' Performing post-upgrade checks'))\n\n # since it is very likely the box has rebooted, and our old token is gone, lets get a new one\n response = device.axapi_authenticate(user, password)\n if response == 'FAIL':\n continue\n\n # find out where the device was booted from\n bootdefault = device.get_bootvar()\n\n # get the version of the currently booted partition\n device.get_ver(bootdefault)\n\n # get the current boot variable\n device.get_bootvar()\n\n # get the current running version\n device.get_running_ver()\n\n # log off\n device.axapi_logoff()\n print(dev_addr + ' ' + '{:*^100}'.format(' End upgrade log for ' + dev_addr))", "def tweak_new_filesystem(root_dir):\n\n # create a symlink for insserv\n force_symlink('../usr/lib/insserv/insserv',\n os.path.join(root_dir, 'sbin/insserv'))\n\n # create a symlink for awk\n force_symlink('mawk', os.path.join(root_dir, 'usr/bin/awk'))\n\n # Nvidia keeps packaging up a broken post-install script for their cudnn\n # deb. Freaking nvidia\n cudnn_postinst_path = 'var/lib/dpkg/info/libcudnn6-dev.postinst'\n cudnn_postinst_path = os.path.join(root_dir, cudnn_postinst_path)\n\n if os.path.exists(cudnn_postinst_path):\n with open(cudnn_postinst_path, 'r') as infile:\n content = infile.read()\n if not content.startswith(\"#!\"):\n with open(cudnn_postinst_path, 'w') as outfile:\n outfile.write('#! /bin/sh\\n')\n outfile.write(content)\n\n # NOTE(josh): patch the base-packages post-install hook so it doesn't\n # complain about files in /var/run\n basefiles_path = os.path.join(root_dir,\n 'var/lib/dpkg/info/base-files.postinst')\n if os.path.exists(basefiles_path):\n apply_patch_text(BASE_FILES_PATCH, root_dir)\n\n # NOTE(josh): ifupdown should depend on initscripts, but it doesn't\n status_path = os.path.join(root_dir, 'var/lib/dpkg/status')\n tempfile_path = status_path + '.tmp'\n with open(tempfile_path, 'wb') as outfile:\n with open(status_path, 'rb') as infile:\n for line in infile:\n outfile.write(line)\n if line.strip() == 'Package: ifupdown':\n break\n\n for line in infile:\n if line.startswith('Depends: '):\n line = ', '.join(line.strip().split(', ') + ['initscripts']) + '\\n'\n outfile.write(line)\n break\n else:\n outfile.write(line)\n\n for line in infile:\n outfile.write(line)\n os.rename(tempfile_path, status_path)\n\n # NOTE(josh): resolvconf tries to a write a file in this directory\n try:\n target_path = os.path.join(root_dir, 'run/resolvconf/interface')\n os.makedirs(target_path)\n except OSError:\n if not os.path.isdir(target_path):\n raise\n\n # NOTE(josh): Can't postinst makedev without CAP_MKNOD\n if os.getuid() != 0:\n makedev_postinst = os.path.join(root_dir,\n 'var/lib/dpkg/info/makedev.postinst')\n if os.path.exists(makedev_postinst):\n os.rename(makedev_postinst, makedev_postinst + '.bak')\n\n # remove temporary/boostrap files\n files_to_remove = ['etc/apt/sources.list.d/bootstrap.list']\n\n for filename in files_to_remove:\n file_path = os.path.join(root_dir, filename)\n if os.path.exists(file_path):\n os.remove(file_path)", "def upgrade_instances(self, which_instances, moreargs, waitpid=True, force_kill_fatal=True):\n for instance_type in which_instances:\n for i in self.all_instances:\n if i.instance_type == instance_type:\n i.terminate_instance()\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n 
moreargs,\n True,\n )\n i.launch_manual_from_instance_control_file(\n self.cfg.sbin_dir,\n self.old_install_prefix,\n self.cfg.install_prefix,\n self.cfg.version,\n self.enterprise,\n [],\n False,\n )", "def upgrade():\n config = ConfigManager()\n apps = config['apps']\n for i, app in progressbar(enumerate(apps), redirect_stdout=True):\n z = Zap(app)\n if i == 0:\n z.update(show_spinner=False)\n else:\n z.update(check_appimage_update=False, show_spinner=False)", "def restart(self):\n print \"Restarting \" + executable + \" \" + str(argv) \n execl(executable, *([executable]+argv))", "def deploy():\n update_treesheets()\n restart_treesheets()", "def re_process(self):\n rmtree(self.processed_dir)\n os.makedirs(self.processed_dir)\n self.process()\n\n print('Done!')", "def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()", "def pre_stop_backup_cores(self, env):\n import params\n env.set_params(params)\n\n if compare_versions(format_stack_version(params.version), '4.2.0.0') >= 0:\n solr_home_dir=params.solr_data_dir\n else: #4.1.0.0\n solr_home_dir=params.old_lib_dir + \"/data\"\n\n unique = get_unique_id_and_date()\n backup_solr_dir=\"/tmp/upgrades/{0}/solr_{1}\".format(params.version, unique)\n backup_solr_cores=\"/tmp/solr/cores\"\n\n if os.path.isdir(solr_home_dir) and not os.path.isdir(backup_solr_dir):\n os.makedirs(backup_solr_dir)\n Execute(('cp', '-r', solr_home_dir+\"/.\", backup_solr_dir),\n sudo=True\n )\n\n if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:\n Directory(backup_solr_cores,\n action=\"delete\",\n create_parents=True)\n\n Directory(backup_solr_cores,\n mode=0755,\n cd_access='a',\n owner=params.solr_user,\n create_parents=True,\n group=params.user_group\n )\n\n Execute(('cp', '-r', solr_home_dir+\"/.\", backup_solr_cores),\n user=params.solr_user\n )", "def upgrade_server():\n log('Atualizando programas', yellow)\n sudo('apt-get -y upgrade')", "async def async_post_installation(self):\n if self.data.config_flow:\n if self.data.full_name != \"hacs/integration\":\n await self.reload_custom_components()\n if self.data.first_install:\n self.pending_restart = False\n return\n self.pending_restart = True", "def install_step(self):\n\n# if LooseVersion(self.version) < LooseVersion('2012-10-05'):\n\tif (False):\n self.inchworm()\n self.chrysalis()\n self.kmer()\n self.butterfly()\n\n bwapluginver = self.cfg['bwapluginver']\n if bwapluginver:\n self.trinityplugin('bwa-%s-patched_multi_map' % bwapluginver)\n\n if self.cfg['RSEMmod']:\n self.trinityplugin('RSEM-mod', cc=os.getenv('CXX'))\n\n else:\n self.jellyfish()\n\n inchworm_flags = self.inchworm(run=False)\n chrysalis_flags = self.chrysalis(run=False)\n\n cc = os.getenv('CC')\n cxx = os.getenv('CXX')\n\n lib_flags = \"\"\n for lib in ['ncurses', 'zlib']:\n libroot = get_software_root(lib)\n if libroot:\n lib_flags += \" -L%s/lib\" % libroot\n\n fn = \"Makefile\"\n for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):\n\n line = re.sub(r'^(INCHWORM_CONFIGURE_FLAGS\\s*=\\s*).*$', r'\\1%s' % inchworm_flags, line)\n line = re.sub(r'^(CHRYSALIS_MAKE_FLAGS\\s*=\\s*).*$', r'\\1%s' % chrysalis_flags, line)\n line = re.sub(r'(/rsem && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=%s CXX=\"%s %s\" CFLAGS_EXTRA=\"%s\"\\n' % (cc, cxx, lib_flags, lib_flags), line)\n line = 
re.sub(r'(/fastool && \\$\\(MAKE\\))\\s*$',\n r'\\1 CC=\"%s -std=c99\" CFLAGS=\"%s ${CFLAGS}\"\\n' % (cc, lib_flags), line)\n\n sys.stdout.write(line)\n\n trinity_compiler = None\n comp_fam = self.toolchain.comp_family()\n if comp_fam in [toolchain.INTELCOMP]:\n trinity_compiler = \"intel\"\n elif comp_fam in [toolchain.GCC]:\n trinity_compiler = \"gcc\"\n else:\n self.log.error(\"Don't know how to set TRINITY_COMPILER for %s compiler\" % comp_fam)\n\n cmd = \"make TRINITY_COMPILER=%s\" % trinity_compiler\n run_cmd(cmd)\n\n # butterfly is not included in standard build\n self.butterfly()\n\n # remove sample data if desired\n if not self.cfg['withsampledata']:\n try:\n shutil.rmtree(os.path.join(self.cfg['start_dir'], 'sample_data'))\n except OSError, err:\n self.log.error(\"Failed to remove sample data: %s\" % err)", "def _update(self):\n candidates = _find_running_exe(path.join(self.run_dir, \"osiris\"))\n\n try:\n if not candidates: # No process running found\n self.processes = None\n # Try to find a job in queue\n jobs = _get_grid_jobs()\n if not jobs: # Either no qstat or empty list\n self.running_mode = \"\"\n else:\n script_path = path.abspath(path.join(self.run_dir, \"start.sh\"))\n valid_jobs = list(filter(lambda j: j[\"script\"] == script_path, jobs))\n if valid_jobs:\n if len(valid_jobs) > 1:\n logger.warning(\"More than one grid job was found for the run.\")\n self.job = valid_jobs[0]\n self.running_mode = \"grid\"\n else: # No queued job\n self.running_mode = \"\"\n\n else:\n self.processes = list(map(psutil.Process, candidates))\n self.running_mode = \"local\"\n\n except psutil.NoSuchProcess:\n # If the processes have died before processing was completed.\n self.processes = None\n self.running_mode = \"\"", "def update():\n with cd(env.directory):\n\n # update plone\n result = sudo('git pull', user=env.deploy_user)\n quick_update = 'Already up-to-date.' 
in result\n\n if quick_update:\n # Plonesite Recipe replaces site on the fly\n print 'UPDATE: No full Buildout required: {0:s}'.format(result)\n # buildout\n stop()\n sudo('./bin/buildout install plonesite', user=env.deploy_user)\n start()\n\n else:\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n\n sudo('rm -rf ./var/blobstorage', user=env.deploy_user)\n sudo('rm -rf ./var/filestorage', user=env.deploy_user)\n sudo('rm .installed.cfg', user=env.deploy_user)\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n sudo('./bin/zeoclient_debug adduser admin admin', user=env.deploy_user) # noqa: E501\n\n # load page twice to fill cache and prevent a bug showing raw html\n sudo('/usr/bin/wget -S -qO- demo.starzel.de > /tmp/demo.starzel.de.html', user=env.deploy_user) # noqa: E501\n sudo('/usr/bin/wget -S -qO- demo.starzel.de > /tmp/demo.starzel.de.html', user=env.deploy_user) # noqa: E501", "def replaceAll(folderCurrent):\n folder7zip = os.path.abspath(os.path.join(folderCurrent, '_thirdparty',\n '7zip'))\n folderRelease = os.path.abspath(os.path.join(folderCurrent,\n '../', '_RELEASE'))\n folderRoot = os.path.abspath(os.path.join(folderCurrent,\n '../'))\n norelease = 0\n os.chdir(folderRelease)\n releaseNumber = []\n for file in glob.glob(\"*.zip\"):\n releaseName = file.replace('.zip', '')\n LatestVersion = re.compile(r'(\\d+)$').search(releaseName).group(1)\n releaseNumber.append(LatestVersion)\n if not releaseNumber:\n releaseNumber = ['0']\n norelease = 1\n versionNumberString = ':'.join(releaseNumber)\n versionNumber = findLargestNumber(versionNumberString)\n if (norelease == 1):\n versionNumber = versionNumber\n else:\n versionNumber = versionNumber + 1\n versionNumber = str(versionNumber)\n print(versionNumber)\n fileReleaseName = 'PC2DedicatedServerWrapper_1.' 
+ versionNumber + '.zip'\n\n fileRelase = os.path.abspath(os.path.join(folderRelease,\n fileReleaseName))\n\n os.chdir(folder7zip)\n os.system('7za.exe a -t7z \"' + fileRelase + '\" \"' + folderRoot + '\" -xr!_SRC -xr!.git* -xr!_RELEASE -xr!DedicatedServerWrapperGUI*')", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def update():\n\n # update plone\n with cd(env.directory):\n sudo('git pull', user=env.deploy_user)\n\n with cd(env.directory):\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('rm -rf ./src-mrd', user=env.deploy_user)\n else:\n sudo('./bin/pip install --no-cache-dir -r requirements.txt', user=env.deploy_user) # noqa: E501\n\n sudo('rm -rf ./var/blobstorage ./var/filestorage .installed.cfg ', user=env.deploy_user) # noqa: E501\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n sudo(\"sleep 10\")\n\n # create plonesite with addons (uses different ports for py2 and py3)\n if env.latest:\n if env.python3:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py3.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py2.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n\n # load page to warmup\n sudo('/usr/bin/wget -S -qO- {domain} > /dev/null'.format(domain=env.domain), user=env.deploy_user) # noqa: E501", "def test_update(executable):\n from tempfile import mkdtemp\n from shutil import rmtree\n from pylada.jobfolder.jobfolder import JobFolder\n from pylada.process.jobfolder import JobFolderProcess\n from pylada import default_comm\n from functional import Functional\n\n root = JobFolder()\n for n in xrange(3):\n job = root / str(n)\n job.functional = Functional(executable, [n])\n job.params['sleep'] = 1\n supp = JobFolder()\n for n in xrange(3, 6):\n job = supp / str(n)\n job.functional = Functional(executable, [n])\n job.params['sleep'] = 1\n\n comm = default_comm.copy()\n comm['n'] = 4\n\n dir = mkdtemp()\n try: \n program = 
JobFolderProcess(root, nbpools=2, outdir=dir, keepalive=True)\n assert program.keepalive \n\n # compute current jobs.\n program.start(comm)\n program.wait()\n assert hasattr(program, '_comm')\n\n # compute second set of updated jobs\n program.update(supp)\n program.wait()\n\n finally:\n try: rmtree(dir)\n except: pass\n\n # check with deleteold=True\n dir = mkdtemp()\n try: \n program = JobFolderProcess(root, nbpools=2, outdir=dir, keepalive=True)\n assert program.keepalive \n\n # compute current jobs.\n program.start(comm)\n program.wait()\n assert hasattr(program, '_comm')\n\n # compute second set of updated jobs\n program.update(supp, deleteold=True)\n assert hasattr(program, '_comm')\n program.wait()\n\n finally:\n try: rmtree(dir)\n except: pass" ]
[ "0.63134474", "0.5887858", "0.5887858", "0.58364576", "0.57861626", "0.57441574", "0.57368964", "0.57141185", "0.57095736", "0.56810385", "0.56625885", "0.56388724", "0.5615103", "0.5612521", "0.5526397", "0.55071336", "0.5483531", "0.5466706", "0.54461366", "0.5432789", "0.5417732", "0.541656", "0.5408389", "0.53810793", "0.53488153", "0.53396404", "0.53326", "0.5324019", "0.5318601", "0.53064966" ]
0.7012954
0
kill specific instances of this starter (it won't kill starter itself)
def kill_specific_instance(self, which_instances):
    for instance_type in which_instances:
        for instance in self.all_instances:
            if instance.instance_type == instance_type:
                instance.terminate_instance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill_instance(self):\n logging.info(\"StarterManager: Killing: %s\", str(self.default_starter_args + self.arguments))\n self.instance.kill()\n try:\n logging.info(str(self.instance.wait(timeout=45)))\n self.add_logfile_to_report()\n except Exception as ex:\n raise Exception(\"Failed to KILL the starter instance? \" + repr(self)) from ex\n\n logging.info(\"StarterManager: Instance now dead.\")\n self.instance = None", "def stop_test_instance(test_name=None):\n env.warn_only = True\n if test_name is not None:\n instances = [test_name]\n else:\n output = run('ls -1 %s' % env.site_root)\n instances = [x.strip() for x in output.split(\"\\n\")]\n for item in instances:\n sudo(\"stop %s\" % item.strip())", "def killAll(controller=False):", "def singularity_rm(self):\n Client.instances(self.pid, quiet=self.quiet).stop()", "def kill(self):\n \n self.killSlavePids()", "def kill_sync_processes(self, force, rev):\n for i in self.all_instances:\n if i.is_sync_instance():\n if not force and i.pid_file is not None and rev >= semver.VersionInfo.parse(\"0.15.0\"):\n print(\"Skipping manual kill\")\n return\n logging.info(\"manually killing syncer: \" + str(i.pid))\n i.terminate_instance()", "def kill_all():\n compose_kill_all()", "def stop(self, *args):\n if args[0] == 'all':\n for k, v in self.processers.items():\n if v:\n try:\n v.terminate()\n except:\n pass\n print 'Killed %s.' % k\n\n self.processers = dict.fromkeys(self.processers.keys())\n else:\n seq = args[0]\n try:\n self.processers['process%s' % seq].terminate()\n self.processers['process%s' % seq] = None\n print 'Killed process%s.' % seq\n except:\n print 'Have no process%s.' % seq", "def killExperiment(self, **kwargs):\n if kwargs['kill']=='YES':\n killRobot.sshKill()", "def kill(self):\n\n #Kill relevant process names\n if self.driver_type != 'firefox_wdm':\n os.system('pkill -f chrome')\n os.system('pkill -f Chrome')\n os.system('pkill -f chromedriver')\n else:\n os.system('pkill -f FireFox')\n #TODO: confirm this -> os.system('pkill -f geckodriver')", "def kill(self, id):", "def kill(self, id):", "def remote_kill():", "def kill_all(self):\n self._stop_all('kill')", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instances()", "def terminate_instance(self, keep_instances=False):\n\n lh.subsubsection(\"terminating instances for: \" + str(self.name))\n logging.info(\n \"StarterManager: Terminating starter instance: %s\", str(self.default_starter_args + self.arguments)\n )\n\n logging.info(\"This should terminate all child processes\")\n self.instance.terminate()\n logging.info(\"StarterManager: waiting for process to exit\")\n exit_code = self.instance.wait()\n self.add_logfile_to_report()\n # workaround BTS-815: starter exits 15 on the wintendo:\n if IS_WINDOWS and exit_code == 15:\n exit_code = 0\n\n if exit_code != 0:\n raise Exception(\"Starter %s exited with %d\" % (self.basedir, exit_code))\n\n old_log = self.basedir / \"arangodb.log.old\"\n logging.info(\n \"StarterManager: done - moving logfile from %s to %s\",\n str(self.log_file),\n str(old_log),\n )\n if old_log.exists():\n old_log.unlink()\n self.log_file.rename(old_log)\n\n for instance in self.all_instances:\n instance.rename_logfile()\n if not instance.detect_gone():\n print(\"Manually terminating instance!\")\n instance.terminate_instance(False)\n\n if keep_instances:\n for i in self.all_instances:\n i.pid = None\n i.ppid = None\n return False\n # Clear instances as they have been 
stopped and the logfiles\n # have been moved.\n ret = False\n for instance in self.all_instances:\n print(\"u\" * 80)\n if instance.search_for_warnings(True):\n ret = True\n self.is_leader = False\n self.all_instances = []\n return ret", "def kill(targets, controller=False):", "def kill_instance(py, accelerator, sig_name):\n acc_client = get_accelerator_client(py, accelerator)\n acc_client.kill_instance(sig_name)", "def stop_all_instances(self):\n print '# Stopping all the instances'\n number = self.compute.stop_all_instances()\n print '%d instances were stopped' % number", "def terminate_instances(self, ids):\n self.conn.terminate_instances(instance_ids=ids)", "def stop_instances(self, ids):\n self.conn.stop_instances(instance_ids=ids)", "def terminate_preemptible_instances(self, context, instances):\n # NOTE(aloga): we should not delete them directly, but probably send\n # them a signal so that the user is able to save her work.\n elevated = context.elevated()\n for instance in instances:\n LOG.info(_LI(\"Deleting %(uuid)s\") % {\"uuid\": instance[\"uuid\"]})\n instance = self.compute_api.get(elevated,\n instance[\"uuid\"],\n want_objects=True)\n self.compute_api.delete(elevated, instance)", "def _kill_self():\n os.kill(os.getpid(), signal.SIGKILL)", "def kill(self):\n kill_cmds = [\n \"sudo pkill '(daos_server|daos_io_server)' --signal INT\",\n \"sleep 5\",\n \"pkill '(daos_server|daos_io_server)' --signal KILL\",\n ]\n self.log.info(\"Killing any server processes\")\n pcmd(self._hosts, \"; \".join(kill_cmds), False, None, None)", "def stop(self):\n self.killed = True", "def kill(name, signal=9, exact=False):\n for pid in find(name, exact):\n run(\"kill -s {0} {1}\".format(signal, pid))", "def kill_vrouter_instance(self):\n # Stop vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"Stopping vrouter pid=\" + str(self.pid))\n if (self.pid > 0):\n try:\n os.kill(self.pid, signal.SIGTERM)\n time.sleep(1)\n except OSError as e:\n self.logger.error(e)", "def stopEngines():\n pass", "async def kill(self, restart: bool = False) -> None:\n pass", "def crash_instances(self):\n try:\n if self.instance.status() == psutil.STATUS_RUNNING or self.instance.status() == psutil.STATUS_SLEEPING:\n print(\"generating coredump for \" + str(self.instance))\n gcore = psutil.Popen([\"gcore\", str(self.instance.pid)], cwd=self.basedir)\n print(\"launched GCORE with PID:\" + str(gcore.pid))\n gcore.wait()\n self.kill_instance()\n else:\n print(\"NOT generating coredump for \" + str(self.instance))\n except psutil.NoSuchProcess:\n logging.info(\"instance already dead: \" + str(self.instance))\n\n for instance in self.all_instances:\n instance.crash_instance()" ]
[ "0.70153147", "0.68872017", "0.68046296", "0.6784485", "0.6766652", "0.6715903", "0.6670214", "0.660583", "0.660212", "0.6543471", "0.65000117", "0.65000117", "0.64805627", "0.6475557", "0.6455918", "0.6429913", "0.6328956", "0.6207044", "0.6205789", "0.62014425", "0.6166641", "0.6160382", "0.61603194", "0.6150022", "0.61052334", "0.6098099", "0.60844517", "0.60338074", "0.60096866", "0.59968233" ]
0.7285985
0
wait for the upgrade commanding starter to finish
def wait_for_upgrade(self, timeout=60):
    ret = None
    try:
        ret = self.upgradeprocess.wait(timeout=timeout)
    except psutil.TimeoutExpired as timeout_ex:
        msg = "StarterManager: Upgrade command [%s] didn't finish in time: %d" % (
            str(self.basedir),
            timeout,
        )
        raise TimeoutError(msg) from timeout_ex
    logging.info(
        "StarterManager: Upgrade command [%s] exited: %s",
        str(self.basedir),
        str(ret),
    )
    if ret != 0:
        raise Exception("Upgrade process exited with non-zero reply")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_wait_for_upgrade(self):\n self.run_test_suites(self.wait_for_upgrade_test_suite_list)", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "async def on_upgrade_complete(self, upgrade: UpgradeId):", "def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)", "def wait_for_upgrade_done_in_log(self, timeout=120):\n keep_going = True\n logging.info('Looking for \"Upgrading done\" in the log file.\\n')\n while keep_going:\n text = self.get_log_file()\n pos = text.find(\"Upgrading done.\")\n keep_going = pos == -1\n if keep_going:\n time.sleep(1)\n progress(\".\")\n timeout -= 1\n if timeout <= 0:\n raise TimeoutError(\"upgrade of leader follower not found on time\")\n for instance in self.all_instances:\n instance.wait_for_shutdown()", "async def wait_until_done(self) -> None:\n ...", "def wait_for_version_reply(self):\n frontends = self.get_frontends()\n for frontend in frontends:\n # we abuse this function:\n while frontend.get_afo_state() != AfoServerState.LEADER:\n progress(\".\")\n time.sleep(0.1)", "def wait(self):\n pass", "def wait(self):\n pass", "def waitUntilSuccess():", "def do_wait(self):\n pass", "def wait():\n pass", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def test_component_update_available_UPGRADE(self):\n MockPopen.mock_stdout = 'Inst a [old] (new from)'\n self.assertTrue(self.u.component_update_available())", "def wait(self):\n self.Popen.wait()", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")", "def wait(self):\n self.mainloop().wait()", "def test_nothing_to_upgrade(self, mock_click_echo):\n agent_config = self.load_agent_config(self.agent_name)\n result = self.run_cli_command(\"upgrade\", cwd=self._get_cwd())\n assert result.exit_code == 0\n mock_click_echo.assert_any_call(\"Starting project upgrade...\")\n mock_click_echo.assert_any_call(\n f\"Checking if there is a newer remote version of agent package '{agent_config.public_id}'...\"\n )\n mock_click_echo.assert_any_call(\n \"Package not found, continuing with normal upgrade.\"\n )\n mock_click_echo.assert_any_call(\"Everything is already up to date!\")", "def wait():\n time.sleep(1)", "def wait_vm_deployment(self, is_setup: bool, params: dict) -> Tuple[\"Status\", dict]:", "def wait(self):\n time.sleep(0.010)", "def _wait_for_install(self, instance, ssh_options, wait_dir):\n wait_time = 3\n command = \"ls %s\" % wait_dir\n ssh_command = self._get_standard_ssh_command(instance, ssh_options, command)\n\n self.logger.info(\"Waiting for install with command %s\" % ssh_command)\n while True:\n retcode = subprocess.call(ssh_command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n if retcode == 0:\n break\n self.logger.debug(\"Sleeping for %d seconds...\" % wait_time)\n time.sleep(wait_time)", "def wait(self):\n\t\twhile 
True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)", "def test_upgrade(self):\n with cd(self.latest_agent_name):\n latest_agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # upgrade again to check it workd with upgraded version\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # compare both configuration files, except the agent name and the author\n upgraded_agent_dir = Path(self.agent_name)\n latest_agent_dir = Path(self.latest_agent_name)\n lines_upgraded_agent_config = (\n (upgraded_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n lines_latest_agent_config = (\n (latest_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n # the slice is because we don't compare the agent name and the author name\n assert lines_upgraded_agent_config[2:] == lines_latest_agent_config[2:]\n\n # compare vendor folders.\n assert are_dirs_equal(\n upgraded_agent_dir / \"vendor\", latest_agent_dir / \"vendor\"\n )", "def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()", "def setup_complete():\n\n async def predicate(ctx:vbu.Context):\n if await fetch_guild_settings(ctx):\n return True\n raise CheckFailure(f'Your server hasn\\'t yet been set up. Use {ctx.prefix}setup')\n return commands.check(predicate)" ]
[ "0.77359796", "0.7126432", "0.7126432", "0.7126432", "0.7126432", "0.6678276", "0.66306156", "0.6630208", "0.66240835", "0.64890206", "0.6419722", "0.640707", "0.640707", "0.64049435", "0.63792473", "0.6337039", "0.63248074", "0.62579197", "0.6122265", "0.6120902", "0.61138004", "0.60820246", "0.60655546", "0.6057462", "0.6029344", "0.60255265", "0.5987153", "0.59747934", "0.59511006", "0.59248954" ]
0.73207533
1
tries to wait for the server to restart after the 'restore' command
def wait_for_restore(self):
    for node in self.all_instances:
        if node.instance_type in [
            InstanceType.RESILIENT_SINGLE,
            InstanceType.SINGLE,
            InstanceType.DBSERVER,
        ]:
            node.detect_restore_restart()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def continue_server():\n update_server_status({'ready': True})", "async def async_restore(self):\n await self._client.restore()\n self.async_write_ha_state()", "def _restart(self):\n pass", "def acquire_restart(self):\n self.bus.write('ACQ:STATE RUN')", "def restart(self):\n\t\treturn self.reset().start()", "def test_restore_with_erlang_crash_and_restart(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.kill_erlang(self.os_name)\n conn.start_couchbase()\n conn.disconnect()\n timeout_now = 600\n output = restore_result.result(timeout=timeout_now)\n self.assertTrue(self._check_output(\"Restore completed successfully\", output),\n \"Restore failed with erlang crash and restart within 180 seconds\")\n self.log.info(\"Restore succeeded with erlang crash and restart within 180 seconds\")", "def restart_salt():\n stop_salt()\n start_salt()", "def test_restore_with_memcached_crash_and_restart(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n cli_command_location=self.cli_command_location,\n cb_version=self.cb_version)\n self.sleep(10)\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.pause_memcached(self.os_name)\n conn.unpause_memcached(self.os_name)\n conn.disconnect()\n output = restore_result.result(timeout=600)\n self.assertTrue(self._check_output(\"Restore completed successfully\", output),\n \"Restore failed with memcached crash and restart within 400 seconds\")\n self.log.info(\"Restore succeeded with memcached crash and restart within 400 seconds\")", "def test_backup_restore_after_rebalance(self):\n serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]\n serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create_validate()\n self.backupset.number_of_backups = 1\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)\n rebalance.result()\n self.backup_cluster_validate()\n if not self.same_cluster:\n self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])\n serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]\n serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: 
self.nodes_init]\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)\n else:\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)\n rebalance.result()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")", "def restart(self) -> None:", "def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()", "def _graceful_restart(self, wait):\n\n self._sut.shutdown(True)\n self._sut.start()\n\n if wait:\n sleep(BespokeGlobals.VM_BOOT_WAIT)", "def doRestore(self):\n self.logger.log(\"Begin to restore instance status...\")\n \n try:\n self.readConfigInfo()\n self.getUserInfo()\n \n # dump status to file\n cmd = ClusterCommand.getQueryStatusCmd(self.user, self.dbNodeInfo.id, self.__curStatusFile)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Query local instance status failed!Error: %s\" % output)\n \n bakDbStatus = DbClusterStatus()\n bakDbStatus.initFromFile(self.__bakStatusFile)\n bakNodeStatus = bakDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (bakNodeStatus is None):\n self.logger.logExit(\"Get backup status of local node failed!\")\n \n curDbStatus = DbClusterStatus()\n curDbStatus.initFromFile(self.__curStatusFile)\n curNodeStatus = curDbStatus.getDbNodeStatusById(self.dbNodeInfo.id)\n if (curNodeStatus is None):\n self.logger.logExit(\"Get current status of local node failed!\")\n if (not curNodeStatus.isNodeHealthy()):\n self.logger.logExit(\"Current status of node is not healthy!\")\n \n # Compare the status and restore it\n bakInstances = bakNodeStatus.datanodes + bakNodeStatus.gtms\n for bakInst in bakInstances:\n curInst = curNodeStatus.getInstanceByDir(bakInst.datadir)\n if (curInst is None):\n self.logger.logExit(\"Get current status of instance failed!DataDir:%s\" % bakInst.datadir)\n \n if (bakInst.status == curInst.status):\n continue\n \n if (bakInst.status == DbClusterStatus.INSTANCE_STATUS_PRIMARY):\n self.__switchToPrimary(bakInst.datadir)\n elif (bakInst.status == DbClusterStatus.INSTANCE_STATUS_STANDBY):\n self.__switchToStandby(bakInst.datadir)\n \n except Exception, e:\n self.logger.logExit(str(e))\n \n self.logger.log(\"Restore instance status successfully.\")\n self.logger.closeLog()", "def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True", "def restart(self):", "def restartSystem(self):\n # save retry count between reboots\n try:\n self.notifyPut('Restarting System...')\n self.db = shelve.open(os.path.join(self.xlocal, 'Launch Manager Utils\\\\launch.data'))\n self.db['retry_count'] = self.retry_count\n self.db.close()\n except Exception, e:\n self.logQ.put('{0} - Unable to save retry count'.format(e))\n \n try:\n subprocess.call(['SHUTDOWN', '/f', '/r'])\n except Exception, e:\n self.logQ.put('{0} - Unable to restart Windows'.format(e))\n return", "def request_shutdown(self, restart=False):", "def test_resume_restore(self):\n if not self.backupset.resume:\n self.fail(\"Resume must be True for this test\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.log.info(\"Start to flush bucket\")\n self._all_buckets_flush()\n restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,\n objstore_provider=self.objstore_provider,\n no_progress_bar=self.no_progress_bar,\n 
cli_command_location=self.cli_command_location,\n cb_version=self.cb_version,\n force_updates=self.backupset.force_updates,\n no_resume=True)\n state = \"\"\n while state not in (\"FINISHED\", \"EXECUTING\"):\n state = restore_result.state\n self._kill_cbbackupmgr()\n self.assertFalse(self._check_output(\"success\", restore_result.result()))\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")", "def repl_restart(restart: bool = True) -> None:", "def snap_restore_complete(mnode, volname, snapname):\n\n # Stopping volume before snap restore\n ret = volume_stop(mnode, volname)\n if not ret:\n g.log.error(\"Failed to stop volume %s before restoring snapshot \"\n \"%s in node %s\" % (volname, snapname, mnode))\n return False\n ret, _, _ = snap_restore(mnode, snapname)\n if ret != 0:\n g.log.error(\"snapshot restore cli execution failed\")\n return False\n\n # Starting volume after snap restore\n ret = volume_start(mnode, volname)\n if not ret:\n g.log.error(\"Failed to start volume %s after restoring snapshot \"\n \"%s in node %s\" % (volname, snapname, mnode))\n return False\n return True", "def restart(self):\n pass", "def restart():\n stop()\n start()", "def restart(self):\r\n pass", "def test_backup_restore_with_rebalance(self):\n serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]\n serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create_validate()\n self.backupset.number_of_backups = 1\n rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)\n self.sleep(10)\n count = 0\n while rebalance.state != \"FINISHED\":\n if count == 0:\n self.backup_cluster_validate()\n count += 1\n if not self.same_cluster:\n self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])\n serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]\n serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)\n else:\n rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)\n self.sleep(10)\n count = 0\n while rebalance.state != \"FINISHED\":\n if count == 0:\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")\n count += 1", "def test_restore_with_memcached_crash(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster()\n rest_conn = RestConnection(self.backupset.restore_cluster_host)\n rest_conn.create_bucket(bucket=\"default\", ramQuotaMB=512)\n try:\n conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n conn.pause_memcached(self.os_name)\n output, error = self.backup_restore()\n self.assertTrue(self._check_output(\n \"Error restoring cluster: failed to connect\", output),\n \"Expected error message not thrown by Restore 180 seconds after memcached crash\")\n self.log.info(\"Expected error thrown by Restore 180 seconds after memcached crash\")\n except Exception as ex:\n self.fail(str(ex))\n finally:\n conn.unpause_memcached(self.os_name)\n conn.disconnect()\n self.sleep(30)", "def finish_maintenance(self, errors):\n if not self.can_restart:\n return\n\n try:\n self._shutdown()\n 
run(\" \".join(self.cmd_line_opts['argv']))\n self.client = pymongo.MongoClient(self.host, self.port)\n self._wait_secondaries_catch_up()\n except Exception as e:\n errors.put(e)\n traceback.print_exc()", "def test_backup_restore_after_offline_upgrade(self):\n upgrade_version = self.input.param(\"upgrade_version\", \"5.0.0-3330\")\n if upgrade_version == \"5.0.0-3330\":\n self.fail(\"\\n *** Need param 'upgrade_version=' to run\")\n\n backup_service_test = self.input.param(\"backup_service_test\", False)\n\n if backup_service_test:\n backup_service_hook = BackupServiceHook(self.servers[1], self.servers, self.backupset, self.objstore_provider)\n self.cli_command_location = \"/opt/couchbase/bin\"\n\n self._install(self.servers)\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],\n [])\n rebalance.result()\n self.add_built_in_server_user()\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.buckets = RestConnection(self.master).get_buckets()\n self.total_buckets = len(self.buckets)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.backup_cluster_validate()\n self.sleep(5)\n BucketOperationHelper.delete_bucket_or_assert(self.master, \"default\", self)\n\n \"\"\" Start to upgrade \"\"\"\n if self.force_version_upgrade:\n upgrade_version = self.force_version_upgrade\n upgrade_threads = self._async_update(upgrade_version=upgrade_version,\n servers=self.servers[:2])\n for th in upgrade_threads:\n th.join()\n self.log.info(\"Upgraded to: {ver}\".format(ver=upgrade_version))\n self.sleep(30)\n\n \"\"\" Re-create default bucket on upgrade cluster \"\"\"\n RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)\n self.sleep(5)\n\n # Create a backup node and perform a backup service import repository and restore\n if backup_service_test:\n backup_service_hook.backup_service.replace_services(self.servers[1], ['kv,backup'])\n backup_service_hook.backup_service.import_repository(self.backupset.directory, self.backupset.name, \"my_repo\")\n backup_service_hook.backup_service.take_one_off_restore(\"imported\", \"my_repo\", 20, 20)\n backup_service_hook.cleanup()\n return\n\n \"\"\" Only server from Spock needs build in user\n to access bucket and other tasks\n \"\"\"\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n self.add_built_in_server_user()\n for user in self.users_check_restore:\n user_name = user.replace('[', '_').replace(']', '_')\n testuser = [{'id': user_name, 'name': user_name,\n 'password': 'password'}]\n rolelist = [{'id': user_name, 'name': user_name,\n 'roles': user}]\n\n self.log.info(\"**** add built-in '%s' user to node %s ****\" % (testuser[0][\"name\"],\n self.master.ip))\n RbacBase().create_user_source(testuser, 'builtin', self.master)\n\n self.log.info(\"**** add '%s' role to '%s' user ****\" % (rolelist[0][\"roles\"],\n testuser[0][\"name\"]))\n RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')\n\n backupsets = [self.backupset]\n if \"5\" <= RestConnection(self.master).get_nodes_version()[:1]:\n for user in self.users_check_restore:\n new_backupset = copy.deepcopy(self.backupset)\n new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')\n backupsets.append(new_backupset)\n for backupset in backupsets:\n self.backupset = backupset\n self.backup_restore_validate(compare_uuid=False, 
seqno_compare_function=\">=\")\n BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,\n \"default\", self)", "def ready_for_commands(self, retries = 3):\n while retries:\n try:\n self.refresh()\n return True\n except Reset_Exception as e:\n pass\n except Max_Retry_Exception as e:\n pass\n finally:\n retries -= 1\n raise e", "async def restart_server(self):\n await self.stop_server()\n self._start()\n await self.send_tag('control', emoji.TRIGGERS['control'], 'Server restarted!')" ]
[ "0.69105613", "0.6523955", "0.63679016", "0.62714815", "0.6169232", "0.6149965", "0.61382735", "0.61187077", "0.61115396", "0.61071575", "0.606816", "0.6046619", "0.6045591", "0.6022971", "0.6000961", "0.59798926", "0.59677154", "0.5965609", "0.59406596", "0.5929384", "0.5921322", "0.5902289", "0.5885909", "0.5868154", "0.58384186", "0.5831974", "0.58264554", "0.582301", "0.5818666", "0.5801851" ]
0.7891546
0
use arangosh to run a command on the frontend arangod
def execute_frontend(self, cmd, verbose=True): return self.arangosh.run_command(cmd, verbose)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_command(self, args):\n pass", "def command():\n pass", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():" ]
[ "0.6615906", "0.6522063", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979" ]
0.79413956
0
get the port of a syncmaster arangosync
def get_sync_master_port(self): self.sync_master_port = None pos = None sm_port_text = "Starting syncmaster on port" sw_text = "syncworker up and running" worker_count = 0 logging.info("detecting sync master port") while worker_count < 3 and self.is_instance_running(): progress("%") lfs = self.get_log_file() npos = lfs.find(sw_text, pos) if npos >= 0: worker_count += 1 pos = npos + len(sw_text) else: time.sleep(1) lfs = self.get_log_file() pos = lfs.find(sm_port_text) pos = lfs.find(sm_port_text, pos + len(sm_port_text)) pos = lfs.find(sm_port_text, pos + len(sm_port_text)) if pos >= 0: pos = pos + len(sm_port_text) + 1 self.sync_master_port = int(lfs[pos : pos + 4]) return self.sync_master_port
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def master_port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"master_port\")", "def master_port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"master_port\")", "def masterPort(self):\r\n return self._masterPort", "def port(self) -> int:", "def get_slave_port():\n return 9901", "def _get_port(self):\n return self.__port", "def port():", "def get_serverport(cobj):\n pass", "def Port(self) -> int:", "def get_res_port():\n return get_port() + 1", "def get_port(self):\n return self.port", "def get_cmd_port(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetCmdPort', self.handle)", "def _get_nport(self):\n return self.__nport", "def get_port(self):\n return self.__port", "def port(self) -> int:\n if hasattr(self, \"_port\"):\n return self._port\n _args: list[Arg] = []\n _ctx = self._select(\"port\", _args)\n return _ctx.execute_sync(int)", "def get_port(self) -> int:\n return int(self.socket.getsockname()[1])", "def get_port(self) -> int:\n return self._port", "def getCurPort(self):\n cmd_string = '?6'\n data = self.sendRcv(cmd_string)\n with self._syringeErrorHandler():\n try:\n port = int(data)\n except ValueError:\n raise SyringeError(7, self.__class__.ERROR_DICT)\n self.state['port'] = port\n return port", "def __get_port(self) -> int:\n\t\ttry:\n\t\t\treturn int(os.getenv('MQTT_DRIVEN_PORT'))\n\t\texcept:\n\t\t\treturn 1883", "def get_port(self):\n \n return self._port", "def external_port(self):\r\n return self._external_port", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def port(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"port\")", "def get_port():\n return int(os.getenv(\"PORT\", \"7840\"))", "def head_port(self):\n return self.head_args.port[0] if self.head_args else None", "def getPort(self):\n return self._port", "def comm_port(self):\r\n return self._comm_port", "def get_manager_rest_service_port():\n return int(os.environ[MANAGER_REST_PORT_KEY])", "def port(self):\r\n _, port = self.server_address\r\n return port" ]
[ "0.70510185", "0.70510185", "0.70024353", "0.6852463", "0.68338513", "0.679988", "0.6775076", "0.6759334", "0.66923726", "0.6612166", "0.6540162", "0.6490325", "0.64330757", "0.6365352", "0.63405347", "0.6295224", "0.61943614", "0.6190745", "0.6190299", "0.6183827", "0.6175898", "0.6159054", "0.6159054", "0.6159054", "0.61576355", "0.61543393", "0.61479646", "0.6140313", "0.6073863", "0.60477436" ]
0.7340527
0
get the logfile of the dbserver instance
def read_db_logfile(self): server = self.get_dbserver() assert server.logfile.exists(), "don't have logfile?" return server.logfile.read_text(errors="backslashreplace")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logfile(self):\n return self._get('logfile')", "def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()", "def getLogFile(self):\r\n return LOG.getLogFile().name", "def get_log(self):\n\n open_lf = open(self.logfile, 'r')\n log_str = open_lf.read()\n sys.stdout.write(log_str)\n\n return log_str", "def getLogFile(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FILE_KEY)", "def getLogs():", "def getLogs():", "def get_log_file():\n log_file = os.getenv(\"LOG_FILE\", \"\")\n if log_file != \"\":\n return log_file\n return os.path.dirname(os.path.abspath(__file__)) + \"/server.log\"", "def logger():\n return TestListenerDB()", "def get_log_path():\n return LOG_PATH", "def getLog(self):\n return self.session.request('diag/log/')", "def get_system_logfile():\n return \"system\" + get_day() + \".log\"", "def get_log()->dict:\n return execute_command(\"SELECT log FROM log\").fetchall()[0][0]", "def getLog(self):\n return self.log", "def getLog(self):\n return self.log", "def getLogs():\n # in flux, it may be possible to provide more structured information\n # like python Failure instances", "def set_database_log(config):\n global DATABASE_LOG\n DATABASE_LOG = config", "def log (self):\n return self._log", "def get_instance_log_conf(instance_id):\n # Retrieve current log config file\n log_conf_file = None\n\n filename = 'logentries_%s.conf'%instance_id\n rsyslog_conf_name = '/etc/rsyslog.d/%s'%filename\n local_conf_name = '/tmp/%s'%filename\n \n # Clean file present\n try:\n local('rm %s'%local_conf_name)\n except:\n print 'Could not remove %s. It may not exist'%(local_conf_name)\n logger.warning('Could not remove %s. It may not exist'%(local_conf_name))\n # Get remote conf file or return None if it cannot be retrieved\n try:\n get(rsyslog_conf_name,local_conf_name)\n except:\n print '%s does not exist on instance %s'%(rsyslog_conf_name,instance_id)\n logger.warning('%s does not exist on instance %s',rsyslog_conf_name,instance_id)\n return None\n # Open conf file or return None if it cannot be opened\n try:\n log_conf_file = open(local_conf_name,'r')\n except:\n print 'Cannot open %s from instance %s'%(local_conf_name,instance_id)\n logger.warning('Cannot open %s from instance %s',local_conf_name,instance_id)\n return None\n return log_conf_file", "def get_base_logfile():\n return \"baseLog\" + get_day() + \".log\"", "def get_logdir(self):\n return self.event_writer.get_logdir()", "def log(self):\n if self._log is None:\n self._log = Log(client=self)\n return self._log", "def get_main_log(self) -> Any:\n return self.logger", "def get_console_log_filename(self):\n return", "def log(self):\n return self._log", "def log(self):\n return self._log", "def logs():\n with open(configs.LOG_PATH) as f:\n return f.read()", "def get_device_log(self, client):\r\n file_path = client.getDeviceLog()\r\n logging.info(str(time.asctime(time.localtime())) + \" Device Logs collected !! \" + file_path)\r\n #self.logger.info(\"Device logs collected at %s\" % file_path)\r\n return file_path", "def log(self):\r\n return self._log", "def read_agent_logfile(self):\n server = self.get_agent()\n assert server.logfile.exists(), \"don't have logfile?\"\n return server.logfile.read_text(errors=\"backslashreplace\")" ]
[ "0.7550862", "0.7224536", "0.6880102", "0.6592017", "0.6583927", "0.6482713", "0.6482713", "0.6469686", "0.6463902", "0.63621795", "0.63040406", "0.6289661", "0.6283953", "0.6207564", "0.6207564", "0.6109618", "0.6103252", "0.6098555", "0.6073537", "0.60647166", "0.6054933", "0.6024553", "0.6009368", "0.59929013", "0.5992791", "0.5992791", "0.5959552", "0.59518474", "0.59449756", "0.5941459" ]
0.7439977
1
get the agent logfile of this instance
def read_agent_logfile(self): server = self.get_agent() assert server.logfile.exists(), "don't have logfile?" return server.logfile.read_text(errors="backslashreplace")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logfile(self):\n return self._get('logfile')", "def get_log(self):\n\n open_lf = open(self.logfile, 'r')\n log_str = open_lf.read()\n sys.stdout.write(log_str)\n\n return log_str", "def getLogFile(self):\r\n return LOG.getLogFile().name", "def getLog(self):\n return self.log", "def getLog(self):\n return self.log", "def getLogFile(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FILE_KEY)", "def log(self) -> DagsterLogManager:\n return self._step_execution_context.log", "def log (self):\n return self._log", "def getLog(self):\n return self.session.request('diag/log/')", "def log(self):\n return self._log", "def log(self):\n return self._log", "def getLog(self):\n \n return self.resp[\"log\"]", "def get_main_log(self) -> Any:\n return self.logger", "def log(self, namespace, agent, ltype: int = 0):\n if isinstance(agent, Agent):\n agent = agent.uid\n\n with self.lock:\n with self.zip.open(f'{namespace}/logs/{agent}_{ltype}.txt', 'r') as log:\n return log.read().decode('utf-8')", "def _create_agent_log():\n log_file = SETTINGS['agent.log_file']\n if not log_file.endswith('.rollbar'):\n log.error(\"Provided agent log file does not end with .rollbar, which it must. \"\n \"Using default instead.\")\n log_file = DEFAULTS['agent.log_file']\n\n retval = logging.getLogger('rollbar_agent')\n handler = logging.FileHandler(log_file, 'a', 'utf-8')\n formatter = logging.Formatter('%(message)s')\n handler.setFormatter(formatter)\n retval.addHandler(handler)\n retval.setLevel(logging.WARNING)\n return retval", "def getlogger(self):\n return self.logger", "def log(self):\n if self._log is None:\n self._log = Logger().get_logger(self.__class__.__name__)\n return self._log", "def getLog(self):\n pass", "def log(self) -> misc_.Logger:\n\t\treturn self._log", "def getLogger(self):\n \n logging.basicConfig(filename=self.__logfile, format='%(asctime)s - %(levelname)s - %(message)s', level=logging.DEBUG)\n return logging.getLogger('loggerAdv')", "def log(self):\n if self._log is None:\n self._log = Log(client=self)\n return self._log", "def log(self):\r\n return self._log", "def get_logdir(self):\n return self.event_writer.get_logdir()", "def get_logger(self):\n return self.logger", "def get_logger(self):\n return self.logger", "def get_log_file(self):\n return self.log_file.read_text(errors=\"backslashreplace\")", "def getLogs():", "def getLogs():", "def get_logger(self):\n return self.__logger", "def logger(self):\n return self.logging" ]
[ "0.79751503", "0.73155695", "0.72758114", "0.72477007", "0.72477007", "0.70513636", "0.6913755", "0.68768865", "0.68585616", "0.6792453", "0.6792453", "0.6751899", "0.6743848", "0.6700573", "0.66865516", "0.66694623", "0.66520137", "0.66206974", "0.66192824", "0.66146636", "0.6584084", "0.65412825", "0.65354013", "0.6520273", "0.6520273", "0.65137196", "0.65035045", "0.65035045", "0.64954853", "0.6485503" ]
0.7742073
1
detect the arangod instance PIDs
def detect_instance_pids(self): for instance in self.all_instances: instance.detect_pid( ppid=self.instance.pid, full_binary_path=self.cfg.real_sbin_dir, offset=0, ) self.show_all_instances() self.detect_arangosh_instances(self.cfg, self.cfg.version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_instances(self):\n lh.subsection(\"Instance Detection for {0.name}\".format(self))\n jwt = self.get_jwt_header()\n self.all_instances = []\n logging.debug(\"waiting for frontend\")\n logfiles = set() # logfiles that can be used for debugging\n\n # the more instances we expect to spawn the more patient:\n tries = 10 * self.expect_instance_count\n\n # Wait for forntend to become alive.\n all_instances_up = False\n while not all_instances_up and tries:\n self.all_instances = []\n detected_instances = []\n sys.stdout.write(\".\")\n sys.stdout.flush()\n\n for root, dirs, files in os.walk(self.basedir):\n for onefile in files:\n # logging.debug(\"f: \" + root + os.path.sep + onefile)\n if onefile.endswith(\"log\"):\n logfiles.add(str(Path(root) / onefile))\n\n for name in dirs:\n # logging.debug(\"d: \" + root + os.path.sep + name)\n match = None\n instance_class = None\n if name.startswith(\"sync\"):\n match = re.match(r\"(syncmaster|syncworker)(\\d*)\", name)\n instance_class = SyncInstance\n else:\n match = re.match(\n r\"(agent|coordinator|dbserver|resilientsingle|single)(\\d*)\",\n name,\n )\n instance_class = ArangodInstance\n # directory = self.basedir / name\n if match and len(match.group(2)) > 0:\n # we may see a `local-slave-*` directory inbetween,\n # hence we need to choose the current directory not\n # the starter toplevel dir for this:\n instance = instance_class(\n match.group(1),\n match.group(2),\n self.cfg.localhost,\n self.cfg.publicip,\n Path(root) / name,\n self.passvoid,\n self.cfg.ssl,\n self.cfg.version,\n self.enterprise,\n jwt=jwt,\n )\n instance.wait_for_logfile(tries)\n instance.detect_pid(\n ppid=self.instance.pid,\n full_binary_path=self.cfg.real_sbin_dir,\n offset=0,\n )\n detected_instances.append(instance.instance_type)\n self.all_instances.append(instance)\n\n print(self.expect_instances)\n detected_instances.sort()\n print(detected_instances)\n attach(str(self.expect_instances), \"Expected instances\")\n attach(str(detected_instances), \"Detected instances\")\n if (self.expect_instances != detected_instances) or (not self.get_frontends()):\n tries -= 1\n time.sleep(5)\n else:\n all_instances_up = True\n\n if not self.get_frontends():\n print()\n logging.error(\"STARTER FAILED TO SPAWN ARANGOD\")\n self.show_all_instances()\n logging.error(\"can not continue without frontend instance\")\n logging.error(\"please check logs in\" + str(self.basedir))\n for logf in logfiles:\n logging.debug(logf)\n message = \"if that does not help try to delete: \" + str(self.basedir)\n logging.error(message)\n raise Exception(message)\n self.show_all_instances()", "def getIDs():", "def _get_instance_ids(instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids", "def detect_instance_pids_still_alive(self):\n missing_instances = []\n running_pids = psutil.pids()\n for instance in self.all_instances:\n if instance.pid not in running_pids:\n missing_instances.append(instance)\n\n if len(missing_instances) > 0:\n logging.error(\n \"Not all instances are alive. 
The following are not running: %s\",\n str(missing_instances),\n )\n logging.error(get_process_tree())\n raise Exception(\"instances missing: \" + str(missing_instances))\n instances_table = get_instances_table(self.get_instance_essentials())\n logging.info(\"All arangod instances still running: \\n%s\", str(instances_table))\n attach_table(instances_table, \"Instances table\")", "def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]", "def inspire_pidstore():", "def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids", "def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []", "def ppid(self):", "def _get_ids_from_ip(self, ip_address):\r\n try:\r\n # Does it look like an ip address?\r\n socket.inet_aton(ip_address)\r\n except socket.error:\r\n return []\r\n\r\n # Find the VS via ip address. First try public ip, then private\r\n results = self.list_instances(public_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]\r\n\r\n results = self.list_instances(private_ip=ip_address, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]", "def _get_ids_from_hostname(self, hostname):\r\n results = self.list_instances(hostname=hostname, mask=\"id\")\r\n return [result['id'] for result in results]", "def get_running_unison_processes(self):\n # Get PIDs\n # Note: throws exception if no instances exist\n try:\n pids = str(subprocess.check_output([\"pidof\", '/usr/bin/unison']))\n\n # Parse command output into list by removing junk chars and exploding\n # string with space delimiter\n pids = pids[2:-3].split(' ')\n\n except subprocess.CalledProcessError:\n # If error caught here, no unison instances are found running\n pids = []\n\n self.logger.debug(\n \"Found \" + str(len(pids)) + \" running instances on this system: PIDs \" +\n \", \".join(pids)\n )\n\n # Return, after converting to ints\n return list(map(int, pids))", "def pid(self):", "def get_instance_ids(temporary_user, config, state, now, tz):\n try:\n data = temporary_user.describe_instances(Filters=[{'Name':'instance-state-name', 'Values': [state]}])\n logger.info(\"The date is : {} , {}\".format(now.strftime(\"%A, %d %B %Y %H:%M:%S\"), tz))\n\n action_required, no_action_required = categorise_instances(data, config, temporary_user)\n return action_required, no_action_required\n except Exception as error:\n logger.info(\"Describing the instances failed with the following error : {}\".format(error))", "def _instantiated_ids(self):\n return self._identity_map.keys()", "def dynamic_pid(self):\n pass", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances += \" - {0.name} (pid: {0.pid})\".format(instance)\n logging.info(\"arangod instances for starter: %s - %s\", self.name, instances)", "def wait_for_exec_to_start():\n node_instances = self.client.node_instances.list()\n for ni in 
node_instances:\n # this will keyerror out (and be retried) if the operation\n # didn't run yet\n pids[ni.node_id] = ni.runtime_properties['pid']", "def _instancelist(self):\n\n rv = []\n self.iname = {}\n for resv in self.conn.get_all_reservations():\n for inst in resv.instances:\n if inst.state != 'terminated':\n name = inst.tags.get('Name',None)\n rv.append([inst.id,inst.state])\n if name is not None:\n rv.append([name,inst.state])\n else:\n rv.append([inst.id+'-needsName',inst.state])\n self.iname[name] = inst.id\n self.iname[inst.id] = inst.id\n return rv", "def extract_core_ids(self):\n path2folder = 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[self.radii[0]] + '/'\n analysis_files = [dir for dir in os.listdir(path2folder) if dir.startswith('Matrix-analysis-IP_')]\n analysis_file = path2folder + analysis_files[0] #work for 1 component system\n with open(analysis_file, 'r') as fid:\n my_file = yaml.load(fid, Loader=yaml.FullLoader)\n self.core_ids = list(my_file.keys())\n self.mol_name = analysis_files[0].split('_')[1].split('.')[0]\n\n\n print('coreids', self.core_ids)", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def get_ids(self, instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids", "def _get_ids_from_ip(self, ip):\r\n try:\r\n # Does it look like an ip address?\r\n socket.inet_aton(ip)\r\n except socket.error:\r\n return []\r\n\r\n # Find the server via ip address. First try public ip, then private\r\n results = self.list_hardware(public_ip=ip, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]\r\n\r\n results = self.list_hardware(private_ip=ip, mask=\"id\")\r\n if results:\r\n return [result['id'] for result in results]", "def get_ceph_pids():\n pids = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n with open(cfg['pid_file'], 'r') as file_fd:\n pids.append((srv, int(file_fd.read())))\n return pids", "def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids", "def list_instance_uuids(self):\n return self.list_instances()", "def _choose_among_running_instances(self):\n\n instances = self.compute.get_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any running instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id", "def get_id_of_instance(self, label):\n query = read_query('id/id_of_instance') % label\n response = self._submit_query(query)\n return [elem['id']['value'] for elem in response] if response else []", "def get_instance_list():\n return parse_list_output(Popen('nova list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_asg_instance_ids(self, asg_name):\n instance_ids = []\n # Grab the first item in the list because we're only asking for 1 ASG\n try:\n asg_data = self.asg.describe_auto_scaling_groups(\n 
AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0]\n except Exception as e: \n logger.info(e)\n return []\n\n for instance_data in asg_data['Instances']:\n instance_ids.append(instance_data['InstanceId'])\n\n return instance_ids" ]
[ "0.6319383", "0.61950517", "0.6192594", "0.61528647", "0.6111595", "0.6052284", "0.605194", "0.59694993", "0.59247625", "0.5862299", "0.5763337", "0.5762358", "0.5724921", "0.5709404", "0.5638399", "0.56117487", "0.5553753", "0.55345756", "0.54887575", "0.54887056", "0.5459837", "0.5450734", "0.5407005", "0.5404297", "0.53547686", "0.534805", "0.53450406", "0.53387016", "0.5330876", "0.53133714" ]
0.77291846
0
scan all instances for `FATAL` statements
def detect_fatal_errors(self): for instance in self.all_instances: instance.detect_fatal_errors()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errors_fatal(self) -> List[Error]:", "def fatal_error_processor(self):\n while True:\n _ = (yield)\n self.failed = True\n self.converged = False\n self.solve_completed = False", "def getFatalErrors(self):\n global hadFatalErrors\n if hadFatalErrors:\n text = '\\n'.join(hadFatalErrors)\n hadFatalErrors = []\n return text", "def fatal(self, *args, **kwargs):", "def get_fatal_alerts(self, path):", "def test_clean_log():\n\n log_file_path = \"mobile_testkit_tests/test_data/mock_clean_log.txt\"\n\n scan_logs.scan_for_errors(['panic'], log_file_path)", "def has_errors_fatal(self) -> bool:", "def search_for_warnings(self):\n log = str()\n print(self.default_starter_args + self.arguments)\n if not self.log_file.exists():\n print(str(self.log_file) + \" not there. Skipping search\")\n return\n print(str(self.log_file))\n with self.log_file.open(errors=\"backslashreplace\") as log_f:\n for line in log_f.readline():\n if \"WARN\" in line or \"ERROR\" in line:\n print(line.rstrip())\n log += line.rstrip()\n attach(log, \"WARN or ERROR lines from starter log\")", "def check_errors(self):\n\n errors = []\n while True:\n err = self.values(\"SYST:ERR?\")\n if int(err[0]) != 0:\n errmsg = \"Agilent 5313xA: {0}: {1}\".format(err[0], err[1])\n log.error(errmsg + '\\n')\n errors.append(errmsg)\n else:\n break\n\n return errors", "def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])", "def scan_error(self, line: int, message: str):\n self.report(line, \"\", message)", "def filter_unknown_bases(self):\n self.failed[\"unknowns\"] = self.stats.index[\n self.stats[\"unknowns\"] > self.tolerance[\"unknowns\"]\n ]\n self.passed = self.stats.drop(self.failed[\"unknowns\"])", "def errors_fatal(self) -> List[Error]:\n return self._errors_fatal_files + self._errors_fatal", "def scanForSimpleError(script):\n\tlangage = identifyLangage(script)\n\tline_number = 0\n\tlogFile_name = \"scan.log\"\n\n\t# Scanning File\n\tlogFile = open(logFile_name, 'w')\n\tscriptFile = open(script, 'r')\n\tfor line in scriptFile:\n\t\tline_number +=1\n\t\tlineWithoutBackN = line.replace(\"\\n\", \"\")\n\t\tlineInArray = lineWithoutBackN.split(\" \")\n\t\tlastWord = lineInArray[-1]\n\t\tlastWordInArray = list(lastWord)\n\t\tlineInCharacterArray = list(lineWithoutBackN)\n\n\t\t#########################\n\t\t# looking for a shebang #\n\t\t# => for perl\t\t#\n\t\t# => for bash\t\t#\n\t\t#########################\n\t\tif(langage == \"perl\" and line_number == 1 and lineInArray[0] != \"#!/usr/bin/perl\"):\n\t\t\tlogFile.write(\"[WARNING]: SET line \"+str(line_number)+\" TO #!/usr/bin/perl\\n\")\n\t\tif(langage == \"bash\" and line_number == 1 and line != \"#!/bin/bash\"):\n\t\t\tlogFile.write(\"[WARNING]: SET line \"+str(line_number)+\" TO #!/bin/bash\\n\")\n\n\t\t#########################\n\t\t# Check for semi-column\t#\n\t\t# => for perl\t\t#\n\t\t#########################\n\t\tif(len(lastWordInArray) > 0):\n\t\t\tif(langage == \"perl\" and line_number != 1 and lastWordInArray[-1] != \";\"):\n\t\t\t\tif(lastWordInArray != \"}\"):\n\t\t\t\t\tfirstNonEmptyCharacter = getFirstNonEmptyCharInArray(lineInCharacterArray)\n\t\t\t\t\tif(firstNonEmptyCharacter != \"#\"):\n\t\t\t\t\t\tlogFile.write(\"[ERROR]: ADD \\\";\\\" to line \"+str(line_number)+\"\\n\")\n\n\t\t#################################\n\t\t# Check variable declaration\t#\n\t\t# => for perl\t\t\t#\n\t\t#################################\n\t\tif(getFirstNonEmptyCharInArray(lineInCharacterArray) != \"#\" ):\n\t\t\tword_number = 0\n\t\t\tfor word in lineInArray:\n\t\t\t\tif(word 
== \"my\"):\n\t\t\t\t\tvariable = lineInArray[word_number+1]\n\t\t\t\t\tvariableInArray = list(variable)\n\t\t\t\t\tif(variableInArray[0] != \"$\" and variableInArray[0] != \"@\"):\n\t\t\t\t\t\tif \"list\" in variable:\n\t\t\t\t\t\t\tlogFile.write(\"[ERROR]: ADD \\\"@\\\" to \"+variable+\", line \"+str(line_number)+\"\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlogFile.write(\"[ERROR]: ADD \\\"$\\\" to \"+variable+\", line \"+str(line_number)+\"\\n\")\n\t\t\t\t\n\n\t\t\t\n\t\t\t\t\t\n\n\tscriptFile.close()\n\tlogFile.close()", "def delete_error():\r\n item = core.get_all_items()\r\n for i in item:\r\n if \"Error\" in i or \"Warning\" in i:\r\n if core.does_item_exist(i):\r\n reset_error(i)", "def check_for_errors(self):\n\n with open(\"output.dat\", \"r\") as log:\n for line in log:\n if \"*** Psi4 exiting successfully.\" in line:\n return {\"success\": True}\n\n elif \"*** Psi4 encountered an error.\" in line:\n return {\"success\": False, \"error\": \"Not known\"}\n\n return {\"success\": False, \"error\": \"Segfault\"}", "def test_nonexistent_report(self):\n command_line = [\"report\", \"notreport\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def errors(conf, daemon):\n # persisted dict interface for long term memory\n errors = Shove('file://{0}'.format(conf.app.errors), protocol=2, flag='r')\n if any(errors):\n print(\"errors found\")\n for path, error in six.iteritems(errors):\n pp(error)\n errors.close()\n exit(1)\n # ⏏ exit the program with an error\n else:\n print(\"no errors found - OK\")\n print()\n errors.close()", "def quieter():\n try:\n ttsEng.quieter()\n except Exception, e:\n logging.error(e)", "def fatal(self, msg, exitst):\n return libruss.russ_sconn_fatal(self._ptr, strtobytes(msg), exitst)", "def fatal(self, *args):\n self.mylog.critical(*args)\n sys.exit(1)", "def _log_crash_report():\n # For each crash report we find, dump its contents.\n # In theory we clean up after a crash so there should be only one.\n cwd = os.getcwd()\n for entry in os.listdir('.git'):\n if entry.startswith('fast_import_crash_'):\n with open(os.path.join(cwd, '.git', entry)) as f:\n report = f.read()\n # Keep the message free of repetition.\n LOG.error(\"git {}:\\n{}\".format(entry, report))", "def fatal ( self , message , *args , **kwargs ) :\n return self.logger.fatal ( message , *args , **kwargs )", "def test_explain_non_existent_code(self):\n command_line = [\"pool\", \"explain\", \"bogus\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def report_errors(errors):\n if len(errors) > 0:\n for error in errors:\n logger.debug(error)\n sys.exit(0)", "def listBadRefScripts(self):\n if not self.log: return\n ids = []\n for record in self.records:\n if record.name != 'SCPT': continue\n rnam = record.rnam\n if rnam and rnam.data == chr(255)*4:\n ids.append(record.getId())\n if ids:\n self.log.setHeader(_('Detached Global Scripts'))\n for id in sorted(ids,key=string.lower):\n self.log(id)", "def ignore_errors(self):\n field = \"ignore_errors\"\n value = \"y\"\n ignore_emptys = [step for step in self.all_steps if self.is_value(step, field, value)]\n self.add_all_issues(ignore_emptys, self.WARNINGS, self.issue_messages.ignore_insert_errors)", "def clear_errors(heroku_app=HEROKU_APP):\n subprocess.run(\n ['heroku', 'pg:psql', '--app', heroku_app],\n input=b'SELECT COUNT(*) FROM error_report;',\n )\n subprocess.run(\n ['heroku', 'pg:psql', '--app', heroku_app],\n input=b'DELETE FROM 
error_report;',\n )\n subprocess.run(\n ['heroku', 'pg:psql', '--app', heroku_app],\n input=b'SELECT COUNT(*) FROM error_report;',\n )", "def check_no_silent_crash(self, override=False):\n if self.results:\n score = self.results.linter.stats.get('global_note', False)\n if score is False:\n messages = self.results.linter.stats.get('by_msg', {})\n if messages.get('syntax-error', False) and not override:\n self.logging.warning('\\n------------------------------------------------------------------')\n self.logging.warning('PYLINT FAILED BECAUSE SYNTAX ERROR.')\n self.logging.warning('------------------------------------------------------------------')\n self.logging.warning('\\n')\n self.failed_files.append(self.fname)\n return False\n self.logging.info('\\n------------------------------------------------------------------')\n self.logging.info('FILE WAS IGNORED.')\n self.logging.info('------------------------------------------------------------------')\n return True\n return False", "def clean_errors(self):\n self._vim.eval('clearmatches()')\n self._errors = []\n self._matches = []\n # Reset Syntastic notes - TODO: bufdo?\n self._vim.current.buffer.vars['ensime_notes'] = []" ]
[ "0.61938083", "0.6015998", "0.59813035", "0.5981033", "0.5679613", "0.55990446", "0.550483", "0.54907763", "0.5387474", "0.5358392", "0.5331162", "0.53206944", "0.5311066", "0.5309457", "0.5303004", "0.5290556", "0.5203423", "0.5195811", "0.51785743", "0.517628", "0.51631856", "0.51450163", "0.5143785", "0.51363826", "0.5134749", "0.51189876", "0.51103044", "0.5080967", "0.506726", "0.50608265" ]
0.69902325
0
launch an arangobench instance to the frontend of this starter
def launch_arangobench(self, testacse_no, moreopts=None): arangobench = ArangoBenchManager(self.cfg, self.get_frontend()) arangobench.launch(testacse_no, moreopts) return arangobench
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n tng.api.runner()", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def launch_test():\n import sys\n from kothrak.envs.KothrakEnv import KothrakEnv\n from kothrak.envs.game.MyApp import style\n from PyQt5.QtWidgets import QApplication, QWidget\n\n qapp = QApplication(sys.argv)\n qapp.setStyleSheet(style)\n window = QWidget()\n window.setWindowTitle('Kothrak training')\n\n env = KothrakEnv(qapp, window)\n window.show()\n\n trainer = Trainer(env)\n # trainer.load('saves/031421-1523.zip')\n trainer.run()\n\n qapp.exec_()", "def launch_analysis_v2():\n\n # add explicit instructions for user\n\n os.system(\"pip install -r requirements.txt\")\n os.chdir(f'{os.getcwd()}/gui')\n\n # explicit version checking\n if os.system(\"node -v\") != 0:\n print(\"Please install node before proceeding.\")\n exit(-1)\n\n if os.system(\"npm install\") != 0:\n print(\"Could not install npm packages. \")\n\n os.system(\"npm run start-backend &\")\n os.system(\"npm start\")", "def launch(self):", "def main():\n driver = Driver()\n driver.start()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def main():\n\n # Chdir into script directory so to properly resolve relative paths in configuration\n os.chdir(os.path.dirname(os.path.realpath(__file__)) + \"/\")\n\n # Disable proxy as we access localhost, both to avoid overhead and issues with proxy misconfiguration\n os.environ['NO_PROXY'] = '*'\n\n # Stop any GraphDB server that we previously started and is possibly still around due to script interruption/crash\n shell(f\"{cmd_graphdb} stopall\")\n\n # Generate synthetic traces, both for populating the repositories and for the {sf, sp, pf, pp} tests\n prepare_traces()\n \n # Generate central repositories (if needed)\n for size, approach in itertools.product(sizes, approaches):\n prepare_repository(size, approach)\n \n # Run experiments (if needed)\n for size, approach in itertools.product(sizes, approaches):\n run_experiments(size, approach)", "def run_experiment():\n pass", "def main():\n configuration = {'resource-folder': 'resources',\n 'build-folder': 'build',\n 'log-folder': 'logfiles',\n 'use-preloaded': False,\n 'addi-metrics': 'addi-metrics.json',\n 'jenkins': {'dependency-filename': 'dependencies.txt',\n 'server': 'http://is.dbc.dk',\n 'repository-project': 'opensearch-3rd-party-dependencies'},\n 'log-zip-file':'logs.zip'}\n configuration.update(cli())\n setup_logger(configuration['verbose'])\n run_performance_test(configuration)", "def main():\n indicator = AyatanaIndicator()\n indicator.run()", "def main():\n grid_tester_cpu = GridTesterCPU()\n\n # parse args, load configuration and create all required objects.\n grid_tester_cpu.setup_grid_experiment()\n\n # GO!\n grid_tester_cpu.run_grid_experiment()", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = 
cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def startTest(asset):", "def main():\n subcommands = {\n \"train\": train.train,\n \"tune\": train_tune.train,\n \"predict\": predict.cli_predict,\n \"evaluate\": evaluate.cli_evaluate,\n \"version\": version,\n }\n\n try:\n import xarray_behave.gui.app\n\n subcommands[\"gui\"] = xarray_behave.gui.app.main_das\n except (ImportError, ModuleNotFoundError):\n logging.exception(\"No GUI avalaible.\")\n # fall back to function that displays helpful instructions\n subcommands[\"gui\"] = no_xb_gui\n\n logging.basicConfig(level=logging.INFO, force=True)\n defopt.run(subcommands, show_defaults=False)", "def launch_new_instance():\n import IPython\n\n IPython.Shell.start().mainloop()", "def main(_):\n description = xm.ExperimentDescription(\n FLAGS.exp_name, tags=[\n FLAGS.env_name,\n ])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def main(self, argv):\n\n np.random.seed(42)\n self.setup_logging()\n self.command_line(argv)\n start_time = time.time()\n\n logging.info(\"Starting Kaggle-CTMI Experiment\\n\")\n\n logging.info(\"Finding data and groundtruth...\")\n cohort = Cohort(self.shaip)\n train_cohort, test_cohort = cohort.split_cohort_train_test(0.3)\n logging.info(\"Found %d datasets\", cohort.size)\n\n if self.args.train:\n logging.info(\"Training on %d datasets...\", train_cohort.size)\n model = self.algorithm.train(train_cohort)\n Algorithm.save_model(model, self.shaip.models_dir + 'model')\n else:\n logging.info(\"Skipping training, model saved from earlier run\")\n model = self.algorithm.load_model(self.shaip.models_dir + 'model')\n\n if self.args.predict:\n logging.info(\"Prediction on %d datasets...\", test_cohort.size)\n test_predictions = self.algorithm.predict(model, test_cohort)\n else:\n logging.info(\"Skipping prediction, using predictions from earlier run\")\n # TODO: need to sort out caching of predictions\n test_predictions = None\n\n if self.args.evaluate:\n logging.info(\"Generating results to ShaipWorkspace/outputs/results/index.html...\")\n self.results.show_results(train_cohort, test_cohort,\n self.algorithm.history, test_predictions)\n\n logging.info(\"Kaggle-CTMI Experiment done in %4.1f seconds.\\n\", (time.time() - start_time))", "def run_interactive():\n from cherrypy import engine\n \n # This is what quickstart does but we don't block\n engine.signals.subscribe()\n engine.start()\n #engine.block()", "def main(self):\n\n def _run(args):\n kwargs = vars(args)\n if kwargs.get('host', None) is not None:\n self.config['HOST'] = kwargs.pop('host')\n if kwargs.get('port', None) is not None:\n self.config['PORT'] = kwargs.pop('port')\n self.config['PROFILE'] = kwargs.pop('profile')\n self.config['DEBUG'] = kwargs.pop('debug')\n self.run()\n\n parser = argparse.ArgumentParser(\n description=\"signac-dashboard is a web-based data visualization \"\n \"and analysis tool, part of the signac framework.\")\n parser.add_argument(\n '--debug',\n action='store_true',\n help=\"Show traceback on error for debugging.\")\n parser.add_argument(\n '--version',\n action='store_true',\n help=\"Display the version number and exit.\")\n subparsers = parser.add_subparsers()\n\n parser_run = subparsers.add_parser('run')\n parser_run.add_argument(\n '-p', 
'--profile',\n action='store_true',\n help='Enable flask performance profiling.')\n parser_run.add_argument(\n '-d', '--debug',\n action='store_true',\n help='Enable flask debug mode.')\n parser_run.add_argument(\n '--host', type=str,\n help='Host (binding address). Default: localhost')\n parser_run.add_argument(\n '--port', type=int,\n help='Port to listen on. Default: 8888')\n parser_run.set_defaults(func=_run)\n\n # This is a hack, as argparse itself does not\n # allow to parse only --version without any\n # of the other required arguments.\n if '--version' in sys.argv:\n print('signac-dashboard', __version__)\n sys.exit(0)\n\n args = parser.parse_args()\n\n if args.debug:\n logger.setLevel(logging.DEBUG)\n\n if not hasattr(args, 'func'):\n parser.print_usage()\n sys.exit(2)\n try:\n self.observer.start()\n args.func(args)\n except RuntimeWarning as warning:\n logger.warning(\"Warning: {}\".format(warning))\n if args.debug:\n raise\n sys.exit(1)\n except Exception as error:\n logger.error('Error: {}'.format(error))\n if args.debug:\n raise\n sys.exit(1)\n finally:\n self.observer.stop()\n self.observer.join()", "def init():\n\n @click.command()\n @click.option('--approot', type=click.Path(exists=True),\n envvar='TREADMILL_APPROOT', required=True)\n @click.option('--instance', help='Publisher instance.')\n def run(approot, instance):\n \"\"\"Starts discovery publisher process.\"\"\"\n tm_env = appenv.AppEnvironment(approot)\n publisher = endpoints.EndpointPublisher(tm_env.endpoints_dir,\n context.GLOBAL.zk.conn,\n instance=instance)\n publisher.run()\n\n return run", "def launch(self):\n self.register_env_creator()\n\n # All worker nodes will block at this step during training\n ray_cluster_config = self.ray_init_config()\n if not self.is_master_node:\n return\n\n # Start the driver on master node\n ray.init(**ray_cluster_config)\n experiment_config = self.get_experiment_config()\n experiment_config = self.customize_experiment_config(experiment_config)\n print(\"Running experiment with config %s\" % json.dumps(experiment_config, indent=2))\n run_experiments(experiment_config)\n\n all_wokers_host_names = self.get_all_host_names()[1:]\n # If distributed job, send TERMINATION_SIGNAL to all workers.\n if len(all_wokers_host_names) > 0:\n self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)", "def launch(config):\n \n launch_with_configs([config])", "def test_launch(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.launch(TOOLNAME,username,userpass)", "def start_run(self, context: RobotRunnerContext) -> None:\n rospy.init_node(\"robot_runner\", disable_signals=True)\n self.ina219_profiler = INA219ProfilerClient()\n self.cpu_mem_profiler = ResourceProfilerClient()", "def startapp():", "def __main() :\n launchTests()", "def main():\n\n parser = ArgumentParser()\n parser.add_argument('--config', '-c', type=str, required=True, help='Path to config file')\n parser.add_argument('--input', '-i', type=str, required=True, help='Path to video')\n parser.add_argument('--snapshot_path', '-s', type=str, required=False, default='', help='Path to snapshot')\n parser.add_argument('--out_scale', type=float, default=1.0, help='Output frame scale')\n parser.add_argument('--deploy', '-d', action='store_true', help='Execute in deploy mode')\n args = parser.parse_args()\n\n assert exists(args.config)\n assert exists(args.input)\n assert exists(args.snapshot_path + '.index')\n assert args.out_scale 
> 0.0\n\n task_monitor = get_monitor(args.config, snapshot_path=args.snapshot_path)\n task_monitor.demo(args.input, args.out_scale, args.deploy)", "def run_experiment(arguments):\n\n logging.info('Arguments: %s', arguments)\n\n # Get estimator\n estimator = get_estimator(arguments)\n # my_module.\n\n # Run training and evaluation\n _train_and_evaluate(estimator, arguments.job_dir)" ]
[ "0.6429091", "0.6309221", "0.62545246", "0.61604047", "0.6152724", "0.60954607", "0.6065459", "0.6057383", "0.60311", "0.59965384", "0.59894437", "0.5950306", "0.58948225", "0.5882779", "0.5840209", "0.5838446", "0.5817836", "0.58095634", "0.5783761", "0.5774965", "0.57688946", "0.576136", "0.5720185", "0.5715629", "0.57140607", "0.5701221", "0.5699784", "0.5675939", "0.5670083", "0.5669999" ]
0.78506184
0
in active failover detect whether we run the leader
def detect_leader(self): # Should this be moved to the AF script? lfs = self.read_db_logfile() became_leader = lfs.find("Became leader in") >= 0 took_over = lfs.find("Successful leadership takeover:" + " All your base are belong to us") >= 0 self.is_leader = became_leader or took_over if self.is_leader: url = self.get_frontend().get_local_url("") reply = requests.get(url, auth=requests.auth.HTTPBasicAuth("root", self.passvoid), timeout=120) print(f"{url} => {str(reply)}") if reply.status_code == 503: self.is_leader = False return self.is_leader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def active_failover_detect_host_now_follower(self):\n self.check_that_instance_is_alive()\n lfs = self.get_log_file()\n if lfs.find(\"resilientsingle up and running as follower\") >= 0:\n self.is_master = False\n return True\n return False", "def probe_leader(self):\n # Should this be moved to the AF script?\n self.is_leader = False\n for instance in self.get_frontends():\n if instance.probe_if_is_leader():\n self.is_leader = True\n return self.is_leader", "def is_leader(self):\n return self.__is_leader", "def is_cluster_leader(self):\n return self.leader == 'self'", "def isLeader(self):\n return self.datacenter_id == self.leader_id", "def check_leader(leader=None):\n grastate_dat = '/var/lib/mysql/grastate.dat'\n grastate = open(grastate_dat)\n for line in grastate.readlines():\n if 'safe_to_bootstrap' in line and '1' in line:\n leader = True\n if not leader:\n print 'It may not be safe to bootstrap the cluster from this node.'\n print 'It was not the last one to leave the cluster and may not contain all the updates.'\n print 'To force cluster bootstrap with this node, edit the {} file manually and set safe_to_bootstrap to 1'.format(grastate_dat)\n os.sys.exit(1)", "def is_cluster_leader(target, schema=None):\n try:\n return cluster_status(target, schema=schema).get('leader') == 'self'\n except subprocess.CalledProcessError:\n return False", "def is_elected_leader(resource):\n if is_clustered():\n if not is_crm_leader(resource):\n log('Deferring action to CRM leader.', level=INFO)\n return False\n else:\n peers = peer_units()\n if peers and not oldest_peer(peers):\n log('Deferring action to oldest service unit.', level=INFO)\n return False\n return True", "def start_election(self):\n print \"---------\\nStarting an election...\\n---------\"\n processes = self.get_processes()\n if len(processes) == 0:\n print \"Not enough servers up yet. Cannot initiate election.\"\n return \"Not enough servers up yet.\"\n higher_active_process = False\n for uid, server in processes.iteritems():\n if uid <= self.uid:\n continue # only contact higher processes\n try:\n ack = server.elect_leader()\n if (ack == \"I am leader.\"):\n self.global_time_server = server\n self.time_server_set = True\n print \"OUTCOME:\\nLeader is %d\\n---------\"%(uid)\n higher_active_process = True\n break\n except socket.error:\n pass\n if higher_active_process:\n return \"I am NOT leader.\"\n else:\n self.am_leader = True\n self.time_server_set = True\n print \"OUTCOME:\\nI am leader.\\n---------\"\n return \"I am leader.\"", "def leader(self):\n pass", "def leader(self):\n pass", "def this_needs_work_test_hook_leader_elected(\n self, h_is_leader, h_leader_set\n ):\n self.do_test_we_are_the_leader(h_is_leader, h_leader_set)", "def active_failover_detect_hosts(self):\n self.check_that_instance_is_alive()\n # this is the way to detect the master starter...\n lfs = self.get_log_file()\n if lfs.find(\"Just became master\") >= 0:\n self.is_master = True\n else:\n self.is_master = False\n regx = re.compile(r\"Starting resilientsingle on port (\\d*) .*\")\n match = regx.search(lfs)\n if match is None:\n raise Exception(timestamp() + \"Unable to get my host state! 
\" + self.basedir + \" - \" + lfs)\n\n self.frontend_port = match.groups()[0]", "def is_crm_leader(resource):\n cmd = [\n \"crm\", \"resource\",\n \"show\", resource\n ]\n try:\n status = subprocess.check_output(cmd)\n except subprocess.CalledProcessError:\n return False\n else:\n if get_unit_hostname() in status:\n return True\n else:\n return False", "def election_winner():\n\t\tglobal leader_ip\n \t\tleader_ip = '10.1.0.{}'.format(request.forms.get('winning_id'))\n \t\tprint(\"new leader is {}\".format(leader_ip))\n \t\treturn False", "def failover_target(self) -> bool:\n return pulumi.get(self, \"failover_target\")", "def attempt_to_acquire_leader(self, permanent=False):", "def check_server_activity(self):\n if (self.am_leader == True):\n return \"Time server connected.\"\n elif (self.time_server_set == False):\n print \"I am not aware of a time server. Fetching from existing process.\"\n if (self.fetch_time_server() == False):\n print \"Fetch failed. Electing a leader.\"\n self.start_election()\n if self.time_server_not_responding():\n print \"The time server is not responding.\" \n self.start_election()\n return \"Time server elected.\"", "def test_bootstrap_source_not_leader(self):\n self.is_leader.return_value = False\n ceph_hooks.bootstrap_source_relation_changed()\n self.assertEqual(self.leader_set.call_count, 0)", "def is_current_node_active(self, device, partition):\n if self.is_version_sufficient(min_version='11.3.0') is False:\n print \"!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!\"\n print \"! UNABLE TO VERIFY FAILOVER STATE !\"\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n stop = raw_input('Do you want to continue? [y|N]')\n if stop.strip() == \"y\" or stop.strip() == \"Y\":\n return True\n else:\n return False\n \"\"\" Determines if the connect device is the master, if not Bail with an error.\"\"\"\n try:\n self.connection.System.Session.set_active_folder(\"/Common\")\n status = self.connection.Management.Device.get_failover_state([device])\n if status == ['HA_STATE_ACTIVE']:\n self.connection.System.Session.set_active_folder(\"/\"+partition)\n return True\n else:\n return False\n except:\n raise Exception(\"Failed to determine if {} is a master\".format(device))", "def same_user_or_shiftleader(self, user):\n try:\n return (\n self.get_object().userid == user\n or user.is_superuser\n or user.userprofile.has_shift_leader_rights\n )\n except UserProfile.DoesNotExist:\n return False", "def start_leader_election():\n\t\ttime.sleep(5)\n\t\ttry:\n\n\t\t\tprint(\"starting the election... 
\")\n\t\t\tthread = Thread(target=contact_vessel,args=(next_address(),\"/election/electing\",{'start_id':node_id,'highest_value':randomized_value,'winning_id':node_id}))\n\t\t\tthread.daemon = True\n\t\t\tthread.start()\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn True", "def is_running(self):\n\t\treturn self in _running", "def take_leader(self):", "def running(self):\n\t\treturn self._start is not None", "async def is_launcher(ctx):\n member = ctx.message.author\n staff = await is_staff(ctx)\n lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)\n if staff or lhRole in member.roles: return True", "def isstarted():", "def running(self) -> bool:", "def mmo_replset_has_primary(self, mmo_connection, rs):\n rs_status = self.mmo_execute_on_secondaries(mmo_connection, { \"replSetGetStatus\": 1 }, replicaset=rs, first_available_only=True)\n has_primary = False\n\n\n for member in rs_status[0][\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n has_primary = True\n break\n return has_primary", "def is_alive(self):" ]
[ "0.73940504", "0.7367778", "0.72519314", "0.724043", "0.7167702", "0.70824057", "0.69250184", "0.69241196", "0.67583", "0.65203124", "0.65203124", "0.6361834", "0.6304819", "0.6293747", "0.6229859", "0.6190928", "0.61597836", "0.604024", "0.60138893", "0.5951151", "0.58495647", "0.5820223", "0.5815441", "0.5748331", "0.574193", "0.5720468", "0.56876665", "0.5686061", "0.5684562", "0.5654774" ]
0.7651802
0
talk to the frontends to find out whether its a leader or not.
def probe_leader(self): # Should this be moved to the AF script? self.is_leader = False for instance in self.get_frontends(): if instance.probe_if_is_leader(): self.is_leader = True return self.is_leader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_leader(self):\n # Should this be moved to the AF script?\n lfs = self.read_db_logfile()\n\n became_leader = lfs.find(\"Became leader in\") >= 0\n took_over = lfs.find(\"Successful leadership takeover:\" + \" All your base are belong to us\") >= 0\n self.is_leader = became_leader or took_over\n if self.is_leader:\n url = self.get_frontend().get_local_url(\"\")\n reply = requests.get(url, auth=requests.auth.HTTPBasicAuth(\"root\", self.passvoid), timeout=120)\n print(f\"{url} => {str(reply)}\")\n if reply.status_code == 503:\n self.is_leader = False\n return self.is_leader", "def leader(self):\n pass", "def leader(self):\n pass", "def is_leader(self):\n return self.__is_leader", "def isLeader(self):\n return self.datacenter_id == self.leader_id", "def this_needs_work_test_hook_leader_elected(\n self, h_is_leader, h_leader_set\n ):\n self.do_test_we_are_the_leader(h_is_leader, h_leader_set)", "def is_cluster_leader(self):\n return self.leader == 'self'", "def start_election(self):\n print \"---------\\nStarting an election...\\n---------\"\n processes = self.get_processes()\n if len(processes) == 0:\n print \"Not enough servers up yet. Cannot initiate election.\"\n return \"Not enough servers up yet.\"\n higher_active_process = False\n for uid, server in processes.iteritems():\n if uid <= self.uid:\n continue # only contact higher processes\n try:\n ack = server.elect_leader()\n if (ack == \"I am leader.\"):\n self.global_time_server = server\n self.time_server_set = True\n print \"OUTCOME:\\nLeader is %d\\n---------\"%(uid)\n higher_active_process = True\n break\n except socket.error:\n pass\n if higher_active_process:\n return \"I am NOT leader.\"\n else:\n self.am_leader = True\n self.time_server_set = True\n print \"OUTCOME:\\nI am leader.\\n---------\"\n return \"I am leader.\"", "def is_elected_leader(resource):\n if is_clustered():\n if not is_crm_leader(resource):\n log('Deferring action to CRM leader.', level=INFO)\n return False\n else:\n peers = peer_units()\n if peers and not oldest_peer(peers):\n log('Deferring action to oldest service unit.', level=INFO)\n return False\n return True", "def election_winner():\n\t\tglobal leader_ip\n \t\tleader_ip = '10.1.0.{}'.format(request.forms.get('winning_id'))\n \t\tprint(\"new leader is {}\".format(leader_ip))\n \t\treturn False", "def take_leader(self):", "def is_cluster_leader(target, schema=None):\n try:\n return cluster_status(target, schema=schema).get('leader') == 'self'\n except subprocess.CalledProcessError:\n return False", "def test_03_leaderboard(self):\r\n # As Anonymou user\r\n url = \"/leaderboard\"\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def test_03_leaderboard(self):\r\n # As Anonymou user\r\n url = \"/leaderboard\"\r\n 
res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Leaderboard page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def check_server_activity(self):\n if (self.am_leader == True):\n return \"Time server connected.\"\n elif (self.time_server_set == False):\n print \"I am not aware of a time server. Fetching from existing process.\"\n if (self.fetch_time_server() == False):\n print \"Fetch failed. Electing a leader.\"\n self.start_election()\n if self.time_server_not_responding():\n print \"The time server is not responding.\" \n self.start_election()\n return \"Time server elected.\"", "def _update_leader(self):", "def check_leader(leader=None):\n grastate_dat = '/var/lib/mysql/grastate.dat'\n grastate = open(grastate_dat)\n for line in grastate.readlines():\n if 'safe_to_bootstrap' in line and '1' in line:\n leader = True\n if not leader:\n print 'It may not be safe to bootstrap the cluster from this node.'\n print 'It was not the last one to leave the cluster and may not contain all the updates.'\n print 'To force cluster bootstrap with this node, edit the {} file manually and set safe_to_bootstrap to 1'.format(grastate_dat)\n os.sys.exit(1)", "def leaderboard(self):\n pass", "async def leaderboard(self, ctx):\n settings = config.load_settings()\n if settings['guilds'][str(ctx.guild.id)][\"leveling\"] is True:\n guild = ctx.guild.id\n xp = config.load_xp()\n scores = {}\n if str(guild) in xp['guilds']:\n for user in xp['guilds'][str(guild)]:\n scores.update({ctx.guild.get_member(int(user)).display_name: xp['guilds'][str(guild)][user]['xp']})\n sorted_scores = collections.OrderedDict(sorted(scores.items(), key=lambda x: x[1], reverse=True))\n message = discord.Embed(title='Leaderboard', description=ctx.guild.name + \"'s most active users\")\n current_field = 1\n field_limit = 25\n for index, (key, value) in enumerate(sorted_scores.items()):\n if current_field <= field_limit:\n message.add_field(name=str(index+1) + \": \" + key,\n value=\"with: \" + str(value) + \" xp\",\n inline=False)\n current_field += 1\n else:\n break\n await ctx.send('', embed=message)\n else:\n await ctx.send(\"leveling is currently disabled on this server!\")", "def becomeLeader(self):\n logging.info('become leader for term {}'.format(self.current_term))\n\n # no need to wait for heartbeat anymore\n self.election_timer.cancel()\n\n self.role = 'leader'\n self.leader_id = self.datacenter_id\n # keep track of the entries known to be logged in each data center\n # note that when we are in the transition phase\n # we as the leader need to keep track of nodes in\n # the old and the new config\n self.loggedIndices = dict([(center_id, 0)\n for center_id in self.getAllCenterID()\n if center_id != self.datacenter_id])\n # initialize a record of nextIdx\n self.nextIndices = dict([(center_id, 
self.getLatest()[1]+1)\n for center_id in self.getAllCenterID()\n if center_id != self.datacenter_id])\n\n self.sendHeartbeat()\n self.heartbeat_timer = Timer(self.heartbeat_timeout, self.sendHeartbeat)\n self.heartbeat_timer.daemon = True\n self.heartbeat_timer.start()", "def do_test_we_are_the_leader(self, h_is_leader, h_leader_set):\n states = r_state.r_get_states()\n r_state.remove_state(LEADER_STATE)\n no_leader = r_state.r_get_states()\n r_state.set_state(LEADER_STATE)\n leader = r_state.r_get_states()\n self.assertNotEquals(no_leader, leader)\n self.assertEquals(no_leader.union(set([LEADER_STATE])), leader)\n\n is_leader_call_count = h_is_leader.call_count\n leader_set_call_count = h_leader_set.call_count\n # is_leader() fails\n h_is_leader.return_value = False\n testee.we_are_the_leader()\n self.assertEquals(no_leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 1, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 0, h_leader_set.call_count)\n\n def raise_fail(*args, **kwargs):\n \"\"\"\n Simulate a leader_set() failure.\n \"\"\"\n raise Exception(\"oops\")\n\n # is_leader() succeeds, but leader_set() fails\n h_is_leader.return_value = True\n h_leader_set.side_effect = raise_fail\n testee.we_are_the_leader()\n self.assertEquals(no_leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 2, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 1, h_leader_set.call_count)\n\n self.lset_args = None\n self.lset_kwargs = None\n\n def record_leader_set_args(*args, **kwargs):\n \"\"\"\n Make sure leader_set() was invoked with the correct parameters.\n \"\"\"\n self.lset_args = args\n self.lset_kwargs = kwargs\n\n # ...and now it all works out\n h_is_leader.return_value = True\n h_leader_set.side_effect = record_leader_set_args\n testee.we_are_the_leader()\n self.assertEquals(leader, r_state.r_get_states())\n self.assertEquals(is_leader_call_count + 3, h_is_leader.call_count)\n self.assertEquals(leader_set_call_count + 2, h_leader_set.call_count)\n self.assertEquals((), self.lset_args)\n self.assertEquals(\n {\"charm_storpool_block_unit\": sputils.MACHINE_ID}, self.lset_kwargs\n )\n\n r_state.r_set_states(states)", "def attempt_to_acquire_leader(self, permanent=False):", "async def elect_leader( request ):\n resource = request.match_info['resource']\n node = request.match_info['node']\n ttl = int( request.match_info['ttl'] )\n leader_election = await create_leader_election( redises, resource, node, ttl )\n try:\n leader = await leader_election.elect_leader()\n return web.json_response( {\"leader\": leader} , status = 200 )\n except Exception as ex:\n print(ex)\n return web.json_response( {\"error\": \"fail to elect leader\" }, status = 501 )", "def gatekeeper():\n\n if user.name in GATEKEEPERS:\n return True\n\n return False", "async def tod_status(self, ctx, *args):\n n = len(self.players)\n if n > 0:\n if n == 1:\n s = \"person\"\n else:\n s = \"people\"\n message = f\"A Truth or Dare game is currently taking place with {n} {s}!\"\n else:\n message = \"No Truth or Dare game is currently taking place.\"\n await ctx.send(message)", "def active_failover_detect_host_now_follower(self):\n self.check_that_instance_is_alive()\n lfs = self.get_log_file()\n if lfs.find(\"resilientsingle up and running as follower\") >= 0:\n self.is_master = False\n return True\n return False", "def find_leader(self):\r\n # Initialize the leader fitness as an arbitrarly bad value\r\n leaderFitness = -(2**63)\r\n \r\n for number in 
range(POPULATION_SIZE):\r\n if self.population[number].current_fitness > leaderFitness:\r\n leaderFitness = self.population[number].current_fitness\r\n self.leader = number", "def tellIfStarted(self):\n if self.game_number == 1:\n self.welcome()\n else:\n self.tellGameNumber()", "def on_join(data):\n print(str(data))\n if models.Leaderboard.query.filter_by(\n username=data['user']).first() is None:\n add_user(data['user'])\n users, scores = calculate_scores()\n socketio.emit('leaderboard_info', {'users': users, 'scores': scores})", "def leaderboard():\n # Get leaderboard and user information\n leaderboard, current_user_info = gdb.getleaderboard(current_user.userID)\n # Get top gainer leaderboards\n weektopgainers, monthtopgainers = gdb.gettopgainers()\n # Render template\n return render_template('leaderboard.html',\n leaderboard=leaderboard,\n current_user_info=current_user_info,\n weektopgainers=weektopgainers,\n monthtopgainers=monthtopgainers,\n userbalance=current_user.balance)" ]
[ "0.7640331", "0.74470776", "0.74470776", "0.74285924", "0.713586", "0.6762263", "0.6751401", "0.6734137", "0.6681536", "0.65391123", "0.64464325", "0.629198", "0.6276058", "0.62618124", "0.61972916", "0.61910975", "0.6164121", "0.59911203", "0.5944456", "0.5918337", "0.5872921", "0.5842132", "0.58410966", "0.5818992", "0.5804581", "0.57784945", "0.5749074", "0.5745227", "0.5738587", "0.57067996" ]
0.78123844
0
detect hosts for the active failover
def active_failover_detect_hosts(self):
    self.check_that_instance_is_alive()
    # this is the way to detect the master starter...
    lfs = self.get_log_file()
    if lfs.find("Just became master") >= 0:
        self.is_master = True
    else:
        self.is_master = False
    regx = re.compile(r"Starting resilientsingle on port (\d*) .*")
    match = regx.search(lfs)
    if match is None:
        raise Exception(timestamp() + "Unable to get my host state! " + self.basedir + " - " + lfs)
    self.frontend_port = match.groups()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sniff_hosts(self):\n previous_sniff = self.last_sniff\n hosts = []\n try:\n # reset last_sniff timestamp\n self.last_sniff = time.time()\n try:\n hosts = self.get_es_node_addresses()\n except Exception:\n raise TransportError(\"N/A\", \"Unable to sniff hosts.\" + traceback.format_exc())\n except:\n # keep the previous value on error\n self.last_sniff = previous_sniff\n raise\n\n # we weren't able to get any nodes, maybe using an incompatible\n # transport_schema or host_info_callback blocked all - raise error.\n if not hosts:\n raise TransportError(\"N/A\", \"Unable to sniff hosts - no viable hosts found.\" + traceback.format_exc())\n\n self.set_connections(hosts)", "def all_hosts(self):\n ...", "def getHosts(self):\n raise \"not implemented\"", "def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)", "def get_hosts(self, target, listener_type):", "def select_active_hosts():\n return IMPL.select_active_hosts()", "def get_hosts_retry(self, target, listener_type):", "def _get_hosts_from_state(state):\n active_nodes = set()\n for shard, shard_data in state.get('shards', {}).items():\n replicas = shard_data['replicas']\n for replica, replica_data in replicas.items():\n if replica_data['state'] == 'active':\n active_nodes.add(replica_data['base_url'])\n\n return active_nodes", "def get_hosts(self):\n\n raise NotImplementedError", "def check_hosts(zk,host_name,task,scheduler_log):\n\n #scheduler_log.debug(\"Scheduler Working...!!!\")\n try:\n #Leader Election\n leader = leaderCheck(zk=zk)\n #scheduler_log.debug(\"Leader Election Over\")\n #Update alive status to zookeeper - seems unnecessary\n imalive(zk=zk)\n #scheduler_log.debug(\"Alive Status Updated\")\n\n #If current Host is the Leader perform Scheduled Checks \n if (leader == host_name):\n scheduler_log.debug(\"%s : I am the Leader\"%host_name)\n\n #Fetch List of Hosts - From API\n host_dict = list_hosts(nova)\n allhosts = host_dict['all_list']\n api_down_nodes = host_dict['down_list']\n dishosts = host_dict['disabled_list']\n\n zk_all = zk.get_children(\"/openstack_ha/hosts/all\")\n zk_alive = zk.get_children(\"/openstack_ha/hosts/alive\")\n \n #Fetch Down nodes that are already Handeled - From Zookeeper\n zk_down = zk.get_children(\"/openstack_ha/hosts/down\")\n\n #Fetch nodes that are down and not already handled - From Zookeeper\n calculated_down_nodes = list(set(zk_all) - set(zk_alive))\n\n #Find Nodes Where Scheduler Only failed\n scheduler_down = list(set(calculated_down_nodes).difference(set(api_down_nodes)))\n for node in scheduler_down:\n scheduler_log.debug(\"HA Scheduler Failed on Node : %s \"%node)\n \n #Find Nodes Where API Only failed \n api_down = list(set(api_down_nodes).difference(set(calculated_down_nodes)))\n for node in api_down:\n scheduler_log.debug(\"API Failed on Node : %s \"%node)\n if node not in zk_all:\n scheduler_log.debug(\"HA Scheduler not even initialized %s\"%node)\n\n #Find nodes where both API and Zookeeper are failed \n api_scheduler_down = list(set(api_down_nodes).intersection(set(calculated_down_nodes)))\n\n # Possible Host states - Api only failure | Complete Host Failure ( Not yet Handled | Handling | Handled )\n if(len(api_scheduler_down))==0:\n scheduler_log.debug(\"Hosts working Normally....!!!\")\n else:\n scheduler_log.warning(\"More likely Disaster\")\n #skip if maintance\n # Here check the host in api_down_nodes(api) are present in calculated_down_nodes\n #if present start the instance migrations\n # Checking whether Cluster is Still under HA Policy\n # high 
availabity contiditions\n if len(api_scheduler_down) <= len(allhosts) - 1:\n scheduler_log.warn(\"Seems like Manageble Disaster\")\n for host in api_scheduler_down:\n scheduler_log.warning(\"Both Api and HA scheduler on\" +host+\" are down\")\n #checks whether down host from api is un handled(not present in down node calculate from zookeeper )\n #(host in zk_all and host not in zk_alive) == calculated_down_nodes\n if host in zk_down:\n #Node will present in zk_down only when all of it's instances are migrated\n scheduler_log.debug(\"Host %s Already handled...!!!!!\"%host)\n else:\n #Node down on api,zk and ( not handled | handling )\n if host not in dishosts:\n #Node Not disabled | disabled reason is not skippable\n scheduler_log.debug(host+\" is not disabled or reason is not maintenance\")\n if(zk.exists(\"/openstack_ha/hosts/time_out/\"+host)==None):\n scheduler_log.debug(\"Inside Time out Node Creation\")\n \n #adding host down time\n host_down_time = time.time()\n host_down_time = str.encode(str(host_down_time))\n scheduler_log.debug(host_down_time)\n zk.create(\"/openstack_ha/hosts/time_out/\"+host, host_down_time)\n \n #adding time_suffix for json_dump file name\n temp_time=time.localtime(time.time()) \n time_suffix=str(temp_time.tm_mday)+\"_\"+str(temp_time.tm_mon)+\"_\"+\\\n str(temp_time.tm_year)+\"_\"+str(temp_time.tm_hour)+\"_\"+\\\n str(temp_time.tm_min)\n enc_time_suffix=str.encode(time_suffix)\n scheduler_log.debug(time_suffix)\n zk.create(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\",enc_time_suffix)\n\n # call notification_mail(subj,msg) | Adding Down Node details to Notification \n try:\n subject = \"DGP Office VDI Node Down: %s\"%host\n message = \"Please Check the Network Connectivity and Powersupply as soon as possible\"\n notification_mail(subject,message,to_email=['[email protected]'])\n\n message = \"Please Contact System Administrator\"\n notification_mail(subject,message)\n scheduler_log.debug(\"mail in Scheduler...!\")\n except Exception as e:\n scheduler_log.debug(e)\n scheduler_log.debug(\"Error....! mail scheduler..!\")\n\n # add ping test\n ping_status=ping_check(host)\n if(ping_status):\n scheduler_log.debug(\"Not a Disaster\")\n scheduler_log.debug(\"ping test success....!!! Node is alive... 
Please Check the APIs,HA Scheduler and other Openstack Services\")\n\n else:\n scheduler_log.warning(\"Ping test also Failed on \"+host+\" proceed with migration\")\n if (zk.exists(\"/openstack_ha/hosts/start_migration/\"+ host)): # it checks the permission from the dashborad\n scheduler_log.warning(\" api down host :\"+host+\"present in zookeeper down_node:\")\n scheduler_log.debug(\"Strart migration....!!!!!\")\n scheduler_log.debug(\"migrating instances from the \"+host)\n tmp_time_suffix=zk.get(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\")[0]\n zk_time_suffix = tmp_time_suffix.decode() \n instance_migration(nova,api_down_nodes,task,zk_time_suffix)\n else:\n #check for time out\n scheduler_log.debug(\"Checking Timeout for Down Node\",host)\n curent_time = time.time()\n if (zk.exists(\"/openstack_ha/hosts/time_out/\"+host)):\n down_host_failuretime = zk.get(\"/openstack_ha/hosts/time_out/\"+host)[0]\n down_host_failuretime = down_host_failuretime.decode(encoding='UTF-8')\n scheduler_log.warning(\"down_host_failuretime\",down_host_failuretime)\n down_host_failuretime = float(down_host_failuretime)\n time_interval = curent_time - down_host_failuretime\n if time_interval>migrate_time:\n tmp_time_suffix=zk.get(\"/openstack_ha/hosts/time_out/\"+host+\"/time_suffix\")[0]\n zk_time_suffix = tmp_time_suffix.decode()\n instance_migration(nova,api_down_nodes,task,zk_time_suffix)\n else:\n scheduler_log.debug(\"Will Wait for another %d\"%(migrate_time-time_interval))\n else:\n scheduler_log.debug(\"%s Node Does'nt have TimeOut Value. Hence will not migrate forever\"%host)\n else:\n scheduler_log.debug(\"Host %s Under Maintenance\"%host)\n \n else:\n scheduler_log.warning(\"Un-Manageble Disaster Too many Nodes are down\")\n else:\n scheduler_log.debug(\"%s : Leader is %s\"%(host_name,leader))\n\n except Exception as e:\n if issubclass(e.__class__,kexception.NoNodeError):\n scheduler_log.exception(\"No node error\")\n elif any(issubclass(e.__class__, lv) for lv in kazoo_exceptions):\n scheduler_log.exception(\"Kazoo Exception.....: \")\n time.sleep(2)\n try:\n zk = KazooClient(hosts='127.0.0.1:2181')\n zk.start() \n Node_creation = createNodeinAll(zk=zk, host_name=host_name)\n election_Node = election_node(zk=zk, host_name=host_name)\n except:\n pass\n else:\n scheduler_log.warning(\"Unhandled Error \")\n scheduler_log.exception(\"\")", "def active_failover_detect_host_now_follower(self):\n self.check_that_instance_is_alive()\n lfs = self.get_log_file()\n if lfs.find(\"resilientsingle up and running as follower\") >= 0:\n self.is_master = False\n return True\n return False", "def getHostInfo():", "def set_hosts(self, hypervisor_per_cluster=False):\n\n self.conf['hosts'] = set()\n\n host_patterns, host_others = self._sift_patterns(\n self.conf.get('hosts_list')\n )\n datacenter_patterns = self.conf.get('datacenter', [])\n cluster_patterns = self.conf.get('cluster', [])\n\n if host_patterns:\n self.conf['host_pattern'] = host_patterns\n\n self.conf['hosts'] = self._get_hypervisors_from_api()\n # Filter all host specified with -H\n host_filtered = set()\n if host_others:\n host_filtered = set([\n (dc, cl, h, is_spm, is_up)\n for dc, cl, h, is_spm, is_up in self.conf['hosts']\n if h in host_others\n ])\n not_found = host_others - set(host[2] for host in host_filtered)\n if not_found != set():\n # try to resolve to ip specified hosts\n for fqdn in set(not_found):\n try:\n ipaddr = socket.gethostbyname(fqdn)\n logging.debug('%s --> %s' % (fqdn, ipaddr))\n for (dc, cl, h, is_spm, is_up) in 
self.conf['hosts']:\n if h == ipaddr:\n host_filtered.add((dc, cl, h, is_spm, is_up))\n not_found.remove(fqdn)\n except socket.error:\n logging.warning(\n _('Cannot resolve {host}').format(\n host=fqdn,\n )\n )\n if not_found != set():\n # try to resolve to ip known hypervisors\n for (dc, cl, h, is_spm, is_up) in self.conf['hosts']:\n try:\n ipaddr = socket.gethostbyname(h)\n logging.debug('%s --> %s' % (h, ipaddr))\n if ipaddr in host_others:\n host_filtered.add((dc, cl, h, is_spm, is_up))\n not_found.remove(ipaddr)\n except socket.error:\n logging.warning(\n _('Cannot resolve {host}').format(\n host=h,\n )\n )\n if not_found != set():\n logging.error(\n _(\n 'The following host are not listed as hypervisors: '\n '{not_listed}. Known hypervisors can be listed using '\n 'the list command'\n ).format(\n not_listed=','.join(not_found)\n )\n )\n sys.exit(ExitCodes.CRITICAL)\n\n orig_hosts = self.conf['hosts'].copy()\n\n if host_patterns:\n for pattern in host_patterns:\n host_filtered |= self._filter_hosts('host', pattern)\n if host_patterns or host_others:\n self.conf['hosts'] &= host_filtered\n\n # Intersect with hosts belonging to the data centers specified with -d\n if datacenter_patterns:\n datacenter_filtered = set()\n for pattern in datacenter_patterns:\n datacenter_filtered |= self._filter_hosts(\n 'datacenter', pattern\n )\n self.conf['hosts'] &= datacenter_filtered\n\n # Intersect with hosts belonging to the clusters specified with -c\n if cluster_patterns:\n # remove all hosts that don't match the patterns\n cluster_filtered = set()\n for pattern in cluster_patterns:\n cluster_filtered |= self._filter_hosts('cluster', pattern)\n self.conf['hosts'] &= cluster_filtered\n\n # If hypervisor_per_cluster is set, collect data only from a single\n # hypervisor per cluster; if the Spm found, collect data from it.\n if hypervisor_per_cluster:\n selected_hosts = dict()\n for dc, cluster, host, is_spm, is_up in self.conf['hosts']:\n # Always add the SPM\n if is_spm:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n # For the given cluster, if no host added yet, add it\n elif cluster.name not in selected_hosts:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n # If a host is up and the SPM isn't added yet, add this host\n elif is_up and not selected_hosts[cluster.name][3]:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n self.conf['hosts'] &= set(selected_hosts.values())\n\n # warn users if they are going to collect logs from all hosts.\n if orig_hosts and self.conf['hosts'] == orig_hosts:\n logging.warning(\n _(\n 'This ovirt-log-collector call will collect logs from '\n 'all available hosts. 
This may take long time, '\n 'depending on the size of your deployment'\n )\n )\n\n return bool(self.conf.get('hosts'))", "def get_upgradable_hosts(dbapi):\n all_hosts = dbapi.ihost_get_list()\n # TODO:(mingyuan) Exclude edgeworker host from upgradable hosts\n # until the final phase of the edgeworker feature completed\n hosts = [i for i in all_hosts if i.personality != constants.EDGEWORKER]\n\n return hosts", "def check_all_hosts (self, repo_version_id, version_name):\n if self.compare_versions(self.ambari_version, \"2.1.0\") < 0:\n query1 = \"SELECT chm.host_name from ClusterHostMapping chm JOIN clusters c ON c.cluster_name = '{0}';\".format(self.cluster_name)\n else:\n query1 = \"SELECT h.host_name from ClusterHostMapping chm JOIN clusters c ON c.cluster_name = '{0}' JOIN hosts h ON chm.host_id = h.host_id;\".format(self.cluster_name)\n\n if self.compare_versions(self.ambari_version, \"2.1.0\") < 0:\n query2 = \"SELECT hv.host_name, hv.state FROM host_version hv WHERE hv.repo_version_id = {0};\".format(repo_version_id)\n else:\n #query2 = \"SELECT hv.state,h.host_name FROM hosts h JOIN host_version hv ON h.host_id = hv.host_id WHERE hv.repo_version_id = {0};\".format(repo_version_id)\n query2 = \"SELECT hv.state,h.host_name, hs.health_status,hs.agent_version,(h.total_mem/1024/1024) as total_mem_gb,(hs.available_mem/1024/1024) as available_mem_gb FROM hosts h JOIN host_version hv ON h.host_id = hv.host_id JOIN hoststate hs ON h.host_id = hs.host_id WHERE hv.repo_version_id = {0} order by h.host_name;\".format(repo_version_id)\n # All cluster hosts\n host_names = set()\n self.cursor.execute(query1)\n rows = self.cursor.fetchall()\n if self.options.verbose:\n Logger.debug(query1 + \"\\n\")\n if rows and len(rows) > 0:\n host_names = set([row[0] for row in rows if len(row) == 1])\n Logger.debug(\"Hosts: {0}\".format(\", \".join(host_names)))\n\n host_name_to_state = {} # keys should be a subset of host_names\n hosts_with_repo_version_state_not_in_current = set()\n self.cursor.execute(query2 + \"\\n\")\n rows = self.cursor.fetchall()\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST(S) STATE\\t\")\n Logger.info(\"******************************************************************************************************************************************************\\n\")\n Logger.info(\"------------------------------------------------------------------------------------------------------------------------------------------------------\")\n Logger.info(\"State\\t\\tHostname\\t\\t\\t\\tHealth\\t\\tAgentVersion\\tTotalMemory\\tAvailableMemory\")\n Logger.info(\"------------------------------------------------------------------------------------------------------------------------------------------------------\")\n\n if rows and len(rows) > 0:\n for row in range(len(rows)):\n data = json.loads(rows[row][2])\n data1 = json.loads(rows[row][3])\n Logger.info(\"{0}\\t\\t{1}\\t\\t{2}\\t\\t{3}\\t\\t{4}\\t\\t{5}\".format(rows[row][0], rows[row][1], data[\"healthStatus\"], data1[\"version\"], rows[row][4], rows[row][5]))\n print (\"\\n\")\n Logger.debug(query2)\n if rows and len(rows) > 0:\n for row in rows:\n if len(row) == 6:\n host_name = row[1]\n state = row[0]\n host_name_to_state[host_name] = state\n if state.upper() != \"CURRENT\":\n hosts_with_repo_version_state_not_in_current.add(host_name)\n host_names_with_version = 
set(host_name_to_state.keys())\n host_names_without_version = host_names - host_names_with_version\n # Logger.info(\"\\t\\tHost(s) state Summary\")\n if len(host_names) > 0:\n if len(host_names_without_version) > 0:\n Logger.error(\"{0} host(s) do not have a Host Version for Repo Version {1}.\\n\" \\\n \"Host(s):\\n{2}\\n\".\n format(len(host_names_without_version), version_name, \", \".join(host_names_without_version)))\n\n if len(hosts_with_repo_version_state_not_in_current) > 0:\n Logger.error(\"{0} host(s) have a Host Version for Repo Version {1} but the state is not CURRENT.\\n\" \\\n \"Host(s):\\n{2}\\n\".\n format(len(hosts_with_repo_version_state_not_in_current), version_name, \", \".join(hosts_with_repo_version_state_not_in_current)))\n\n if len(host_names_without_version) == 0 and len(hosts_with_repo_version_state_not_in_current) == 0:\n Logger.info(\"Found {0} host(s) in the cluster, and all have a Host Version of CURRENT for \" \\\n \"Repo Version {1}. Things look good.\\n\".format(len(host_names), version_name))\n else:\n Logger.error(\"Make sure that all of these hosts are heartbeating, that they have the packages installed, the\\n\" \\\n \"hdp-select symlinks are correct, and that the services on these hosts have been restarated.\\n\")\n pass", "def hosts(self) -> List[str]:\n if self.head_host:\n return [self.head_host]\n else:\n return [replica.host for replica in self.pod_args['pods'][0]]", "def _get_active_hosts(self, object):\n\t\t## First, generate the negation list\n\t\tnegate_hosts = []\n\n\t\t## Hostgroups\n\t\tif object.has_key(\"hostgroup_name\"):\n\n\t\t\tfor hostgroup_name in self._get_list(object, 'hostgroup_name'):\n\t\t\t\tif hostgroup_name[0] == \"!\":\n\t\t\t\t\thostgroup_obj = self.get_hostgroup(hostgroup_name[1:])\n\t\t\t\t\tnegate_hosts.extend(self._get_list(hostgroup_obj,'members'))\n\n\t\t## Host Names\n\t\tif object.has_key(\"host_name\"):\n\t\t\tfor host_name in self._get_list(object, 'host_name'):\n\t\t\t\tif host_name[0] == \"!\":\n\t\t\t\t\tnegate_hosts.append(host_name[1:])\n\n\n\t\t## Now get hosts that are actually listed\n\t\tactive_hosts = []\n\n\t\t## Hostgroups\n\t\tif object.has_key(\"hostgroup_name\"):\n\n\t\t\tfor hostgroup_name in self._get_list(object, 'hostgroup_name'):\n\t\t\t\tif hostgroup_name[0] != \"!\":\n\t\t\t\t\tactive_hosts.extend(self._get_list(self.get_hostgroup(hostgroup_name),'members'))\n\n\t\t## Host Names\n\t\tif object.has_key(\"host_name\"):\n\t\t\tfor host_name in self._get_list(object, 'host_name'):\n\t\t\t\tif host_name[0] != \"!\":\n\t\t\t\t\tactive_hosts.append(host_name)\n\n\t\t## Combine the lists\n\t\treturn_hosts = []\n\t\tfor active_host in active_hosts:\n\t\t\tif active_host not in negate_hosts:\n\t\t\t\treturn_hosts.append(active_host)\n\n\t\treturn return_hosts", "def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def default_node_detector():\n ret = []\n try:\n hostname = socket.gethostname()\n ret.append(hostname)\n except socket.error:\n pass\n\n try:\n fqdn = socket.getfqdn()\n if fqdn not in ret:\n ret.append(fqdn)\n except socket.error:\n pass\n\n if any(ret):\n return ret\n else:\n return None", "def test_reports_enabled_hosts_as_up(self):\n compute1 = 
self.start_service('compute', host='host1')\n compute2 = self.start_service('compute', host='host2')\n hosts = self.scheduler.driver.hosts_up(self.context, 'compute')\n self.assertEqual(2, len(hosts))\n compute1.kill()\n compute2.kill()", "def enforce_hostnames(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"enforce_hostnames\")", "def all_hosts(*args, **kwargs):\n return True", "def test_vms_hosts(self):\n testflow.step(\"Check if VM's started on the same host\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) ==\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_vms_hosts(self):\n testflow.step(\"Check if VM's started on the same host\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) ==\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def slave_hosts(self) -> 'List[str]':\n raise NotImplementedError", "def verify_lag_host_connectivity(self):\n # Find all LACP hosts\n for lacp_id, host_options in self.host_options.items():\n if 'lacp' in host_options:\n # Found LACP host\n for dst_id in self.host_information:\n if lacp_id == dst_id:\n continue\n # Test connectivity to any other host (might be another LAG host)\n self.check_host_connectivity_by_id(lacp_id, dst_id)", "def ipaddrs( host ):\n return socket.gethostbyaddr(host)[2][0]", "def test_hostmgr_failover(self, failure_tester):\n hosts1 = self._get_hosts(failure_tester)\n\n leader1 = failure_tester.fw.get_leader_info(failure_tester.hostmgr)\n assert leader1\n assert 0 != failure_tester.fw.restart(failure_tester.hostmgr, \"leader\")\n\n failure_tester.wait_for_leader_change(failure_tester.hostmgr, leader1)\n failure_tester.reset_client()\n\n # verify that we can query the new leader\n def check_hosts():\n hosts2 = self._get_hosts(failure_tester)\n return len(hosts1) == len(hosts2)\n\n failure_tester.wait_for_condition(check_hosts)", "def get_host_list():\n gparr = GpArray.initFromCatalog(dbconn.DbURL(port = MASTER_PORT), utility = True)\n segs = gparr.getDbList()\n\n master = None\n standby_host = None\n segment_host_list = []\n\n for seg in segs:\n if seg.isSegmentStandby(current_role=True):\n standby_host = seg.getSegmentHostName()\n elif not seg.isSegmentMaster(current_role=True):\n segment_host_list.append(seg.getSegmentHostName())\n elif seg.isSegmentMaster(current_role=True):\n master = seg.getSegmentHostName()\n\n #Deduplicate the hosts so that we\n #dont install multiple times on the same host\n segment_host_list = list(set(segment_host_list))\n if master in segment_host_list:\n segment_host_list.remove(master)\n\n return (standby_host, segment_host_list)" ]
[ "0.6949691", "0.6628178", "0.6534761", "0.6527784", "0.64729506", "0.64609885", "0.64203495", "0.64133173", "0.63669676", "0.6323245", "0.6245519", "0.6221742", "0.6206262", "0.62053776", "0.61899376", "0.61697066", "0.61608654", "0.6157352", "0.6157352", "0.61356586", "0.612202", "0.6103853", "0.6093331", "0.60810935", "0.60810935", "0.6043534", "0.60243857", "0.5957719", "0.5951593", "0.5944126" ]
0.8014416
0
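A minimal standalone sketch of the log-parsing idea used by active_failover_detect_hosts above, run against an invented sample log string (the port value and surrounding text are illustrative placeholders, not taken from a real starter log):

    import re

    sample_log = (
        "Just became master\n"
        "Starting resilientsingle on port 8529 ...\n"
    )

    is_master = sample_log.find("Just became master") >= 0
    match = re.search(r"Starting resilientsingle on port (\d*) .*", sample_log)
    if match is None:
        raise RuntimeError("Unable to detect the frontend port from the sample log")
    frontend_port = match.groups()[0]
    print(is_master, frontend_port)  # -> True 8529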
detect whether we successfully respawned the instance, and it became a follower
def active_failover_detect_host_now_follower(self):
    self.check_that_instance_is_alive()
    lfs = self.get_log_file()
    if lfs.find("resilientsingle up and running as follower") >= 0:
        self.is_master = False
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n self.center_x = SCREEN_WIDTH / 2\n self.center_y = 600", "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n self.center_x = SCREEN_WIDTH / 2\n self.center_y = SCREEN_HEIGHT / 2\n self.angle = 0", "def is_alive(self):", "def _players_are_done(self):\n self._waiting_for_players = False\n if self.get_state_info(\"show_waiting_for\"):\n for p in self.all_players:\n p.remove_waiting_message()\n\n info = self.states[self.state]\n if \"post\" in info:\n info[\"post\"]()\n\n self._run_next_state()", "def check_finish(self):\r\n return not self.proc.is_alive()", "def is_alive(self):\n return True", "def alive(self):\n return True", "def respawn(self, xrespawn, yrespawn):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n #self.center_x = SCREEN_WIDTH / 2\n #self.center_y = SCREEN_HEIGHT / 2\n\n self.center_x = xrespawn\n self.center_y = yrespawn\n\n self.angle = 0\n\n self.cur_health = self.max_health", "def is_alive(self):\n pass", "def check( self ):\n\n if ( self.alive is not None ) \\\n and ( time.time() > ( self.alive + self.timeout ) ):\n return False\n return True", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def finished(self) -> bool:\n return self.turn == self.marbles", "def isalive():\n return 'alive'", "def was_followed(sender, instance, created, **kwargs):\n\n sendr = User.objects.get(id=instance.user_id)\n followed = User.objects.get(id=instance.followed_user_id)\n if created:\n notify.send(sender=sendr, recipient=followed, verb='followed',\n description=\"{} followed you.\".format(sendr.username))", "def _episode_success(self, observations):\n dist = self._env.get_metrics()[\"object_to_goal_distance\"]\n if (\n abs(dist) > self._success_distance\n or observations[\"gripped_object_id\"] != -1\n ):\n return False\n return True", "def is_instance_up(self):\n logging.debug(\"checking if starter instance booted: \" + str(self.basedir))\n if not self.instance.is_running():\n message = \"Starter Instance {0.name} is gone!\".format(self)\n logging.error(message)\n raise Exception(message)\n\n # if the logfile contains up and running we are fine\n lfs = self.get_log_file()\n regx = re.compile(r\"(\\w*) up and running \")\n for line in lfs.splitlines():\n match = regx.search(line)\n if match:\n groups = match.groups()\n if len(groups) == 1 and groups[0] == \"agent\":\n continue\n return True\n\n return False", "def have_i_lost(self):\n if self.life_points <= 0:\n self.running = False", "def update_alive(self):\n if (not self.proc is None) and (not self.proc.is_alive()):\n print(\"process died in error, destroying proxy object\")\n self.reset()", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def wait(self, *args):\n # TODO -- say something\n if self.finished_places == 7:\n self.finished_places += 1\n return super(Up, self).wait(*args)", "def should_keep_running(self):\n return len(self.party.active_users())", "def checkForcedPawnPromote(board, positions):\n\n #Get current position of the pawn\n posX = positions[1][0]\n posY = positions[1][1]\n\n #Get prev position of the pawn\n prevX = positions[0][0]\n prevY = positions[0][1]\n\n #Get the pawn\n item = board[posX][posY]\n\n #Forced pawn promotion\n if type(item) == Pawn.Pawn and 
item.checkForPromotion((prevX, prevY)) and not item.promoted:\n item.promote()\n return 1\n \n else:\n return 0", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n if self.stop_date is None:\n return True\n return bool(self.get_spawns(self.stop_date))", "def respawn_player(self):\n self.rect.x = 50\n self.rect.y = 50\n \n # Specifies the Player's spawnpoint as maze_arrangement[1][1], representing\n # the tile in the top-left corner of the maze\n self.__user_x = 1\n self.__user_y = 1", "def notify_winner(self):\n self.is_winner = True", "def status_callback():\n if args['retire_idle']:\n return False\n\n return True", "def time_server_not_responding(self):\n if not self.time_server_set:\n return False\n if self.am_leader:\n return False\n try:\n uid = self.global_time_server.get_id()\n except socket.error:\n self.global_time_server = None\n self.time_server_set = False\n print \"The time server is not responding.\"\n return True\n print \"The time server is responding!\"\n return False", "async def twitch_follower_checker_loop(self):\n\n new_last_timestamp = dt.utcnow()\n new_followers = collections.defaultdict(list) # channel user id: list of new follower usernames\n db = await self.bot.database.get_connection()\n data = await db(\"SELECT * FROM user_settings WHERE twitch_bearer_token IS NOT NULL\")\n\n # Wew let's do it\n for row in data:\n\n # See if we got their data already\n new_follower_list = new_followers.get(row['twitch_user_id'])\n if new_follower_list is None:\n new_follower_list, new_cursor_value = await self.get_new_followers(row['twitch_bearer_token'], row['twitch_user_id'], row['twitch_cursor'])\n if new_cursor_value:\n await db(\"UPDATE user_settings SET twitch_cursor=$1 WHERE twitch_user_id=$2\", new_cursor_value, row['twitch_user_id'])\n new_followers[row['twitch_user_id']] = new_follower_list\n\n # Update the follower timestamps into real timestamps\n # self.logger.info(new_follower_list)\n filtered_new_follower_list = [i for i in new_follower_list if dt.strptime(i['followed_at'], \"%Y-%m-%dT%H:%M:%SZ\") > self.last_twitch_checked]\n # self.logger.info(filtered_new_follower_list)\n\n # Send DM to the user\n if filtered_new_follower_list:\n discord_user = self.bot.get_user(row['user_id']) or await self.bot.fetch_user(row['user_id'])\n new_follower_string = ', '.join([f\"**{i['from_name']}**\" for i in filtered_new_follower_list])\n if len(new_follower_string) >= 1800:\n new_follower_string = \"\"\n try:\n await discord_user.send(f\"You have **{len(filtered_new_follower_list)}** new Twitch follower{'s' if len(filtered_new_follower_list) > 1 else ''}! {new_follower_string}\")\n except discord.HTTPException:\n pass\n\n # Update timestamp\n self.last_twitch_checked = new_last_timestamp\n await db.disconnect()" ]
[ "0.62327015", "0.61734205", "0.5913024", "0.58508074", "0.58208185", "0.58022654", "0.5687446", "0.56421936", "0.56058353", "0.55823416", "0.55794746", "0.55337363", "0.5507955", "0.5464997", "0.54603153", "0.54300016", "0.5409948", "0.540742", "0.54065454", "0.54039454", "0.5393364", "0.53923327", "0.53808933", "0.53808933", "0.53789896", "0.53744894", "0.5370398", "0.5362218", "0.53591734", "0.53560054" ]
0.6564329
0
Add starter log to allure report
def add_logfile_to_report(self):
    logfile = str(self.log_file)
    attach.file(logfile, "Starter log file", AttachmentType.TEXT)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logStarted(build, step, log):", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def write_terraform_apply_log_header(self):\n with open(self.terraform_install_log, 'a+') as logfile:\n logfile.write(\"*\" * 100)\n logfile.write(\"\\n*** Terraform Apply Started\")\n logfile.write(\"\\nDateTime: %s\\n\" % datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n logfile.write(\"*\" * 100)\n self.write_debug_log(K.TERRAFORM_APPLY_STARTED)", "def create_hdf5_logger(self):\n super(Inertial_Logger,self).create_hdf5_logger()\n self.logger.add_attribute(self.trial_info_path, 'mode', 'inertial trajectory')", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def report():\n pass", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def _log_trial(self, is_add: bool):\n try:\n with open(str(self.info.trials_log_file), \"r\") as file:\n trials = util.yaml_load(file.read())\n except FileNotFoundError:\n trials = []\n\n if is_add:\n trials.append(self.trial.to_dict())\n else:\n trials[-1] = self.trial.to_dict()\n\n with open(str(self.info.trials_log_file), \"w\") as file:\n file.write(util.yaml_dump(trials))", "def do_rrt(self, arg):\n self.do_timesheet('report extend track today')", "def add_line_in_log():\n logging.info(' ' + '-' * 60 + '\\n')", "def on_start(self):\r\n self.log()", "def report_trial(self):\n pass", "def write_terraform_init_log(self, response):\n head_msg = \"Terraform Init is done\"\n with open(self.terraform_install_log, 'a+') as logfile:\n logfile.write(self._write_header(head_msg))\n logfile.write(response[1])\n\n self.write_debug_log(K.TERRAFORM_INIT_COMPLETED)", "def start_confluence_log_file(S,cfg,bands):\n # RS: making this file store & confluence-markdown-format your data \n if bands is None:\n bands = S.config.get(\"init\").get(\"bands\")\n confluence_fp = os.path.join(S.output_dir,f\"{S.name}_optimization_summary.txt\")\n with open(confluence_fp,'a') as cfile:\n dev_name, crate_and_slot, start_date = get_config_vals(S,cfg)\n cfile.write(f\"h4. *{dev_name} {crate_and_slot}*\\n\")\n #cfile.write(f\"optimization with `{__file__}\")\n cfile.write(\"* Ran {{\" + f\"{' '.join(sys.argv)}\" +\"}}\\n\")\n band_str = ','.join([str(band) for band in bands])\n cfile.write(f\"* Plots of bands {band_str} taken {start_date} in \" +\\\n \"{{\" +f\"{S.plot_dir}\" +\"}}\\n\")\n cfile.write(\"* resultant tunefile: **TODO**\\n\\n\")\n cfile.write(\"|| ||Indiv.||-||-||-||-||togeth||-||\\n\")\n table_top=\"||SMuRF band||uc att (.5dBs)||tone power (3dB steps)||\"+\\\n \"dc att (.5dBs)||Num. Channels||Med. White Noise (pA/rtHz)||\"+\\\n \"Num. Channels||Med. 
White Noise (pA/rtHz)||\\n\"\n cfile.write(table_top)\n logger.info(f\"made new confluence summary at:\\n{confluence_fp}\")\n return confluence_fp", "def pytest_runtest_logreport(report):\n # ignore setup and teardown reporting of tests that are run.\n # keep skipped items...\n outcome = report.outcome\n if outcome != 'skipped' and report.when != \"call\":\n return\n # print(\"LOGREPORT {} --> {}\".format(report.sco_bla, outcome))\n tst_lst.append(report.sco_bla + (outcome, ))", "def log(self):\n f = open(self.log_dir + 'parsed.log', 'a')\n try:\n # Write: local time | CurrentCost \"time\" | id | temp/C | power/W \n f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" \n % (self.ts('now'), self.ts('cc'), self.id, self.temp, self.watts))\n finally:\n f.close()", "def report(self, short=True):\n self.logger.finish()\n print(json.dumps(to_json(self.trial, short), indent=2))\n return self", "def InsertLog():", "def _log_results(self, first_time=False):\n\n if not first_time:\n print(self.READINGS_PRINT_TEMPLATE % self.get_sensors_data())\n\n self._log_timer = self._start_timer(Config.LOG_INTERVAL, self._log_results)", "def write_terraform_plan_log(self, response):\n head_msg = \"Terraform Plan is done\"\n with open(self.terraform_install_log, 'a+') as logfile:\n logfile.write(self._write_header(head_msg))\n logfile.write(response[1])\n\n self.write_debug_log(K.TERRAFORM_PLAN_COMPLETED)", "def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r", "def setup_audit_log(cfg=CFG):\n if not runez.DRYRUN and not runez.log.file_handler:\n runez.log.setup(\n file_format=\"%(asctime)s %(timezone)s [%(process)d] %(context)s%(levelname)s - %(message)s\",\n file_level=logging.DEBUG,\n file_location=cfg.meta.full_path(\"audit.log\"),\n greetings=\":: {argv}\",\n rotate=\"size:500k\",\n rotate_count=1,\n )", "def recordLog(project, status, memo):\n path = getPath(project)\n log = open(path, 'a')\n writer = csv.writer(log, lineterminator='\\n')\n writer.writerow((time.time(), status, memo))\n log.close()\n if status == 'a':\n print(\"Tracking your time on \" + project)\n if status == 's':\n print(\"Tracking suspended on \" + project)\n if status == 't':\n print(\"Time shifted on \" + project)\n if not path == '.sourglass':\n store = open(os.path.join(basepath, 'last'), 'w')\n store.write(project)\n store.close", "def project_report(request, **kwargs):\n\n #Creating the command for the logs \n print(\"in the project_report ...........................................\")\n outputStr = \"Updating the logs...\"\n #Making the output\n context = {\n \"page_title\": _(\"Test Details\"),\n \"test_lists\": 'report_list', #tests_list\n \"log_data\": outputStr\n }\n return render(request, 'rally_dashboard/events/test_logs.html', context)", "def setup_log(self):\n self.logger, _ = get_logger(\"datatransform\")", "def extra_log(self, string):\n if hasattr(self.parent, \"log\"):\n self.parent.log += f\"\\r\\n[{time.process_time()}] \"\n self.parent.log += string + \"\\r\\n\"", "def add_log(self):\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {\"_id\": utils.get_iuid(),\n \"doctype\": constants.DOCTYPE_LOG,\n \"docid\": self.doc[\"_id\"],\n \"diff\": diff,\n \"timestamp\": utils.get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, \"current_user\") and flask.g.current_user:\n entry[\"username\"] = flask.g.current_user[\"username\"]\n else:\n entry[\"username\"] = None\n 
if flask.has_request_context():\n entry[\"remote_addr\"] = str(flask.request.remote_addr)\n entry[\"user_agent\"] = str(flask.request.user_agent)\n else:\n entry[\"remote_addr\"] = None\n entry[\"user_agent\"] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)", "def homepage_log():\n\n return render_template('home_log.html')", "def demo_log(self):\n self.logger.debug('This is a debug')\n self.logger.debug(self.name)\n self.logger.debug(self.doc)", "def __init__(self, log_dir):\n self.writer = SummaryWriter(log_dir)" ]
[ "0.6069437", "0.5960698", "0.59574604", "0.5930421", "0.5885275", "0.5810344", "0.57965165", "0.5733731", "0.5677679", "0.567086", "0.56483895", "0.5602861", "0.5598323", "0.5581187", "0.5545258", "0.5526509", "0.55119", "0.5511117", "0.5473258", "0.5438435", "0.54359597", "0.5406004", "0.54059684", "0.5400562", "0.53897744", "0.53851074", "0.5377885", "0.5372496", "0.53715295", "0.5361603" ]
0.6250697
0
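A hedged usage sketch of attaching a plain-text log to an Allure report from a pytest test, assuming the allure-pytest package is installed; the temporary file name and its contents are illustrative placeholders:

    import allure

    def test_attach_starter_log(tmp_path):
        log_path = tmp_path / "starter.log"
        log_path.write_text("starter up and running\n")
        # allure.attach.file(source, name=..., attachment_type=...) adds the
        # file to the report entry for the current test.
        allure.attach.file(
            str(log_path),
            name="Starter log file",
            attachment_type=allure.attachment_type.TEXT,
        )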
get HTTP protocol for this starter (http/https)
def get_http_protocol(self):
    if self.cfg.ssl:
        return "https"
    else:
        return "http"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_protocol():\n if https():\n protocol = 'https'\n else:\n protocol = 'http'\n return protocol", "def get_protocol(self):\n if self.ssl:\n return \"https\"\n else:\n return \"http\"", "def protocol(self):\n return 'https' if self.allow_https and self.is_secure else 'http'", "def getProtocol(self) -> str:\n ...", "def app_protocol(self):\n if settings.INAPP_REQUIRE_HTTPS:\n return 'https'\n else:\n return 'https' if self.is_https else 'http'", "def protocol(self, code: str) -> str:\n return 'https'", "def scheme(self):\n return self.use_ssl and \"https\" or \"http\"", "def protocol(self):\n return helpers.get_protocol()", "def protocol(self) -> str:\n return pulumi.get(self, \"protocol\")", "def protocol(request):\n return request.param", "def protocol(self, code):\n return self.url.scheme", "def protocol(self):\n return self._host[CONF_PROTOCOL]", "def protocol(self):\n return self._config[\"security.protocol\"]", "def protocol(self):\n\n if '://' in self.host:\n scheme, host = self.host.split('://', 1)\n return scheme\n elif self.port == 21:\n return 'ftp'\n elif self.port == 22:\n return 'sftp'\n elif self.port == 990:\n return 'ftps'\n else:\n # Uncertain, assume FTP.\n return 'ftp'", "def get_protocol(url):\n result = re.search(r\"^https?://\", url)\n return result.group(0) if result else None", "def protocol(self) -> Optional[pulumi.Input['TargetServerProtocol']]:\n return pulumi.get(self, \"protocol\")", "def getProtocol(self, _):\r\n return self._protocol", "def protocol(self):\n return self._protocol", "def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()", "def get_protocol(binding_id):\n binding_to_protocol = {VID_TAXII_HTTP_10: \"http\", VID_TAXII_HTTPS_10: \"https\"}\n try:\n return binding_to_protocol[binding_id]\n except:\n raise ValueError(\"Unknown Protocol Binding ID %s\" % binding_id)", "def protocol(self) -> str:\n return self.__parameters.protocol", "def query_scheme(self):\n\n return 'https'", "def proxy_protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"proxy_protocol\")", "def proxy_protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"proxy_protocol\")", "def protocol(self) -> Optional[pulumi.Input[Union[str, 'Protocol']]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")", "def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")" ]
[ "0.85699964", "0.8152591", "0.7916452", "0.74209046", "0.7311242", "0.7305693", "0.7218571", "0.7194539", "0.70896435", "0.69592017", "0.68593144", "0.68068993", "0.6778132", "0.6735765", "0.67253834", "0.65918314", "0.6591383", "0.6480884", "0.6470097", "0.64394146", "0.64071524", "0.6391711", "0.63792455", "0.63792455", "0.63594115", "0.6356992", "0.6356992", "0.6356992", "0.6356992", "0.6356992" ]
0.8308429
1
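A small sketch of how such a protocol getter is typically consumed when building a base URL; the host and port below are illustrative placeholders:

    def build_base_url(protocol: str, host: str, port: int) -> str:
        return f"{protocol}://{host}:{port}"

    print(build_base_url("https", "127.0.0.1", 8529))  # -> https://127.0.0.1:8529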
Check that starter instance is alive
def check_that_instance_is_alive(self):
    if not self.instance.is_running():
        raise Exception(f"Starter instance is not running. Base directory: {str(self.basedir)}")
    if self.instance.status() == psutil.STATUS_ZOMBIE:
        raise Exception(f"Starter instance is a zombie. Base directory: {str(self.basedir)}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_alive(self):\n pass", "def is_alive(self):\n return True", "def is_alive(self):", "def alive(self):\n return True", "def is_instance_up(self):\n logging.debug(\"checking if starter instance booted: \" + str(self.basedir))\n if not self.instance.is_running():\n message = \"Starter Instance {0.name} is gone!\".format(self)\n logging.error(message)\n raise Exception(message)\n\n # if the logfile contains up and running we are fine\n lfs = self.get_log_file()\n regx = re.compile(r\"(\\w*) up and running \")\n for line in lfs.splitlines():\n match = regx.search(line)\n if match:\n groups = match.groups()\n if len(groups) == 1 and groups[0] == \"agent\":\n continue\n return True\n\n return False", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def is_instance_running(self):\n try:\n self.instance.wait(timeout=1)\n except psutil.TimeoutExpired:\n pass\n return self.instance.is_running()", "def is_alive(self) -> bool:\n self.check_is_alive()\n return self.__is_alive", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self, site):\n try:\n return requests.get(site).status_code == 200\n except Exception:\n pass", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def is_alive(self):\n if self.health > 0:\n return True\n return False", "def is_alive(self):\n return self.alive", "def is_alive(self):\n return self.alive", "def KeepAlive(self) -> bool:", "def IsAlive(self, *args, **kwargs):\n pass", "def ready(self):\n\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{}:{}/v1/kv/health'.format(\n self.running_host,\n self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 404:\n return True\n elif response.status_code == 500:\n return False\n else:\n return False", "def is_alive(self):\n try:\n return self.get_life() > 0\n except KeyError:\n return True", "def isAlive(self):\n return self.is_alive()", "def is_alive(self):\n return self._is_alive", "def is_alive(self) -> bool:\n return self._main_thread.is_alive()", "def service( self ):\n\n self.alive = time.time()", "def alive(self):\n return self._thread is not None", "def isAlive(self):\n raise NotImplementedError", "def is_running(self) -> bool:\n return False", "def alive(p):\n return p.is_alive()", "def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. 
Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)", "def isalive():\n return 'alive'" ]
[ "0.77166504", "0.7615171", "0.74846673", "0.7292565", "0.7082333", "0.69093466", "0.69083273", "0.687419", "0.6822564", "0.66718936", "0.66718936", "0.66681343", "0.6643307", "0.6634902", "0.6621218", "0.6621218", "0.66178095", "0.66134834", "0.6612024", "0.6602468", "0.6561023", "0.6549695", "0.6541095", "0.65271956", "0.65264034", "0.65168935", "0.6505408", "0.6487338", "0.6476037", "0.6471301" ]
0.820519
0
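A hedged, self-contained sketch of the same liveness-check pattern with psutil, applied to the current process so it can run anywhere (the real method checks a spawned starter process instead):

    import os
    import psutil

    proc = psutil.Process(os.getpid())
    if not proc.is_running() or proc.status() == psutil.STATUS_ZOMBIE:
        raise RuntimeError(f"process {proc.pid} is not healthy")
    print(proc.status())  # e.g. 'running'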
check whether substring is present in the starter log
def check_that_starter_log_contains(self, substring: str):
    if self.count_occurances_in_starter_log(substring) > 0:
        return
    else:
        raise Exception(
            f"Expected to find the following string: {substring}\n in this log file:\n{str(self.log_file)}"
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_contains(self, s: str) -> bool:\n return len(list(filter(lambda str: s in str, self.logs))) > 0", "def hasSubstring(self, s):\n node, off = self.followPath(s)\n return node is not None", "def _is_substring(s1, s2):\n\treturn s1.find(s2) != -1", "def match_substring(self, str):\n if self.repo_relative_path.find(str) >= 0:\n return True\n\n if self.uuid:\n if (\"uuid://%s%s\" % (self.uuid, self.repo_relative_path)).find(str) >= 0:\n return True\n\n if self.url:\n if (self.url + self.repo_relative_path).find(str) >= 0:\n return True\n\n return False", "def issubstring(substring, string):\n return substring in string", "def search(self):\n if self.substring in [None, \"\"]:\n print(\"Invalid Value For Substring\")\n elif self.string in [None, \"\"]:\n print(\"Invalid Value For String\")\n elif len(self.substring) > len(self.string):\n print(\"Length of Substring Less Than String\")\n else:\n posn = self.comparison()\n if posn == -1:\n print(\" Substring Not Found :: Search Failed\")\n else:\n print(\" Substring Found at Position --> \", posn+1)", "def substring_match(recipe, word):\n if names_only:\n line = recipe.name\n else:\n line = str(recipe)\n\n if not case:\n word = word.lower()\n line = line.lower()\n\n return line.find(word) != -1", "def isSubString(string1, string2, minMatchLength = 0):\n return (True)", "def list_has_substring(substring, l):\n found_substring = False\n for item in l:\n if substring in item:\n found_substring = True\n break\n\n return found_substring", "def isstringIs_substring(str1, str2):\r\n if str1 in str2:\r\n return True\r\n else:\r\n False", "def starts_with(text, substring):\n assert text.startswith(substring), \"%r doesn't start with %r\" % (text,\n substring)", "def dz_is_in(dz_string, substring):\n if substring not in dz_string:\n return 0\n else:\n return 1", "def count_occurances_in_starter_log(self, substring: str):\n number_of_occurances = self.get_log_file().count(substring)\n return number_of_occurances", "def check_prefix(custom_str: str) -> bool:\r\n\r\n return len(custom_str) == 0", "def is_in_log(self, regex, start=0):\n\n ex = re.compile(regex)\n for l in self.logs[start:]:\n if ex.search(l):\n logging.debug(\"Found '%s' in logs\", regex)\n return l\n\n logging.debug(\"Did not find '%s' in logs\", regex)\n return None", "def is_junk(substring):\n return len(substring.strip(' \\t\\0')) == 0 and len(substring) > 10", "def contains(strn, substr):\n try:\n strn.index(substr)\n return True\n except ValueError:\n return False", "def check_for_strings(text, strings):\n for string in strings:\n if text.find(string) >= 0:\n return True\n return False", "def val_starts_with(base_string, strings):\n for the_string in strings:\n if base_string.startswith(the_string):\n return True", "def dzs_are_in(dz_string, substring1, substring2):\n if substring1 not in dz_string:\n return 0\n elif substring2 not in dz_string:\n return 0\n else:\n return 1", "def isSubStringNoCase(string1, string2, minMatchLength = 0):\n return (True)", "def check_string(str_one, str_two):\n str_one = str_one.lower()\n str_two = str_two.lower()\n # print(str_one,str_two)\n if len(str_two) < len(str_one):\n return bool(re.search(str_two+'$',str_one))\n else:\n return bool(re.search(str_one+'$',str_two))", "def startswith(value, s):\n\n if not value: return False\n return value.find(s) == 0", "def has_prefix(cls, string1, string2):\n return len(cls.get_prefix(string1, string2)) > 0", "def test_get_substrings_standard(self):\n ans = self.sf.get_substrings()\n\n for substr in 
ans:\n self.assertTrue(substr.freq >= 2)\n self.assertTrue(substr.subr_saving() > 0)", "def has_substring(pattern, text):\n M = len(pattern)\n N = len(text)\n\n # create the LPS\n lps = [0] * M\n j = 0\n\n compute_lsp(pattern, M, lps)\n\n i = 0\n final_index = 0\n\n while (N - i) >= (M - j):\n if pattern[j] == text[i]:\n i += 1\n j += 1\n if j == M:\n # on Last index\n final_index = i - j\n j = lps[j - 1]\n\n elif i < N and pattern[j] != text[i]:\n\n if j != 0:\n j = lps[j - 1]\n else:\n i += 1\n\n return final_index", "def check(self, s: str, mem: dict):\n dp = [False for _ in range(len(s)+1)]\n dp[0] = True\n for i in range(1, len(s)+1):\n for j in range(i):\n if dp[j] and s[j:i] in mem:\n dp[i] = True\n return dp[-1]", "def _check_logic_syntax(string):\n return logExp.matches(string)", "def match(self, head_str):\n\t\tif \"masscan\" in head_str.lower():\n\t\t\treturn True\n\t\treturn False", "def find_str_in_file(f: Path, s: str) -> bool:\n return f.read_text(encoding='utf-8').find(s) != -1" ]
[ "0.68737906", "0.6771097", "0.6496623", "0.6410618", "0.63869303", "0.63129365", "0.61710656", "0.61576027", "0.6072304", "0.6054336", "0.6003412", "0.5982007", "0.596408", "0.576148", "0.5692559", "0.5674016", "0.5639905", "0.563524", "0.56121904", "0.56079257", "0.55621016", "0.5531923", "0.5531761", "0.5511909", "0.5508653", "0.5506425", "0.5497619", "0.54950774", "0.54804045", "0.5453835" ]
0.82817847
0
count occurrences of a substring in the starter log
def count_occurances_in_starter_log(self, substring: str):
    number_of_occurances = self.get_log_file().count(substring)
    return number_of_occurances
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_substring(string, sub_string):\n return string.count(sub_string)", "def count_sub(sub, s):\n count = 0\n for i in range(len(s) - len(sub) + 1):\n if s[i:i + len(sub)] == sub:\n count += 1\n return count", "def recCountString():\r\n target = raw_input(\"Enter target string: \")\r\n key = raw_input(\"Enter key string: \")\r\n matches = subStringMatchExact(target,key)\r\n print \"match(es) =\",matches", "def custom_count(string1, search_string):\n count = 0\n for index in range(0, len(string1)):\n phrase = string1[index:index + len(search_string)]\n count += (phrase == search_string)\n return count", "def string_freq(self, query_str):\n found = self.search_prefix(query_str)\n # if query is found, go to that node\n if found:\n node = self.saved_node\n # extract relevant count that had been performed during insertion of words and traversal of nodes\n count = node.same_prefix_count\n else:\n return 0\n return count", "def countSubStringMatch(target,key):\n count = 0\n for i in range(0,len(target)-len(key)):\n if target[i:i+len(key)] == key:\n count += 1\n return count", "def count(sub_stng, stng):\n instance_count = 0\n start_index = 0\n while stng.find(sub_stng, start_index) != -1:\n instance_count += 1\n start_index = stng.find(sub_stng, start_index) + 1\n\n return instance_count", "def count_occurrences(text, pattern, d=0):\n return len(find_occurrences(text, pattern, d))", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def check_that_starter_log_contains(self, substring: str):\n if self.count_occurances_in_starter_log(substring) > 0:\n return\n else:\n raise Exception(\n f\"Expected to find the following string: {substring}\\n in this log file:\\n{str(self.log_file)}\"\n )", "def CountAppStrMatch(pattern, text, d, debug = False):\n\tcount = 0\n\tif debug:\n\t\tprint len(text)-len(pattern)+1\n\tfor i in range(len(text)-len(pattern)+1):\n\t\tif debug:\n\t\t\tprint text[i:i+len(pattern)]\n\t\t\tprint HammingDist(text[i:i+len(pattern)], pattern)\n\t\tif HammingDist(text[i:i+len(pattern)], pattern) <= d:\n\t\t\tcount += 1\n\treturn count", "def countSubStringMatchRecursive(target,key,count):\r\n print target\r\n index = find(target,key)\r\n if index < 0 :\r\n return 0\r\n else :\r\n count += countSubStringMatchRecursive(target[index+len(key):len(target)+1],key,count)\r\n count += 1\r\n print count\r\n return count", "def numberOfSubstrings(self, s: str) -> int:\n i = 0\n res = 0\n d = {c:0 for c in 'abc'}\n \n for j, val in enumerate(s):\n d[val] += 1\n while all(d.values()):\n d[s[i]] -= 1\n i += 1\n res += i\n \n return res", "def occurrences(substring, string, sensitive=True):\n pos = -1\n o = []\n if not sensitive:\n substring = substring.lower()\n string = string.lower()\n while True:\n pos = string.find(substring, pos + 1)\n if pos == -1:\n return o\n else:\n o.append([pos, pos + len(substring)])", "def count_request_contains_str(sting_input):\n request_list = var_cache['local'].get_request_list()\n match_count = 0\n for url in request_list:\n if url.find(sting_input) > -1:\n match_count += 1\n return match_count", "def count(self):\n string_count = 0\n string = ['abc', 'xyz', 'aba', '1221']\n for elements in string:\n length = len(elements) \n if length >= 2:\n if elements[0] == elements[-1]: \n string_count +=1\n print(\"String count :\", string_count)", "def 
find_substring(string):\n sub_s = \"\"\n if len(string) == 1:\n print(1)\n for k in range(0, len(string) // 2):\n sub_s = sub_s[:k] + string[k]\n pos = 0\n next_pos = string.find(sub_s, pos + k + 1)\n count = 1\n while next_pos != -1 and next_pos == pos + k + 1:\n count += 1\n pos += k + 1\n next_pos = string.find(sub_s, pos + k + 1)\n if next_pos == -1 and pos == len(string) - k - 1:\n return count\n return 0", "def count_sub(dna, sub):\n sub_len = len(sub)\n dna_len = len(dna)\n count = 0\n\n # iterate over each char of the dna string\n for start in range(dna_len):\n # if we find a match for our substring - reset the current counter\n if dna[start:start+sub_len] == sub:\n tmp_count = 0\n # count how many consecutive occurrences we find\n while dna[start:start+sub_len] == sub:\n tmp_count += 1\n start += sub_len\n # update counter if we find a bigger number of consecutive occurrences\n if tmp_count > count:\n count = tmp_count\n # return the max number of consecutive occurrences\n return count", "def count_hi(str):\n return str.count(\"hi\")", "def CountOccurrences(pattern, bwt, starts, occ_counts_before, suffix_array):\r\n # 0$ 1A 2T 3G 4C\r\n letters = {'$':0, 'A':1, 'T':2, 'G':3, 'C':4}\r\n top=0\r\n bottom = len(bwt)-1\r\n matches_index = []\r\n while True:\r\n if len(pattern)!=0:\r\n char = pattern[-1]\r\n j = letters[char]\r\n pattern = pattern[:-1]\r\n found=False\r\n for i in range(top,bottom+1):\r\n if bwt[i] == char:\r\n top = occ_counts_before[i][j] + starts[char] -1\r\n bottom = occ_counts_before[bottom][j] + starts[char] -1\r\n found = True\r\n break\r\n\r\n if found==False:\r\n return matches_index\r\n # when pattern is finished proccessing\r\n else:\r\n for i in range(top, bottom+1):\r\n matches_index.append(suffix_array[i])\r\n return matches_index\r\n\r\n return matches_index", "def prefix_freq(self, query_str):\n # if query input is empty, return all strings\n if query_str == '':\n return len(self.text)\n found = self.search_prefix(query_str)\n # if query is found, go to that node\n if found:\n node = self.saved_node\n # extract relevant count that had been performed during insertion of words and traversal of nodes\n count = node.prefix_count\n else:\n return 0\n return count", "def count(self, word):\n pass", "def kncount(self, string, prefixes=None): ###\n if prefixes == None:\n prefixes = list(self.dist(\"\").keys())\n return sum([self.count(p + string) >= 1 for p in prefixes])", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter", "def count_segments(s):\n s = s.strip().split()\n return len(s)", "def _substring_occurrences(\n cls, in_str: str, substrings: Iterable[str]\n ) -> Dict[str, List[int]]:\n occurrences = {}\n for substring in substrings:\n occurrences[substring] = list(findall(substring, in_str))\n return occurrences", "def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)", "def counts(self, regex = \"\\w+\"): \n tokenizer = RegexpTokenizer(r'{}'.format(regex))\n count = []\n for i in tqdm(self.text):\n count.append(len(tokenizer.tokenize(i)))\n return count", "def match_specific_string(input_data: list, keyword: str) -> int:\n number_of_words = 0\n for element in input_data:\n number_of_words += len(re.findall(keyword, element, re.IGNORECASE))\n return 
number_of_words" ]
[ "0.69256353", "0.6731468", "0.6658297", "0.6571659", "0.6564363", "0.6540324", "0.6493924", "0.6485271", "0.64711386", "0.6334691", "0.63335073", "0.6273863", "0.626735", "0.6240192", "0.62162554", "0.6199468", "0.6196827", "0.61422545", "0.6131402", "0.6095801", "0.6036622", "0.6029487", "0.59793204", "0.5976236", "0.5971932", "0.5971555", "0.59228486", "0.5888577", "0.58613247", "0.58471304" ]
0.8001489
0
fake run starter method
def run_starter(self, expect_to_fail=False):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startTestRun(self):", "def test_get_run(self):\n pass", "def run(_):\n pass", "def Run():\r\n pass", "def runtest(self):", "def run_experiment():\n pass", "def run():\n main()", "def runTests(self):\n \n pass", "def test_run_started(self):", "def run_test(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def startup_run(self):\n raise NotImplementedError # implement in subclass", "def main():\n run_test_all()", "def custom():\n run(\"example\")", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def run(self, test, env):\n\n raise NotImplementedError", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass" ]
[ "0.74126184", "0.73602825", "0.7261935", "0.72602695", "0.72600466", "0.7242399", "0.72393954", "0.7110345", "0.71020657", "0.7089417", "0.70837325", "0.7037372", "0.7013169", "0.70076424", "0.6999183", "0.6999183", "0.6967566", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449", "0.6933449" ]
0.8099833
0
Test case for basketballteams_get
def test_basketballteams_get(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_basketballteams_id_get(self):\n pass", "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def test_teams_get_teams_v2(self):\n pass", "def test_retrieve_team(self):\n pass", "def test_teams_get_teams_v1(self):\n pass", "def test_teams_get_team_v1(self):\n pass", "def get_teams():", "def test_teams_list(self):\n pass", "def test_teams_read(self):\n pass", "def test_workflows_id_team_get(self):\n pass", "def test_teams_get_users_teams_v2(self):\n pass", "def test_data_source_soaps_id_team_get(self):\n pass", "def test_get_team_history(self):\n pass", "def test_gridironfootballplayers_get(self):\n pass", "def test_get_teams(self):\n owner2 = AnotherUserFactory(email_confirmed=True)\n owner3 = AnotherUserFactory(username='team owner 3', email='[email protected]', email_confirmed=True,)\n TeamFactory(owner=owner2, name='second team')\n TeamFactory(owner=owner3, name='third team')\n\n usual_user = UserFactory(\n username='usualuser',\n email='[email protected]',\n email_confirmed=True,\n )\n token = Token.objects.get(user=usual_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n\n response = self.client.get(reverse('api:teams-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 3)", "def test_get_list_teams(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)", "def test_teams_get_users_teams_v1(self):\n pass", "def test_get_individual_team(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams/1')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)", "def test_get_all_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def test_get_open_requests_by_team(self):\n pass", "def test_teams_create(self):\n pass", "def test_create_team(self):\n pass", "def test_update_team(self):\n pass", "def determine_basketball_outcome_from_api(market, params, enp_id):\n\n n_bet = 1\n outcome = None\n if market == BasketballMarkets.FULL_TIME_POINT_SPREAD:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n handicap = params[1]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n\n\n if selection == BasketballSelections.HOME_TEAM:\n hc_score = score_home + handicap\n if hc_score == score_away:\n outcome = 0\n elif hc_score > score_away:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.AWAY_TEAM:\n hc_score = score_away + handicap\n if hc_score == score_home:\n outcome = 0\n elif hc_score > 
score_home:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('FTPS bet should be ONE or TWO')\n\n elif market == BasketballMarkets.FULL_TIME_MONEYLINE:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n\n if selection == BasketballSelections.HOME_TEAM:\n if score_home == score_away:\n outcome = 0\n elif score_home > score_away:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.AWAY_TEAM:\n if score_away == score_home:\n outcome = 0\n elif score_away > score_home:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('selection should be ONE or TWO')\n elif market == BasketballMarkets.FULL_TIME_TOTAL_POINTS:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n handicap = params[1]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n score_total = score_home + score_away\n\n if selection == BasketballSelections.OVER:\n if score_total == handicap:\n outcome = 0\n elif score_total > handicap:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.UNDER:\n if score_total == handicap:\n outcome = 0\n elif score_total < handicap:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('FTTP bet should be OVER or UNDER')\n else:\n raise ValueError('implement more markets')\n\n return outcome, n_bet", "def get_people(team):", "def test_assign_managing_team(self):\n pass", "def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)", "def test_get_player_battles(self):\n pass", "def test_teams_get_workgroups_v2(self):\n pass" ]
[ "0.8449335", "0.84178495", "0.84178495", "0.81079006", "0.81024987", "0.7863653", "0.7826981", "0.7781774", "0.7720299", "0.7501402", "0.7489745", "0.7379568", "0.7356525", "0.72275877", "0.7195928", "0.71437454", "0.71002096", "0.70476884", "0.69918925", "0.6990166", "0.6847316", "0.66865164", "0.6636804", "0.66096234", "0.6602237", "0.6601782", "0.6584438", "0.65309304", "0.64946145", "0.6462027" ]
0.93158627
0
Test case for basketballteams_id_get
def test_basketballteams_id_get(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_basketballteams_get(self):\n pass", "def test_workflows_id_team_get(self):\n pass", "def test_gridironfootballplayers_id_get(self):\n pass", "def test_data_source_soaps_id_team_get(self):\n pass", "def test_brains_id_get(self):\n pass", "def test_cyclingleagues_id_get(self):\n pass", "def test_retrieve_team(self):\n pass", "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def test_teams_get_team_v1(self):\n pass", "def test_plays_id_get(self):\n pass", "def test_teams_get_teams_v2(self):\n pass", "def test_workflows_id_get(self):\n pass", "def test_teams_get_teams_v1(self):\n pass", "def test_racetracks_id_get(self):\n pass", "def test_get_individual_team(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams/1')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)", "def test_sport_id(self):\n result = self.test_client.sport_id\n\n assert result == \"1\"", "def handballteams_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=HANDBALLTEAM_TYPE_URI,\n rdf_type_name=HANDBALLTEAM_TYPE_NAME, \n kls=HandballTeam)", "def get_teams():", "def test_poets_id_get(self):\n pass", "def get_offense_team_id(self):\n pass", "def test_workflows_id_exists_get(self):\n pass", "async def getch_team(self, id: str):\n return self.get_team(id) or await self.fetch_team(id)", "def test_get_one_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n teammate = User.create(name='teammate', email='[email protected]',\n owned_teams=[team.uid])\n teammate.put()\n user = User.create(name='foo', email='[email protected]',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users/{}'.format(team.uid, teammate.uid),\n headers=self.login_headers(user),\n )\n response_dict = json.loads(response.body)\n self.assertEqual(response_dict['uid'], teammate.uid)", "def test_solareclipses_id_get(self):\n pass", "def test_teams_get_users_teams_v2(self):\n pass", "def test_teams_list(self):\n pass", "def test_groups_group_id_get(self):\n pass", "def test_user_id_get(self):\n pass", "def test_team_template_folders_id_team_get(self):\n pass" ]
[ "0.7799063", "0.7685305", "0.7607886", "0.74427456", "0.72060305", "0.6990276", "0.6840412", "0.68341905", "0.68341905", "0.6772549", "0.6731343", "0.67035055", "0.6620671", "0.66105074", "0.6533537", "0.6475667", "0.645218", "0.6435485", "0.63834304", "0.6345525", "0.630249", "0.6249877", "0.6200476", "0.618257", "0.61821526", "0.6166438", "0.61655784", "0.6103684", "0.6068002", "0.60654026" ]
0.939899
0
Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state.
def update(self, key):
    return self.state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_frame(self, key, ranges=None):", "def _get_frame(self, key):\n layout_frame = self.layout.clone(shared_data=False)\n keyisint = isinstance(key, int)\n if not isinstance(key, tuple): key = (key,)\n nthkey_fn = lambda x: zip(tuple(x.name for x in x.kdims),\n list(x.data.keys())[min([key[0], len(x)-1])])\n if key == self.current_key:\n return self.current_frame\n else:\n self.current_key = key\n\n for path, item in self.layout.items():\n if self.dynamic == 'open':\n if keyisint:\n counts = item.traverse(lambda x: x.counter, (DynamicMap,))\n if key[0] >= counts[0]:\n item.traverse(lambda x: next(x), (DynamicMap,))\n dim_keys = item.traverse(nthkey_fn, (DynamicMap,))[0]\n else:\n dim_keys = zip([d.name for d in self.dimensions\n if d in item.dimensions('key')], key)\n self.current_key = tuple(k[1] for k in dim_keys)\n elif item.traverse(lambda x: x, [DynamicMap]):\n with dimensionless_cache(item, not self._force or not self.drawn):\n key, frame = util.get_dynamic_item(item, self.dimensions, key)\n layout_frame[path] = frame\n continue\n elif self.uniform:\n dim_keys = zip([d.name for d in self.dimensions\n if d in item.dimensions('key')], key)\n else:\n dim_keys = item.traverse(nthkey_fn, (HoloMap,))[0]\n if dim_keys:\n obj = item.select((HoloMap,), **dict(dim_keys))\n if isinstance(obj, HoloMap) and len(obj) == 0:\n continue\n else:\n layout_frame[path] = obj\n else:\n layout_frame[path] = item\n traverse_setter(self, '_force', False)\n\n self.current_frame = layout_frame\n return layout_frame", "def update_frame(self, key, ranges=None, plot=None):\n element = self._get_frame(key)\n source = self.handles['source']\n data, mapping = self.get_data(element, ranges)\n self._update_datasource(source, data)", "def __getitem__(self, frame):\n if not self.dynamic == 'open' and isinstance(frame, int) and frame > len(self):\n self.warning(\"Showing last frame available: %d\" % len(self))\n if not self.drawn: self.handles['fig'] = self.initialize_plot()\n if not self.dynamic == 'open' and not isinstance(frame, tuple):\n frame = self.keys[frame]\n self.update_frame(frame)\n return self.state", "def update(self, key, val):\n state_dict = self.todict()\n assert key in state_dict\n state_dict[key] = val\n return self.state_factory.build(state_dict)", "def update_key(self):\n self.__prev_key = self.__new_key", "def __setitem__(self, key, value):\n if (key in ['__id', '__src_id', '__dst_id']):\n raise KeyError('Cannot modify column %s. 
Changing __id column will\\\n change the graph structure' % key)\n else:\n self.__is_dirty__ = True\n super(GFrame, self).__setitem__(key, value)", "def __setitem__(self, key: tuple, value: float):\n s, a = key\n if not isinstance(s, self.observation_space) or not isinstance(a, self.action_space):\n raise KeyError\n self.store.setdefault(s, dict())[a] = value", "def send_state(self, key=None):\n state = self.get_state(key=key)\n if len(state) > 0:\n if self._property_lock: # we need to keep this dict up to date with the front-end values\n for name, value in state.items():\n if name in self._property_lock:\n self._property_lock[name] = value\n state, buffer_paths, buffers = _remove_buffers(state)\n msg = {'method': 'update', 'state': state, 'buffer_paths': buffer_paths}\n self._send(msg, buffers=buffers)", "def _set_tuple_structure(self, key):\n if len(key) == 2:\n self.ks = list(np.array(key[1]))\n self.set_neighs(key[0])", "def __setitem__(self, *args):\n return _osgAnimation.vectorMatrixKeyframe___setitem__(self, *args)", "def __setitem__(self, key: Tuple, value: np.array) -> np.array:\n\n if self.axis_order == AxisOrder.XYZ:\n key = (key[2], key[1], key[0])\n\n # Set experiment if unset:\n if self._exp is None:\n self._populate_exp()\n\n # Set cframe if unset:\n if self._coord_frame is None:\n self._populate_coord_frame()\n\n _normalize_units = (1, 1, 1)\n if isinstance(key[-1], str) and len(key) == 4:\n if key[-1] != self._coord_frame.voxel_unit:\n raise NotImplementedError(\n \"Can only reference voxels in native size format which is \"\n f\"{self._coord_frame.voxel_unit} for this dataset.\"\n )\n _normalize_units = self.voxel_size\n\n if isinstance(key[2], int):\n xs = (key[2], key[2] + 1)\n else:\n start = key[2].start if key[2].start else 0\n stop = key[2].stop if key[2].stop else self.shape[0]\n\n start = start / _normalize_units[0]\n stop = stop / _normalize_units[0]\n\n xs = (int(start), int(stop))\n\n if isinstance(key[1], int):\n ys = (key[1], key[1] + 1)\n else:\n start = key[1].start if key[1].start else 0\n stop = key[1].stop if key[1].stop else self.shape[1]\n\n start = start / _normalize_units[1]\n stop = stop / _normalize_units[1]\n\n ys = (int(start), int(stop))\n\n if isinstance(key[0], int):\n zs = (key[0], key[0] + 1)\n else:\n start = key[0].start if key[0].start else 0\n stop = key[0].stop if key[0].stop else self.shape[2]\n\n start = start / _normalize_units[2]\n stop = stop / _normalize_units[2]\n\n zs = (int(start), int(stop))\n\n if len(value.shape) == 2:\n # TODO: Support other 2D shapes as well\n value = np.array([value])\n\n cutout = self.volume_provider.create_cutout(\n self._channel, self.resolution, xs, ys, zs, value\n )", "def _set_tuple_tuple_structure(self, key):\n if len(key) == 2:\n ks = [key[1]] if type(key[1]) == int else key[1]\n self.ks = list(np.array([ks]).ravel())\n self._set_tuple_only_structure(key[0])", "def __setitem__(self, key, value):\n assert not isinstance(value, Slot), \\\n \"Can't use setitem to connect slots. 
Use connect()\"\n assert self.level == 0, \\\n (\"setitem can only be used with slots of level 0.\"\n \" Did you forget to append a key?\")\n assert self.operator is not None, \\\n \"cannot do __setitem__ on Slot '{}' -> no operator !!\"\n assert slicingtools.is_bounded(key), \\\n \"Can't use Slot.__setitem__ with keys that include : or ...\"\n roi = self.rtype(self, pslice=key)\n if self._value is not None:\n self._value[key] = value\n\n # only propagate the dirty key at the very beginning of\n # the chain\n self.setDirty(roi)\n if self._type == \"input\":\n self.operator.setInSlot(self, (), roi, value)\n\n # Forward to partners\n for p in self.partners:\n p[key] = value", "def __getitem__(self, key):\n nrows, ncols = self.get_geometry()\n\n def _normalize(key, size): # Includes last index.\n if isinstance(key, slice):\n start, stop, _ = key.indices(size)\n if stop > start:\n return start, stop - 1\n else:\n if key < 0:\n key += size\n if 0 <= key < size:\n return key, key\n raise IndexError(\"invalid index\")\n\n if isinstance(key, tuple):\n try:\n k1, k2 = key\n except ValueError:\n raise ValueError(\"unrecognized subplot spec\")\n num1, num2 = np.ravel_multi_index(\n [_normalize(k1, nrows), _normalize(k2, ncols)], (nrows, ncols))\n else: # Single key\n num1, num2 = _normalize(key, nrows * ncols)\n\n return SubplotSpec(self, num1, num2)", "def _get_frame(self, key):\n pass", "def __setitem__(self, *args):\n return _osgAnimation.vectorFloatKeyframe___setitem__(self, *args)", "def update( self ):\n\t\t# Read the current state\n\t\tself.read( store=1 )\n\t\tfor key in range( 12 ):\n\t\t\t# Key state has changed? (pressed or release)\n\t\t\tif self.touched[key] != self.touched[key+12]:\n\t\t\t\tself.debug( \"Key %i is %s\" %(key,\"PRESSED\" if self.touched[key]>0 else \"Released\") )\n\t\t\t\tif self.on_key_change:\n\t\t\t\t\tself.on_key_change( key, pressed=(self.touched[key]>0) )\n\t\t\t\t# remember the current state as last state\n\t\t\t\tself.touched[key+12]=self.touched[key]", "def update_overlaid_plot(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n waveforms = [trigger, trace]\n\n first_peak, second_peak = self.get_windowed_data(waveforms[0], waveforms[1])\n self.overlaid_lines[0].set_ydata(first_peak)\n self.overlaid_lines[0].set_xdata(range(len(first_peak)))\n self.overlaid_lines[1].set_ydata(second_peak)\n self.overlaid_lines[1].set_xdata(range(len(second_peak)))\n\n areas = [integ.simps(first_peak), integ.simps(second_peak)]\n labels = ['%.1f' % areas[0], '%.1f' % areas[1]]\n\n# for area in areas:\n# if area < 0.1:\n# raise RangeError # calculation warning error for example\n self.ax2.legend([self.overlaid_lines[0], self.overlaid_lines[1]],\n labels)\n\n self.draw()", "def _refreshKey(self, displayKey):\n refreshRect = Rect(*displayKey.scaled)\n refreshRect.Inflate(2, 2)\n self.RefreshRect(refreshRect.Get())", "def __setitem__(self, *args):\n return _osgAnimation.vectorQuatKeyframe___setitem__(self, *args)", "def __setitem__(self, key, value: numbers.Number) -> None:\n if key in self.layout.bladeTupMap.keys():\n self.value[self.layout.bladeTupMap[key]] = value\n elif isinstance(key, tuple):\n sign, blade = compute_reordering_sign_and_canonical_form(key, np.array(self.layout.sig),\n self.layout.firstIdx)\n self.value[self.layout.bladeTupMap[blade]] = sign*value\n else:\n self.value[key] = value", "def __setitem__(self, key: tuple, 
value: float):\n s, a = key\n self.store.setdefault(s, dict())[a] = value", "def _set_tuple_k_structure(self, key):\n self.ks = [key[1]] if type(key[1]) == int else key[1]\n self.set_neighs(key[0])", "def __getitem__(self, key):\n return self._to_draw[key]", "def __setitem__(self, *args):\n return _osgAnimation.vectorVec3Keyframe___setitem__(self, *args)", "def myUpdate(self, stateDict=None):\n\n # store stateDict so we can replot on changing dark theme\n if stateDict is None and self.stateDict is not None:\n # re-use our stateDict\n stateDict = self.stateDict\n else:\n if stateDict is None:\n return\n self.stateDict = stateDict.copy()\n\n if stateDict is None:\n return\n \n dataType = stateDict['dataType']\n hue = stateDict['hue']\n groupByColumnName = stateDict['groupByColumnName']\n\n plotType = stateDict['plotType']\n #self.plotType = plotType\n\n xStatHuman = stateDict['xStatHuman']\n yStatHuman = stateDict['yStatHuman']\n\n xStat = stateDict['xStat']\n yStat = stateDict['yStat']\n\n '''\n print('=== myMplCanvas.myUpdate()')\n print(' ', plotType)\n print(' ', 'xStatHuman:', xStatHuman, 'yStatHuman:', yStatHuman)\n print(' ', 'xStat:', xStat, 'yStat:', yStat)\n '''\n\n xIsCategorical = stateDict['xIsCategorical']\n yIsCategorical = stateDict['yIsCategorical']\n\n masterDf = stateDict['masterDf']\n meanDf = stateDict['meanDf']\n\n self.plotDf = meanDf\n\n self.canvas.axes.clear()\n\n picker = 5\n if plotType in ['Scatter Plot', 'Scatter + Raw + Mean']:\n # scatter plot user selection\n self.scatterPlotSelection, = self.canvas.axes.plot([], [], 'oy',\n markersize=12, fillstyle='none')\n\n # main scatter\n try:\n self.whatWeArePlotting = sns.scatterplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes, picker=picker,\n zorder=0)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print(' EXCEPTION: in myUpdate() \"Scatter Plot\", exception is:')\n print(' ', e)\n print(' ', 'plotType:', plotType)\n print(' ', 'xStat:', xStat)\n print(' ', 'yStat:', yStat)\n print(' ', 'hue:', hue)\n\n # sem in both x and y, pulling from masterDf\n if dataType=='File Mean' or plotType=='Scatter + Raw + Mean':\n # we need to do this for each hue???\n # if x or y is in categorical (e.g. 
a string) then do not do this ...\n if xIsCategorical or yIsCategorical:\n pass\n else:\n print(' grabbing mean +- sem for self.groupByColumnName:', groupByColumnName)\n color = 'k'\n xd = masterDf.groupby(groupByColumnName).mean()[xStat]\n xerrd = masterDf.groupby(groupByColumnName).sem()[xStat]\n yd = masterDf.groupby(groupByColumnName).mean()[yStat]\n yerrd = masterDf.groupby(groupByColumnName).sem()[yStat]\n \n # logger.info('2023 declan')\n # print(' groupByColumnName:', groupByColumnName)\n # print(' xd:', xd)\n # print(' yd:', yd)\n # print(' xerrd:', xerrd)\n # print(' yerrd:', yerrd)\n \n self.canvas.axes.errorbar(xd, yd, xerr=xerrd, yerr=yerrd,\n fmt='none', capsize=0, zorder=10, color=color, alpha=0.5);\n\n elif plotType == 'Histogram':\n yStatHuman = 'Count'\n doKde = False #stateDict['doKDE']\n try:\n g = sns.histplot(x=xStat, hue=hue, kde=doKde,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTIONin Histogram:', e)\n\n elif plotType == 'Cumulative Histogram':\n yStatHuman = 'Probability'\n try:\n g = sns.histplot(x=xStat, hue=hue, cumulative=True, stat='density',\n element=\"step\", fill=False, common_norm=False,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTION in Cumulative Histogram:', e)\n\n elif plotType == 'Cumulative Histogram':\n yStatHuman = 'Probability'\n try:\n g = sns.histplot(x=xStat, hue=hue, cumulative=True, stat='density',\n element=\"step\", fill=False, common_norm=False,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTION in Cumulative Histogram:', e)\n\n elif plotType == 'Violin Plot':\n if not xIsCategorical:\n warningStr = 'Violin plot requires a categorical x statistic'\n else:\n g = sns.violinplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes)\n\n elif plotType == 'Box Plot':\n if not xIsCategorical:\n warningStr = 'Box plot requires a categorical x statistic'\n else:\n g = sns.boxplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes)\n\n elif plotType == 'Raw + Mean Plot':\n if not xIsCategorical:\n warningStr = 'Raw + Mean plot requires a categorical x statistic'\n else:\n try:\n # does not work here for categorical x\n #self.scatterPlotSelection, = self.canvas.axes[0].plot([], [], 'oy',\n # markersize=12, fillstyle='none')\n\n '''\n colorList = [('red'), ('green'), 'b', 'c', 'm', 'y']\n hueList = meanDf[hue].unique()\n palette = {}\n for idx, hue in enumerate(hueList):\n palette[hue] = colorList[idx]\n print(palette)\n '''\n\n palette = sns.color_palette(\"Paired\")\n #palette = ['r', 'g', 'b']\n\n # stripplot\n #g = sns.swarmplot(x=xStat, y=yStat,\n g = sns.stripplot(x=xStat, y=yStat,\n hue=hue,\n palette=palette,\n data=meanDf,\n ax=self.canvas.axes,\n #color = color,\n dodge=True,\n alpha=0.6,\n picker=picker,\n zorder=1)\n\n\n #logger.error('!!!!!!!!!!!! grabbing get_legend_handles_labels()')\n self.canvas.axes.legend().remove()\n\n #logger.error('!!!!!!!!!!!! 
grabbing get_legend_handles_labels()')\n logger.info(f' REMAKING LEGEND sns.pointplot() plotNumber:{self.plotNumber}')\n handles, labels = self.canvas.axes.get_legend_handles_labels()\n l = self.canvas.axes.legend(handles[0:2], labels[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n #self.myLegend = self.canvas.axes.Legend(handles[0:2], labels[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\n '''\n if self.darkTheme:\n color = 'w'\n else:\n color = 'k'\n color = [color] * len(hueList)\n print('color:', color)\n '''\n\n self.whatWeArePlotting = sns.pointplot(x=xStat, y=yStat,\n hue=hue,\n #palette=palette,\n data=meanDf,\n estimator=np.nanmean,\n errorbar=('ci', 68),\n capsize=0.1,\n ax=self.canvas.axes,\n color='r',\n #legend='full',\n #zorder=10)\n )\n except (ValueError) as e:\n print('EXCEPTION in \"Raw + Mean Plot\":', e)\n traceback.print_exc()\n\n elif plotType == 'Regression Plot':\n # regplot does not have hue\n if xIsCategorical or yIsCategorical:\n warningStr = 'Regression plot requires continuous x and y statistics'\n else:\n # todo: loop and make a regplot\n # for each unique() name in\n # hue (like Region, Sex, Condition)\n hueList = masterDf[hue].unique()\n for oneHue in hueList:\n if oneHue == 'None':\n continue\n tmpDf = meanDf [ meanDf[hue]==oneHue ]\n #print('regplot oneHue:', oneHue, 'len(tmpDf)', len(tmpDf))\n sns.regplot(x=xStat, y=yStat, data=tmpDf,\n ax=self.canvas.axes);\n else:\n print(' did not understand plot type:', plotType)\n\n\n #\n # update\n self.canvas.axes.figure.canvas.mpl_connect(\"pick_event\", self.onPick)\n\n self.mplCursorHover = None\n if stateDict['doHover'] and self.whatWeArePlotting is not None:\n self.mplCursorHover = mplcursors.cursor(self.whatWeArePlotting, hover=True)\n @self.mplCursorHover.connect(\"add\")\n def _(sel):\n #sel.annotation.get_bbox_patch().set(fc=\"white\")\n sel.annotation.arrow_patch.set(arrowstyle=\"simple\", fc=\"white\", alpha=.5)\n # row in df is from sel.target.index\n #print('sel.target.index:', sel.target.index)\n ind = sel.target.index\n annotationDict = self.getAnnotation(ind)\n myText = ''\n for k,v in annotationDict.items():\n myText += f'{k}: {v}\\n'\n sel.annotation.set_text(myText)\n\n #\n #self.mySetStatusBar(warningStr)\n\n self.canvas.axes.spines['right'].set_visible(False)\n self.canvas.axes.spines['top'].set_visible(False)\n\n if not stateDict['showLegend']:\n #print('self.canvas.axes.legend():', self.canvas.axes.legend())\n #print('self.canvas.axes.legend:', self.canvas.axes.legend)\n #if self.canvas.axes.legend() is not None:\n if 1:\n #logger.error('!!!!!!!!!!!! grabbing get_legend_handles_labels()')\n self.canvas.axes.legend().remove()\n\n #print('myUpdate() self.plotSize:', self.plotSize)\n self.canvas.axes.set_xlabel(xStatHuman)\n self.canvas.axes.set_ylabel(yStatHuman)\n '''\n if self.plotSize == 'paper':\n fontsize = 10\n self.canvas.axes[0].set_xlabel(xStatHuman, fontsize=fontsize)\n self.canvas.axes[0].set_ylabel(yStatHuman, fontsize=fontsize)\n else:\n self.canvas.axes[0].set_xlabel(xStatHuman)\n self.canvas.axes[0].set_ylabel(yStatHuman)\n '''\n\n # subplots_adjust\n #self.fig.canvas.draw_idle()\n self.fig.canvas.draw()", "def __setitem__(self, key, value):\n self.xg[key] = value", "def setValue(self, *args):\n return _osgAnimation.MatrixKeyframe_setValue(self, *args)", "def __setitem__(self, key: Tuple[int, int], value: complex) -> None:\n self.coeff[self._core.index_alpha(key[0]),\n self._core.index_beta(key[1])] = value" ]
[ "0.6366824", "0.57968193", "0.57502097", "0.5714252", "0.56339014", "0.5459689", "0.54484665", "0.54372066", "0.5365949", "0.5340403", "0.5326292", "0.5320078", "0.5305696", "0.52860016", "0.52631617", "0.52473605", "0.5218694", "0.52184427", "0.5185107", "0.51785284", "0.5154304", "0.51286566", "0.5123627", "0.5122041", "0.51158696", "0.5092829", "0.50886256", "0.50725776", "0.50688666", "0.5040174" ]
0.64864296
0
Get the state of the Plot for a given frame number.
def __getitem__(self, frame):
    if not self.dynamic == 'open' and isinstance(frame, int) and frame > len(self):
        self.warning("Showing last frame available: %d" % len(self))
    if not self.drawn: self.handles['fig'] = self.initialize_plot()
    if not self.dynamic == 'open' and not isinstance(frame, tuple):
        frame = self.keys[frame]
    self.update_frame(frame)
    return self.state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFrame(self, num):\n\n return self.data[num]", "def get_plot_state(self_or_cls, obj, renderer=None, **kwargs):\n if not isinstance(obj, Plot):\n obj = self_or_cls.get_plot(obj=obj, renderer=renderer, **kwargs)\n return obj.state", "def get_frame(self, frame):\n return self.frames[frame]", "def get_frame(self, index):\n filename = self.get_filename(index)\n return plt.imread(fname=filename)", "def get_state(self):\r\n return self.currentObservation", "def _get_frame(frame_index, plots):\n\n # TODO Using the indices of the self.frames, plot in correct location.\n # Okay right now there is a problem where it's unknown whether the set of coordinates\n # is a line or a dot -- that info got lost up there\n\n for amb_index in range(len(self.frames[frame_index])):\n xs = self.frames[frame_index][amb_index][0]\n ys = self.frames[frame_index][amb_index][1]\n\n # if len(xs) > 1:\n # if xs[0] == xs[1]:\n # plots[amb_index][1].set_data([xs[0]], [ys[0]])\n # if xs[-2] == xs[-1]:\n # plots[amb_index][1].set_data([xs[-1]], [ys[-1]])\n\n plots[amb_index][0].set_data(xs, ys)\n\n print(plots[len(self.ambulance_locations)])\n\n return plots,", "def get_state(self) -> np.array:\n return self.rstate.render_frame(self.rsimulator, self.grayscale)", "def get_state(self):\n return self.get_pose()", "def get_trace_state(self):\n return self.__sensor_states[4]", "def getFrame(self, num):\n\n return self.data[:, :, num]", "def get_frame(self, f):\n return self._frames[f, :]", "def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]", "def get_state(self,settime=False):\n\t\tif (settime):\n\t\t\tself.t=0\n\t\t\tself._history.clear()\n\t\t\tself._history = {'time':[self.t],'state': np.array(self.x)}\n\t\treturn self.x", "def get_frame(self):\n return self.frames.get()", "def _get_state(self):\n # gst's get_state function returns a 3-tuple; we just want the\n # status flag in position 1.\n return self.pipeline.get_state(Gst.CLOCK_TIME_NONE)[1]", "def get_frame(self, frame_number=None):\n try:\n import cv2\n except (ImportError, ModuleNotFoundError):\n logger.error(\n 'Import Error! Cant import cv2. Annotations operations will be limited. 
import manually and fix errors')\n raise\n if self.vid.isOpened():\n if self.frame_number is None:\n self.frame_number = self.vid.get(cv2.CAP_PROP_POS_FRAMES)\n else:\n self.frame_number += 1\n if frame_number is not None:\n self.frame_number = frame_number\n self.vid.set(cv2.CAP_PROP_POS_FRAMES, frame_number)\n ret, frame = self.vid.read()\n\n if ret:\n # Return a boolean success flag and the current frame converted to BGR\n return ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n else:\n return ret, None\n else:\n return False, None", "def get_frame(self):\n return self.get_frame_at_index(self.current_frame)", "def get_frame(self, ind):\n pass", "def get_current_state(self):\n return self.nextYs[-1]", "def getObservation(self):\n return self._cur_state", "def getframe(self, num):\n if num < 0 or num > self.nframes:\n raise Exception(\"Requested frame number is out of range\")\n # Do a deep copy of the header to make a new one\n newheader = {}\n for k in self.header.keys():\n newheader[k] = self.header[k]\n frame = pixiimage(header=newheader)\n frame.nframes = self.nframes\n frame.sequencefilename = self.sequencefilename\n infile = frame._open(self.sequencefilename, \"rb\")\n frame._readframe(infile, num)\n infile.close()\n return frame", "def get_state(self) -> FrameState:\n assert self.__state is not None\n return self.__state", "def getframe(self, num):\n if num < 0 or num > self.nframes:\n raise RuntimeError(\"Requested frame number is out of range\")\n # Do a deep copy of the header to make a new one\n frame = hdf5image(header=self.header.copy())\n frame.header_keys = self.header_keys[:]\n for key in (\"dim1\", \"dim2\", \"nframes\", \"bytecode\", \"hdf5\", \"ds\"):\n frame.__setattr__(key, self.__getattribute__(key))\n frame.hdf5_location = copy.deepcopy(self.hdf5_location)\n frame.hdf5_location.set_index(num)\n if self.hdf5_location.slice:\n self.data = self.ds[tuple(self.hdf5_location.slice)]\n self.nframes = self.ds.shape[self.hdf5_location.last_index]\n else:\n self.data = self.ds[:]\n return frame", "def _state_index(state):\n delta_y, delta_x, bird_lmh, pipe_lmh, is_flapping = state\n actions, height, width, _, _, _ = Q.shape\n\n y = int((height / 2) + (delta_y / step_r) - 1)\n x = int((width / 2) + (delta_x / step_c) - 1)\n\n return y, x, bird_lmh, pipe_lmh, is_flapping", "def get_state(self):\n return self.env.sim.get_state()", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def get_frame(self, frame_num=1, display=False):\n #pdb.set_trace()\n frame_size=(512,512)\n frame_data=win32com.client.VARIANT(pythoncom.VT_BYREF | pythoncom.VT_ARRAY | pythoncom.VT_UI2, numpy.empty(frame_size))\n\n #frame_data2=win32com.client.VARIANT(pythoncom.VT_BYREF | pythoncom.VT_ARRAY | pythoncom.VT_UI2, numpy.empty(frame_size))\n\n frame_data=self.appdoc.GetFrame(1,frame_data)\n #frame_data2=self.appdoc.GetFrame(2,frame_data2)\n #pdb.set_trace()\n if hasattr(self,\"display\"):\n display = (display or self.display)\n if display:\n plt.imshow(frame_data, cmap='gray')\n plt.show()\n return numpy.array(frame_data, dtype=numpy.uint16)", "def state(self):\n return self.probe.get_data(self.variable)", "def state(self):\n return self.probe.get_data(self.variable)" ]
[ "0.6576043", "0.64040893", "0.6168167", "0.61134017", "0.6076815", "0.6055128", "0.5951637", "0.5882845", "0.58641803", "0.5848422", "0.5799237", "0.5756058", "0.5718137", "0.5702686", "0.56998396", "0.56985414", "0.5595627", "0.5593545", "0.5543105", "0.5521952", "0.5484779", "0.5452552", "0.54350305", "0.54116875", "0.5400628", "0.53795344", "0.53795344", "0.53495896", "0.53447247", "0.53447247" ]
0.6989357
0
Traverses any nested DimensionedPlot returning a list of all plots that match the specs. The specs should be supplied as a list of either Plot types or callables, which should return a boolean given the plot class.
def traverse(self, fn=None, specs=None, full_breadth=True):
    accumulator = []
    matches = specs is None
    if not matches:
        for spec in specs:
            matches = self.matches(spec)
            if matches: break
    if matches:
        accumulator.append(fn(self) if fn else self)

    # Assumes composite objects are iterables
    if hasattr(self, 'subplots') and self.subplots:
        for el in self.subplots.values():
            accumulator += el.traverse(fn, specs, full_breadth)
            if not full_breadth: break
    return accumulator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_available_figures(self):\n return sorted((method[5:], func) \\\n for method, func in self.__class__.__dict__.iteritems() \\\n if method.startswith(\"plot_\") and callable(func))", "def get_plots(self):\n return list(self.plots.values())", "def _specs_for_flat_tensors(element_spec):\n if isinstance(element_spec, StructuredTensor.Spec):\n specs = []\n for _, field_spec in sorted(\n element_spec._field_specs.items(), key=lambda t: t[0]): # pylint: disable=protected-access\n specs.extend(_specs_for_flat_tensors(field_spec))\n elif isinstance(element_spec, type_spec.BatchableTypeSpec) and (\n element_spec.__class__._flat_tensor_specs is # pylint: disable=protected-access\n type_spec.BatchableTypeSpec._flat_tensor_specs): # pylint: disable=protected-access\n # Classes which use the default `_flat_tensor_specs` from\n # `BatchableTypeSpec` case (i.e. a derived class does not override\n # `_flat_tensor_specs`.) are encoded using `component_specs`.\n specs = nest.flatten(\n element_spec._component_specs, # pylint: disable=protected-access\n expand_composites=False)\n else:\n # In addition flatting any nesting in Python,\n # this default case covers things that are encoded by one tensor,\n # such as dense tensors which are unchanged by encoding and\n # ragged tensors and sparse tensors which are encoded by a variant tensor.\n specs = nest.flatten(element_spec, expand_composites=False)\n return specs", "def latex_figure_list(spec, includes, outfile, **kwargs):\n # Enable subsetting of, e.g. figures and tables...\n type = kwargs.pop(\"type\",'figure')\n for cfg, (id,item) in zip(spec,includes):\n if type != cfg.get('type','figure'):\n continue\n print(item, file=outfile)", "def plot_power_spectrum_fits(self, figsize=(20, 10)):\n\n debug_figs = []\n debug_fig_names = []\n # individual power spectra\n for ii in range(self.nangles):\n fig = plot_power_spectrum_fit(self.separated_components_ft[ii, 0], self.otf,\n {'pixel_size': self.dx, 'wavelength': self.wavelength, 'na': self.na},\n self.power_spectrum_params[ii, 0], frq_sim=(0, 0), mask=self.pspec_masks[ii, 0],\n figsize=figsize, ttl_str=\"Unshifted component, angle %d\" % ii)\n debug_figs.append(fig)\n debug_fig_names.append(\"power_spectrum_unshifted_component_angle=%d\" % ii)\n\n fig = plot_power_spectrum_fit(self.separated_components_ft[ii, 1], self.otf,\n {'pixel_size': self.dx, 'wavelength': self.wavelength, 'na': self.na},\n self.power_spectrum_params[ii, 1], frq_sim=self.frqs[ii], mask=self.pspec_masks[ii, 1],\n figsize=figsize, ttl_str=\"Shifted component, angle %d\" % ii)\n\n debug_figs.append(fig)\n debug_fig_names.append(\"power_spectrum_shifted_component_angle=%d\" % ii)\n\n return debug_figs, debug_fig_names", "def available_plots(self):\n return self.visualizer.available_plots()", "def test_plot_graphs(self):\n\n # Graphs who are not embedded, i.e., have no coordinates.\n COORDS_NO = {\n 'Graph',\n 'BarabasiAlbert',\n 'ErdosRenyi',\n 'FullConnected',\n 'RandomRegular',\n 'StochasticBlockModel',\n }\n\n # Coordinates are not in 2D or 3D.\n COORDS_WRONG_DIM = {'ImgPatches'}\n\n Gs = []\n for classname in set(graphs.__all__) - COORDS_NO - COORDS_WRONG_DIM:\n Graph = getattr(graphs, classname)\n\n # Classes who require parameters.\n if classname == 'NNGraph':\n Xin = np.arange(90).reshape(30, 3)\n Gs.append(Graph(Xin))\n elif classname in ['ImgPatches', 'Grid2dImgPatches']:\n Gs.append(Graph(img=self._img, patch_shape=(3, 3)))\n elif classname == 'LineGraph':\n Gs.append(Graph(graphs.Sensor(20, seed=42)))\n else:\n 
Gs.append(Graph())\n\n # Add more test cases.\n if classname == 'TwoMoons':\n Gs.append(Graph(moontype='standard'))\n Gs.append(Graph(moontype='synthesized'))\n elif classname == 'Cube':\n Gs.append(Graph(nb_dim=2))\n Gs.append(Graph(nb_dim=3))\n elif classname == 'DavidSensorNet':\n Gs.append(Graph(N=64))\n Gs.append(Graph(N=500))\n Gs.append(Graph(N=128))\n\n for G in Gs:\n self.assertTrue(hasattr(G, 'coords'))\n self.assertEqual(G.N, G.coords.shape[0])\n\n signal = np.arange(G.N) + 0.3\n\n G.plot(backend='pyqtgraph')\n G.plot(backend='matplotlib')\n G.plot(signal, backend='pyqtgraph')\n G.plot(signal, backend='matplotlib')\n plotting.close_all()", "def show_all_elements(ds, shapes_only=True):\n if shapes_only:\n log_print = lambda i, e: logger.info(\n \"Element %d:\\nshapes:\\n %s\",\n i, _dict_to_logstring(_element_shapes_dict(e)))\n else:\n log_print = lambda i, e: logger.info(\n \"Element %d:\\nshapes:\\n %s\\ncontents:\\n %s\",\n i, _dict_to_logstring(_element_shapes_dict(e)), _dict_to_logstring(e))\n\n logger.info(\"Showing all elements.\")\n i = 0\n for i, element in ds.enumerate(start=1).as_numpy_iterator():\n if not isinstance(element, dict):\n element = dict(enumerate(element))\n log_print(i, element)\n\n logger.info(\"All %d elements shown.\", i)\n return ds", "def plots():\n out = interactive_output(generate_plots, {'gsize':gridSlider, 'ra':RABox, 'ra':RASlider, 'dec':DECBox, 'dec':DECSlider, 'ang':radBox, 'ang':radSlider, 'style':hexDrop})\n return display(widgrid, out)", "def all(folder, mt=False):\n handles = []\n experiments = get_experiment_series(folder, mT=mt)\n for ex in experiments:\n if mt:\n handles.append(\n plt.plot(\n ex.distance,\n ex.weight,\n label='{}mm {}mT'.format(ex.height, ex.magnet))[0])\n else:\n handles.append(\n plt.plot(\n ex.distance,\n ex.weight,\n label='{}mm'.format(ex.height))[0])\n plt.legend()\n plt.show()", "def plot_list(self):\n wrapper = TextWrapper(subsequent_indent = \" \" * 22,\n width = 78)\n for method, func in self.get_available_figures():\n if method != \"list\":\n wrapper.initial_indent = (\"%-20s \" % method).ljust(22)\n print wrapper.fill(func.figure_name)", "def _iter_panels(self, sides='lrbt'):\n axs = [self] if self.get_visible() else []\n if not ({*sides} <= {*'lrbt'}):\n raise ValueError(f'Invalid sides {sides!r}.')\n for s in sides:\n for ax in getattr(self, '_' + s + 'panels'):\n if not ax or not ax.get_visible():\n continue\n axs.append(ax)\n return axs", "def plot_figs(self):\n tstart = time.process_time()\n\n saving = self.save_dir is not None\n\n # todo: populate these\n figs = []\n fig_names = []\n\n # plot images\n figh = self.plot_sim_imgs(self.frqs_guess, figsize=self.figsize)\n\n if saving:\n figh.savefig(os.path.join(self.save_dir, \"raw_images.png\"))\n if not self.hold_figs_open:\n plt.close(figh)\n\n # plot frequency fits\n fighs, fig_names = self.plot_frequency_fits(figsize=self.figsize)\n for fh, fn in zip(fighs, fig_names):\n if saving:\n fh.savefig(os.path.join(self.save_dir, \"%s.png\" % fn))\n if not self.hold_figs_open:\n plt.close(fh)\n\n # plot power spectrum fits\n fighs, fig_names = self.plot_power_spectrum_fits(figsize=self.figsize)\n for fh, fn in zip(fighs, fig_names):\n if saving:\n fh.savefig(os.path.join(self.save_dir, \"%s.png\" % fn))\n if not self.hold_figs_open:\n plt.close(fh)\n\n # widefield power spectrum fit\n figh = plot_power_spectrum_fit(self.widefield_ft, self.otf,\n {'pixel_size': self.dx, 'wavelength': self.wavelength, 'na': self.na},\n self.pspec_params_wf, mask=self.mask_wf, 
figsize=self.figsize,\n ttl_str=\"Widefield power spectrum\")\n if saving:\n figh.savefig(os.path.join(self.save_dir, \"power_spectrum_widefield.png\"))\n if not self.hold_figs_open:\n plt.close(figh)\n\n # plot filters used in reconstruction\n fighs, fig_names = self.plot_reconstruction_diagnostics(figsize=self.figsize)\n for fh, fn in zip(fighs, fig_names):\n if saving:\n fh.savefig(os.path.join(self.save_dir, \"%s.png\" % fn))\n if not self.hold_figs_open:\n plt.close(fh)\n\n # plot reconstruction results\n fig = self.plot_reconstruction(figsize=self.figsize)\n if saving:\n fig.savefig(os.path.join(self.save_dir, \"sim_reconstruction.png\"), dpi=400)\n if not self.hold_figs_open:\n plt.close(fig)\n\n # plot otf\n fig = self.plot_otf(figsize=self.figsize)\n if saving:\n fig.savefig(os.path.join(self.save_dir, \"otf.png\"))\n if not self.hold_figs_open:\n plt.close(fig)\n\n tend = time.process_time()\n # print_tee(\"plotting results took %0.2fs\" % (tend - tstart), self.log_file)\n print(\"plotting results took %0.2fs\" % (tend - tstart))\n\n return figs, fig_names", "def getPlottableReactions(reactionSuite, observable='crossSection'):\n result = []\n for reactionList in reactionSuite.reactions, reactionSuite.sums.crossSectionSums, \\\n reactionSuite.sums.multiplicitySums, \\\n reactionSuite.fissionComponents, reactionSuite.productions:\n for r in reactionList:\n if hasattr(r, observable):\n result.append(r)\n return result", "def axes_contains(ax, obj_list):\n # Get plot elements\n elems = ax.get_children()\n\n # Loop over list of objects that should be in the plot\n contains_all = False\n for obj in obj_list:\n objtype, num_expected = obj\n num = 0\n for elem in elems:\n if isinstance(elem, objtype): num += 1\n if num != num_expected:\n return False\n\n # Return True if no problems found\n return True", "def which_patches(extent):\n # TODO check input\n ramin, ramax, decmin, decmax = extent\n p1 = which_patch(ramin, decmin) # lower left\n p2 = which_patch(ramax, decmin) # lower right\n p3 = which_patch(ramin, decmax) # upper left\n if not ((p1 >= 0) & (p2 >= 0) & (p3 >= 0)):\n patch_ids = []\n else:\n patch_ids = [range(y, y + p2 - p1 + 1) for y in range(p1, p3 + 9, 9)]\n return np.array(patch_ids).flatten()", "def print_parsed(specs):\n observed_types = set()\n for i in specs.values():\n observed_types.update(i['types'])\n observed_types = sorted(observed_types)\n\n s = ['# Observed types from the parsed document']\n s.append('TRACKTYPES = [')\n for i in observed_types:\n s.append(\" '{}',\".format(i))\n s.append(']')\n print('\\n'.join(s) + '\\n')\n\n data_types = specs['bigDataUrl']['types']\n\n s = ['# Tracks for which the definition specifies bigDataUrl']\n s.append('DATA_TRACKTYPES = [')\n for i in data_types:\n s.append(\" '{}',\".format(i))\n s.append(']')\n print('\\n'.join(s) + '\\n')\n print('param_defs = [')\n print()\n for k, v in sorted(specs.items()):\n print(\n (\n '''\n Param(\n name=\"{k}\",\n fmt={v[format]},\n types={v[types]},\n required={v[required]},\n validator=str),'''.format(**locals())\n )\n )", "def plot_all(best_results: BestResults,\n *args,\n **kwargs) -> plt.Figure:\n if isinstance(best_results, BestResultsOne):\n return plot_all_one(best_results, *args, **kwargs)\n elif isinstance(best_results, BestResultsTwo):\n return plot_all_two(best_results, *args, **kwargs)\n else:\n raise ValueError('best_results argument is of unknown type')", "def test_get_axes():\n fig, axs = plt.subplots()\n assert all(\n isinstance(ax, mpl.axes.Axes)\n for ax in 
prettypyplot.tools.get_axes(axs)\n )\n assert all(\n isinstance(ax, mpl.axes.Axes)\n for ax in prettypyplot.tools.get_axes(None)\n )\n with pytest.raises(TypeError):\n prettypyplot.tools.get_axes(fig)", "def get_renderables(y_hat, args):\n # len(y_hat.fit) == 1 means that we do not have hierarhcy\n if len(y_hat.fit) == 1:\n if args.from_flat_partition:\n return _renderables_from_flat_partition(y_hat, args)\n else:\n return _renderables_from_flat_primitives(y_hat, args)\n\n if args.from_fit:\n return _renderables_from_fit(y_hat, args)\n else:\n return _renderables_from_partition(y_hat, args)", "def plot(self, **kwargs):\n from ..plot.plotutil import PlotUtilities\n\n if not self.plottable:\n raise TypeError(\"Simulation level packages are not plottable\")\n\n axes = PlotUtilities._plot_package_helper(self, **kwargs)\n return axes", "def plot(self, ax: Axes):\n\n plotted_objects = Element.plot(self, ax)\n plotted_objects += plotting.plot_aperture(ax, self)\n\n if plot_blockers:\n plotted_objects += plotting.plot_blocker(ax, self, self.blocker_diameter)\n\n return plotted_objects", "def convert_plots(self, workdir, imgFormat):\n plotList = list()\n for (label, plot) in self._expectedPlots_globalAvg:\n plotList.append(plot)\n for (label, plot) in self._expectedPlots_Nino:\n plotList.append(plot)\n for (label, plot) in self._expectedPlots_transportDiags:\n plotList.append(plot)\n\n self._convert_plots(workdir, imgFormat, plotList )", "def scree_plots(t, ndim = []):\n total_dim = len(t.shape)\n if not ndim: # case with no input ndim\n for i in range(total_dim):\n ndim.append(t.shape[i])\n elif len(ndim) != total_dim: # case that input ndim does not agree with number of dimensions of the input tensor\n for i in range(total_dim):\n ndim.append(t.shape[i])\n else: # check whether the number in ndim is less than the size of that dimension\n for i in range(total_dim):\n if ndim[i] > t.shape[i]:\n ndim[i] = t.shape[i]\n \n scree = []\n for i in range(total_dim):\n t_unfold = unfold_axis(t, i)\n [ _, e, _ ] = fast_svd(np.matmul(t_unfold,np.transpose(t_unfold)),ndim[i],n_iter=15)\n e = np.sqrt(e)\n e = np.real(e)\n scree.append(e)\n\n return scree", "def spectators(self):\n return self._return_if('_spectators')", "def plot_all_subplot_trends(resolutions, temporal_resolution, temporal_decomposition, detrend, imagefolder = 'images/timeseries/SIC/',seaice_source='nsidc'):\n for n, temp_res, temp_decomp, dt in itertools.product(resolutions, temporal_resolution, temporal_decomposition, detrend):\n plot_subplot_trend(anomlous = 'anomalous' == temp_decomp, temporal_resolution = temp_res, spatial_resolution = n, detrend = dt == 'detrended',seaice_source=seaice_source)", "def seemlike_dimension(vols):\n count = 0\n params = VolCaps.PARAMS_MAPPER.values()\n sorted_vols_caps = [VolCaps.basic_cap_by_method\n (vols, param, sorted, reverse=True) for param in\n params]\n for sorted_dimension in sorted_vols_caps:\n max_cap = sorted_dimension[0]\n min_cap = sorted_dimension[1]\n curr_rsd = get_rsd(max_cap[0], min_cap[0])\n if curr_rsd < 0.1:\n objective = sum([max_cap[0], min_cap[0]]) / 2\n yield (objective, max_cap[1], min_cap[1])\n count += 1\n else:\n yield False\n assert not count, \"There aren't seem like dimensions at all\"", "def getAllWidgets(self):\n \n visualisations = Visualisation.objects.filter(dataSource=self)\n widgets = []\n for vis in visualisations:\n widgets.append(vis.getWidget())\n return widgets", "def plot_explorer_panels(self, param_val, photonnumber, initial_index, final_index, qbt_index, 
osc_index):\n def fig_ax(index):\n return fig, axes_list_flattened[index]\n\n param_index = np.searchsorted(self.param_vals, param_val)\n param_val = self.param_vals[param_index]\n\n initial_bare = self.sweep.lookup.bare_index(initial_index, param_index)\n final_bare = self.sweep.lookup.bare_index(final_index, param_index)\n energy_ground = self.sweep.lookup.energy_dressed_index(0, param_index)\n energy_initial = self.sweep.lookup.energy_dressed_index(initial_index, param_index) - energy_ground\n energy_final = self.sweep.lookup.energy_dressed_index(final_index, param_index) - energy_ground\n qbt_subsys = self.sweep.hilbertspace[qbt_index]\n\n nrows = 3\n ncols = 2\n fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=self.figsize)\n axes_list_flattened = [elem for sublist in axs for elem in sublist]\n\n # Panel 1 ----------------------------------\n panels.display_bare_spectrum(self.sweep, qbt_subsys, param_val, fig_ax(0))\n\n # Panels 2 and 6----------------------------\n if type(qbt_subsys).__name__ in ['Transmon', 'Fluxonium']: # do not plot wavefunctions if multi-dimensional\n panels.display_bare_wavefunctions(self.sweep, qbt_subsys, param_val, fig_ax(1))\n panels.display_charge_matrixelems(self.sweep, initial_bare, qbt_subsys, param_val, fig_ax(5))\n\n # Panel 3 ----------------------------------\n panels.display_dressed_spectrum(self.sweep, initial_bare, final_bare, energy_initial, energy_final, param_val,\n fig_ax(2))\n\n # Panel 4 ----------------------------------\n panels.display_n_photon_qubit_transitions(self.sweep, photonnumber, initial_bare, param_val, fig_ax(3))\n\n # Panel 5 ----------------------------------\n panels.display_chi_01(self.sweep, qbt_index, osc_index, param_index, fig_ax(4))\n\n fig.tight_layout()\n return fig, axs", "def dims_list(self):\n return [n for n in self.schema.names if n in self.dims]" ]
[ "0.52858", "0.52142483", "0.51448363", "0.50744987", "0.50589556", "0.48802635", "0.48739997", "0.48073155", "0.4798928", "0.4749356", "0.47428975", "0.47420156", "0.47197765", "0.46932372", "0.46918872", "0.4680866", "0.46731734", "0.46569225", "0.4638593", "0.46093962", "0.45868868", "0.45852435", "0.457482", "0.45559883", "0.4552447", "0.45511317", "0.4548975", "0.45413592", "0.45412245", "0.45367965" ]
0.56958634
0
Given an object, a specific key and ranges, this method will find the specified normalization options on the appropriate OptionTree, group the elements according to the selected normalization option (i.e. either per frame or over the whole animation) and finally compute the dimension ranges in each group. The new set of ranges is returned.
def compute_ranges(self, obj, key, ranges): all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element])) if obj is None or not self.normalize or all_table: return OrderedDict() # Get inherited ranges ranges = self.ranges if ranges is None else dict(ranges) # Get element identifiers from current object and resolve # with selected normalization options norm_opts = self._get_norm_opts(obj) # Traverse displayed object if normalization applies # at this level, and ranges for the group have not # been supplied from a composite plot return_fn = lambda x: x if isinstance(x, Element) else None for group, (axiswise, framewise) in norm_opts.items(): elements = [] # Skip if ranges are cached or already computed by a # higher-level container object. framewise = framewise or self.dynamic if group in ranges and (not framewise or ranges is not self.ranges): continue elif not framewise: # Traverse to get all elements elements = obj.traverse(return_fn, [group]) elif key is not None: # Traverse to get elements for each frame frame = self._get_frame(key) elements = [] if frame is None else frame.traverse(return_fn, [group]) if not axiswise or ((not framewise or len(elements) == 1) and isinstance(obj, HoloMap)): # Compute new ranges self._compute_group_range(group, elements, ranges) self.ranges.update(ranges) return ranges
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_norm_opts(self, obj):\n norm_opts = {}\n\n # Get all elements' type.group.label specs and ids\n type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False),\n util.label_sanitizer(x.label, escape=False))) \\\n if isinstance(x, Element) else None\n element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn)\n if idspec is not None}\n\n # Group elements specs by ID and override normalization\n # options sequentially\n key_fn = lambda x: -1 if x[0] is None else x[0]\n id_groups = groupby(sorted(element_specs, key=key_fn), key_fn)\n for gid, element_spec_group in id_groups:\n gid = None if gid == -1 else gid\n group_specs = [el for _, el in element_spec_group]\n\n backend = self.renderer.backend\n optstree = Store.custom_options(\n backend=backend).get(gid, Store.options(backend=backend))\n # Get the normalization options for the current id\n # and match against customizable elements\n for opts in optstree:\n path = tuple(opts.path.split('.')[1:])\n applies = any(path == spec[:i] for spec in group_specs\n for i in range(1, 4))\n if applies and 'norm' in opts.groups:\n nopts = opts['norm'].options\n if 'axiswise' in nopts or 'framewise' in nopts:\n norm_opts.update({path: (nopts.get('axiswise', False),\n nopts.get('framewise', False))})\n element_specs = [spec for _, spec in element_specs]\n norm_opts.update({spec: (False, False) for spec in element_specs\n if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))})\n return norm_opts", "def normalize_all_data_in_dict(data: Data_dict_type, normalizers: Tuple[object, ...]) -> Data_dict_type:\n for key, item in data.items():\n values, sample_rate = item\n # save old shape and reshape data to supported format for normalizer\n old_shape = values.shape\n values = values.reshape((-1, values.shape[-1]))\n # normalize data\n for normalizer in normalizers:\n values = normalizer.transform(values)\n # Reshape data back to old shape\n values = values.reshape(old_shape)\n data[key] = (values, sample_rate)\n return data", "def _apply_compositor(self, holomap, ranges=None, keys=None, dimensions=None):\n # Compute framewise normalization\n defaultdim = holomap.ndims == 1 and holomap.kdims[0].name != 'Frame'\n\n if keys and ranges and dimensions and not defaultdim:\n dim_inds = [dimensions.index(d) for d in holomap.kdims]\n sliced_keys = [tuple(k[i] for i in dim_inds) for k in keys]\n frame_ranges = OrderedDict([(slckey, self.compute_ranges(holomap, key, ranges[key]))\n for key, slckey in zip(keys, sliced_keys) if slckey in holomap.data.keys()])\n else:\n mapwise_ranges = self.compute_ranges(holomap, None, None)\n frame_ranges = OrderedDict([(key, self.compute_ranges(holomap, key, mapwise_ranges))\n for key in holomap.keys()])\n ranges = frame_ranges.values()\n\n return Compositor.collapse(holomap, (ranges, frame_ranges.keys()), mode='display')", "def all_gizmo_to_group():\n\n for n in nuke.allNodes():\n # Avoid scripted gizmo.\n if nuke.knobChangeds.get(n.Class()):\n continue\n\n gizmo_to_group(n)", "def scan_range(self, obj):\n detect_minmax = []\n for item in self._category:\n cat = item.replace(' ', '')\n has_minmax = False\n for k, v in obj.items():\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(in_v.items())[-1]\n \n if has_minmax:\n detect_minmax.append('Min ' + item)\n detect_minmax.append('Max ' + item)\n else:\n 
detect_minmax.append(item)\n \n self._category_aux = detect_minmax\n for c in self._category_aux:\n self._data[c] = []", "def _init_group_dicts(self):\n\n all_groups = set()\n\n for detection in config['detections'].values():\n if 'action' in detection and detection['action'] == 'buy':\n if 'groups' in detection:\n for group in detection['groups']:\n all_groups.add(group)\n\n for group in all_groups:\n self.trade_sizes[group] = config['trade_min_size']\n self.trade_proceeds[group] = {}\n\n self.trade_sizes['default'] = config['trade_min_size']\n self.trade_proceeds['default'] = {}", "def recalculate_groups(dynamic_list):\n new_final_list = []\n for page in dynamic_list:\n page_list = []\n for group in page:\n new_dict = {}\n lowest_x = 100\n highest_x = 0\n for line in group:\n # find if x is the lowest or highest\n if line['bbox'][0] < lowest_x:\n lowest_x = line['bbox'][0]\n if line['bbox'][2] > highest_x:\n highest_x = line['bbox'][2]\n x_one = lowest_x\n x_two = highest_x\n y_one = group[0]['bbox'][1]\n y_two = h.get_biggest_y(group)\n new_bbox = [x_one, y_one, x_two, y_two]\n new_dict['bbox'] = new_bbox\n new_dict['font'] = h.find_font_in_group(group)\n new_dict['size'] = h.find_font_size_in_group(group)\n new_dict['page'] = group[0]['page']\n new_dict['lines'] = group\n\n page_list.append(new_dict)\n\n new_final_list.append(page_list)\n return new_final_list", "def group_normalize(strokes):\n\n long_stroke = concat(strokes)\n x_min = min(long_stroke.x)\n x_max = max(long_stroke.x)\n y_min = min(long_stroke.y)\n y_max = max(long_stroke.y)\n x_range = float(x_max - x_min)\n y_range = float(y_max - y_min)\n normalized_strokes = []\n for stroke in strokes:\n x = ((np.array(stroke.x) - x_min) / x_range).tolist()\n y = ((np.array(stroke.y) - y_min) / y_range).tolist()\n normalized_strokes.append(Stroke(x, y))\n return normalized_strokes", "def get_entityset_ranges(my_core, meshset, geom_dim):\n\n entityset_ranges = {}\n entityset_types = ['Nodes', 'Curves', 'Surfaces', 'Volumes']\n for dimension, set_type in enumerate(entityset_types):\n entityset_ranges[set_type] = my_core.get_entities_by_type_and_tag(meshset, types.MBENTITYSET, geom_dim,\n [dimension])\n return entityset_ranges", "def group_normalize_wrt_max(strokes):\n\n long_stroke = concat(strokes)\n x_min = min(long_stroke.x)\n x_max = max(long_stroke.x)\n y_min = min(long_stroke.y)\n x_range = float(x_max - x_min)\n y_range = float(y_max - y_min)\n max_range = max(x_range, y_range)\n normalized_strokes = []\n for stroke in strokes:\n x = ((np.array(stroke.x) - x_min) / max_range).tolist()\n y = ((np.array(stroke.y) - y_min) / max_range).tolist()\n normalized_strokes.append(Stroke(x, y))\n return normalized_strokes", "def _normalize(self, value_dict):\n median = np.median([value_dict[i] for i in list(value_dict.keys())])\n n = len(value_dict.keys())\n if median < 1.0 / float(n):\n divisor = 1.0 / float(n)\n else:\n divisor = median\n return_dict = {}\n for i in list(value_dict.keys()):\n return_dict[i] = float(value_dict[i]) / float(divisor)\n return return_dict", "def __call__(self, results):\n\n for key in results.get('seg_fields', []):\n if self.scale_factor != 1:\n results[key] = general_ocr.imrescale(\n results[key],\n self.scale_factor,\n interpolation='nearest',\n backend=self.backend)\n return results", "def normalize_other_inputs(X, Args):\n other_keys = list(X.keys())\n other_keys.remove(\"blend_image\")\n for key in other_keys:\n X[key] = (X[key] - np.mean(X[key])) / np.std(X[key])\n if Args.model == \"orchid\":\n loc_im = 
np.zeros_like(X[other_keys[0]])\n for i, key in enumerate(other_keys):\n im = X.pop(key)\n maximum = np.min((im.max(axis=2).max(axis=1)))\n im[im < maximum / 1.5] = 0\n im[im >= maximum / 1.5] = i + 1\n loc_im += im\n X['loc_im'] = loc_im\n return X", "def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True):\n def lookup(x):\n \"\"\"\n Looks up options for object, including plot defaults,\n keyfn determines returned key otherwise None key is used.\n \"\"\"\n options = cls.lookup_options(x, opt_type)\n selected = {o: options.options[o]\n for o in opts if o in options.options}\n if opt_type == 'plot' and defaults:\n plot = Store.registry[cls.backend].get(type(x))\n selected['defaults'] = {o: getattr(plot, o) for o in opts\n if o not in selected and hasattr(plot, o)}\n key = keyfn(x) if keyfn else None\n return (key, selected)\n\n # Traverse object and accumulate options by key\n traversed = obj.traverse(lookup, specs)\n options = defaultdict(lambda: defaultdict(list))\n default_opts = defaultdict(lambda: defaultdict(list)) \n for key, opts in traversed:\n defaults = opts.pop('defaults', {})\n for opt, v in opts.items():\n options[key][opt].append(v)\n for opt, v in defaults.items():\n default_opts[key][opt].append(v)\n\n # Merge defaults into dictionary if not explicitly specified\n for key, opts in default_opts.items():\n for opt, v in opts.items():\n if opt not in options[key]:\n options[key][opt] = v\n return options if keyfn else options[None]", "def _get_cmap_normalisation(self):\n\n # Non-normalisable data should return cmap = None\n if not self.normalisable:\n return\n\n # Get min/max based upon ZScale with contrast parameter\n contrast = self.options.get('contrast', 0.2)\n vmin, vmax = ZScaleInterval(contrast=contrast).get_limits(self.data)\n\n # Make this symmetric if using Stokes V\n if self.stokes == 'v':\n v = max(abs(vmin), abs(vmax))\n vmin = -v\n vmax = v\n\n # Override with user-supplied values if present\n if self.options.get('vmin') or self.options.get('vmax'):\n vmin = self.options.get('vmin', -2)\n vmax = self.options.get('vmax', 1)\n\n # Normalise with maximum value in data\n if self.options.get('maxnorm'):\n vmax = np.nanmax(self.data)\n vmin = None\n\n norm = ImageNormalize(\n self.data,\n interval=ZScaleInterval(),\n vmin=vmin,\n vmax=vmax,\n clip=True\n )\n\n return norm", "def __iter__(self):\n for key in self._group._opts.keys():\n yield key", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def transform(self, applyfunc):\n result = self.obj.copy()\n\n for name, group in self:\n # XXX\n group.groupName = name\n res = applyfunc(group)\n\n indexer, _ = common.get_indexer(self.obj.index, group.index, None)\n np.put(result, indexer, res)\n\n return result", "def preprocess_data(self):\n\n selected_data = []\n selected_name = []\n quant_norm_applied = []\n\n rgb_color_to_keys = self.get_rgb_items_for_plot()\n for data_key in rgb_color_to_keys.values():\n if data_key in self.dict_to_plot:\n selected_name.append(data_key)\n\n if 
self.scaler_data is not None:\n if np.count_nonzero(self.scaler_data) == 0:\n logger.warning(\"scaler is zero - scaling was not applied\")\n elif len(self.scaler_data[self.scaler_data == 0]) > 0:\n logger.warning(\"scaler data has zero values\")\n\n for i, k in enumerate(selected_name):\n q_norm_applied = False\n if self.quantitative_normalization:\n # Quantitative normalization\n (\n data_arr,\n q_norm_applied,\n ) = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(\n data_in=self.dict_to_plot[k],\n scaler_dict=self.scaler_norm_dict,\n scaler_name_default=self.get_selected_scaler_name(),\n data_name=k,\n ref_name=self.quantitative_ref_eline,\n name_not_scalable=self.name_not_scalable,\n )\n else:\n # Normalize by the selected scaler in a regular way\n data_arr = normalize_data_by_scaler(\n data_in=self.dict_to_plot[k],\n scaler=self.scaler_data,\n data_name=k,\n name_not_scalable=self.name_not_scalable,\n )\n\n selected_data.append(data_arr)\n quant_norm_applied.append(q_norm_applied)\n\n return selected_data, selected_name, rgb_color_to_keys, quant_norm_applied", "def normalize(self, argumentMap):\r\n method = moduleName + '.' + self.className + '.' + 'normalize'\r\n global graphAPI\r\n \r\n #Create a list of indices that we'll use later to control a for loop. flowControlList governs the value j\r\n # in the inner loop. It contains a list of bucket indices. flowControlList will be destructively evaluated\r\n # by removing i at each iteration of the outer loop, so we want to start a copy of the key list. \r\n flowControlList = list(self.buckets.keys())\r\n \r\n for indexKey in sorted(self.buckets.keys()):\r\n #Outer loop,from i to k\r\n try:\r\n flowControlList.remove(indexKey)\r\n if len(flowControlList) >= 1:\r\n #Take the agents from the current bucket.\r\n # Iterate over the rest to make sure that the agents from the current bucket don't appear in any\r\n stimulusProfile = self.buckets[indexKey]\r\n try:\r\n if len(stimulusProfile.agentSet) > 0:\r\n for flowControlListKey in flowControlList:\r\n #inner loop,from j to k\r\n nextStimulusProfile = self.buckets[flowControlListKey]\r\n nextStimulusProfile.agentSet.difference_update(stimulusProfile.agentSet)\r\n except Exception as e:\r\n stimulusMeme = graphAPI.getEntityMemeType(stimulusProfile.stimulusID)\r\n errorMsg = \"Can't disentangle lower prio conditional stimulus %s agent set from higher prio agent set. Traceback = %s\" %(stimulusMeme,e)\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n except Exception as e:\r\n errorMsg = \"\"\r\n try:\r\n remaining = len(flowControlList)\r\n stimulusProfile = self.buckets[indexKey]\r\n stimulusMeme = graphAPI.getEntityMemeType(stimulusProfile.stimulusID)\r\n errorMsg = \"Can't normalize conditional stimulus %s agent set with regard to lower prio stimuli. %s lower prio stimuli unnormalized. Traceback = %s\" %(stimulusMeme, remaining, e)\r\n except Exception as ee:\r\n errorMsg = \"Unexpected error %s occurred while trying to normalized conditional stimulus set. 
Traceback = %s\" %(ee, e)\r\n finally:\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])", "def normalization(obj):\n dic = obj.mainfield.para_dict.copy()\n for item in obj.forfield: dic.update(item.para_dict)\n for item in obj.existfield: dic.update(item.para_dict)\n\n global_dic = number_type(dic)\n obj.normal_guards = norm_rep(global_dic, obj.all_sentence)\n\n main_dic = number_type(obj.mainfield.para_dict)\n obj.mainfield.content = norm_rep(main_dic, obj.mainfield.content)\n\n for index in range(len(obj.forfield)):\n obj.forfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # for_dic = number_type(temp_dic)\n obj.forfield[index].content = norm_rep(global_dic, obj.forfield[index].content)\n print(global_dic, obj.forfield[index].para_dict)\n obj.forfield[index].para_dict = pair_2_dict(global_dic, obj.forfield[index].para_dict)\n\n for index in range(len(obj.existfield)):\n obj.existfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # exist_dic = number_type(temp_dic)\n obj.existfield[index].content = norm_rep(global_dic, obj.existfield[index].content)\n obj.existfield[index].para_dict = pair_2_dict(global_dic, obj.existfield[index].para_dict)\n\n # change para_dict: {'i':'NODE} -> {'NODE_1', 'NODE'}\n obj.mainfield.para_dict = pair_2_dict(global_dic, obj.mainfield.para_dict)", "def rf_extents(rf_dict):\n x_min, y_min, x_max, y_max = np.inf, np.inf, -np.inf, -np.inf\n for rf in rf_dict:\n x_min = np.min([rf['on_center_x'], x_min])\n x_max = np.max([rf['on_center_x'], x_max])\n y_min = np.min([rf['on_center_y'], y_min])\n y_max = np.max([rf['on_center_y'], y_max])\n if x_min == x_max:\n x_max += 1\n if y_min == y_max:\n y_max += 1\n return {\n 'x_min': x_min,\n 'x_max': x_max,\n 'y_min': y_min,\n 'y_max': y_max\n }", "def groups(self):\n\n\t\tprint \"completed minimization\"\n\t\tcopy(self.rootdir+'counterions-minimized.gro',self.rootdir+'system.gro')\n\t\tcopy(self.rootdir+'counterions.top',self.rootdir+'system.top')\n\t\tif self.simscale == 'aamd': grouptype = 'standard'\n\t\tif self.simscale == 'cgmd': grouptype = 'cgmd_water'\n\t\tself.grouping(grouptype=grouptype)", "def __call__(self, results):\n for key in results.get('img_fields', ['img']):\n results[key] = general_ocr.imnormalize(results[key], self.mean, self.std,\n self.to_rgb)\n results['img_norm_cfg'] = dict(\n mean=self.mean, std=self.std, to_rgb=self.to_rgb)\n return results", "def normalize(self, redraw=True):\n if self.screen_rect is not None:\n self.relative_sizes = []\n\n height = self.screen_rect.height\n left, right = self._get_columns()\n\n if left.count > 0:\n self.relative_sizes += self._split_integer(height, left.count)\n if right.count > 0:\n self.relative_sizes += self._split_integer(height, right.count)\n\n if redraw:\n self.group.layout_all()\n self.do_normalize = False", "def _group_objects(list_, attr=None, key=None, default=None,\n minimum=MIN_GROUPED):\n if not bool(attr) ^ bool(key):\n raise AttributeError(\"Either an attribute or a key must be specified.\")\n\n name = \"A-Z\" if default is None else default\n groups = collections.defaultdict(list)\n\n if list_ and (minimum is None or len(list_) > minimum):\n for item in list_:\n value = getattr(item, attr) if attr is not None else item[key]\n letter = value[0].upper()\n if letter not in string.ascii_uppercase:\n groups[\"#\"].append(item)\n else:\n groups[letter].append(item)\n elif list_:\n groups[name] = list_\n\n return groups", "def 
group_normalize_wrt_x(strokes):\n\n long_stroke = concat(strokes)\n x_min = min(long_stroke.x)\n x_max = max(long_stroke.x)\n y_min = min(long_stroke.y)\n x_range = float(x_max - x_min)\n normalized_strokes = []\n for stroke in strokes:\n x = ((np.array(stroke.x) - x_min) / x_range).tolist()\n y = ((np.array(stroke.y) - y_min) / x_range).tolist()\n normalized_strokes.append(Stroke(x, y))\n return normalized_strokes", "def _resize_seg(self, results):\n for key in results.get('seg_fields', []):\n if self.keep_ratio:\n gt_seg = mmcv.imrescale(\n results[key],\n results['scale'],\n interpolation='nearest',\n backend=self.backend)\n else:\n gt_seg = mmcv.imresize(\n results[key],\n results['scale'],\n interpolation='nearest',\n backend=self.backend)\n results[key] = gt_seg", "def groupCtrl (sel=None):\n if sel==None:\n sel = pm.ls(sl =1)\n listGrp = []\n for obj in sel:\n rotOrder = pm.getAttr (obj+'.rotateOrder')\n names = obj.split('_')\n\n if len(names) > 3:\n auto = obj.replace( names[-1], '%s_AUTO'%( pm.util.capitalize( names[-1] ) ) )\n zero = obj.replace( names[-1], '%s_ZERO'%( pm.util.capitalize( names[-1] ) ) )\n\n elif len(names)==3:\n cap = pm.util.capitalize( names[2] )\n auto = names[0]+'_'+names[1]+cap+'_AUTO'\n zero = names[0]+'_'+names[1]+cap+'_ZERO'\n\n elif len(names)<=2:\n auto = obj+'_AUTO'\n zero = obj+'_ZERO'\n\n auto = pm.group (em=1, n=auto )\n pm.setAttr ( auto+'.rotateOrder', rotOrder)\n\n zero = pm.group (em=1, n=zero )\n pm.setAttr (zero+'.rotateOrder', rotOrder)\n # create attr Mem\n if pm.objExists(obj+'.AUTO')==0:\n pm.addAttr (obj,ln ='AUTO', dt ='string')\n\n pm.setAttr (obj+'.AUTO' ,auto ,k=0, l=0, type ='string')\n\n if pm.objExists(obj+'.ZERO')==0:\n pm.addAttr (obj,ln ='ZERO', dt ='string')\n\n pm.setAttr (obj+'.ZERO' ,zero ,k=0, l=0, type ='string')\n # ZERO obj\n pm.addAttr (zero,ln ='obj', dt ='string')\n pm.setAttr (zero+'.obj' ,obj ,k=0, l=0, type ='string')\n # AUTO obj\n pm.addAttr (auto,ln ='obj', dt ='string')\n pm.setAttr (auto+'.obj' ,obj ,k=0, l=0, type ='string')\n #check parent of selectObj\n listParent = pm.listRelatives (obj ,p=1,typ='transform')\n #if num==0, do nothing or if num==1 do parent\n if len(listParent)==1 :\n #print 'yes'\n pm.parent (zero , listParent[0] )\n # match positionand rotation\n pm.delete (pm.pointConstraint ( obj , auto))\n pm.delete (pm.orientConstraint ( obj , auto))\n pm.delete (pm.pointConstraint ( obj , zero))\n pm.delete (pm.orientConstraint ( obj , zero))\n pm.parent ( auto , zero)\n pm.parent ( obj , auto )\n # set nonKeyAble\n grp = [zero , auto , obj]\n listGrp.append(grp)\n #print grp\n return listGrp", "def _parse_groupped_data(self):\n for i, val in enumerate(self.values.keys()):\n xy = self.values[val]\n self._set_and_get(\"x_\", val, xy[:, 0])\n self._set_and_get(\"y_\", val, xy[:, 1])" ]
[ "0.6445774", "0.48843622", "0.4852263", "0.48259893", "0.4770748", "0.47455063", "0.4726611", "0.4720005", "0.4644569", "0.46267", "0.45563284", "0.4543702", "0.454177", "0.45273983", "0.4494524", "0.4494449", "0.44733185", "0.44697282", "0.44692782", "0.44428465", "0.44359696", "0.4435475", "0.44344687", "0.44155663", "0.4409761", "0.43901405", "0.43896997", "0.4389262", "0.43817148", "0.43754458" ]
0.689226
0
Gets the normalization options for a LabelledData object by traversing the object to find elements and their ids. The id is then used to select the appropriate OptionsTree, accumulating the normalization options into a dictionary. Returns a dictionary of normalization options for each element in the tree.
def _get_norm_opts(self, obj): norm_opts = {} # Get all elements' type.group.label specs and ids type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False), util.label_sanitizer(x.label, escape=False))) \ if isinstance(x, Element) else None element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn) if idspec is not None} # Group elements specs by ID and override normalization # options sequentially key_fn = lambda x: -1 if x[0] is None else x[0] id_groups = groupby(sorted(element_specs, key=key_fn), key_fn) for gid, element_spec_group in id_groups: gid = None if gid == -1 else gid group_specs = [el for _, el in element_spec_group] backend = self.renderer.backend optstree = Store.custom_options( backend=backend).get(gid, Store.options(backend=backend)) # Get the normalization options for the current id # and match against customizable elements for opts in optstree: path = tuple(opts.path.split('.')[1:]) applies = any(path == spec[:i] for spec in group_specs for i in range(1, 4)) if applies and 'norm' in opts.groups: nopts = opts['norm'].options if 'axiswise' in nopts or 'framewise' in nopts: norm_opts.update({path: (nopts.get('axiswise', False), nopts.get('framewise', False))}) element_specs = [spec for _, spec in element_specs] norm_opts.update({spec: (False, False) for spec in element_specs if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))}) return norm_opts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dictize(self):\n dict = {}\n for node in self.sort():\n logger.debug(\"Dictize: id %s has name %s\" % (node._id, node.name))\n x = node._kwargs()\n dict[node._id]={\"klass\":node.__class__.__name__, \n \"kwargs\": x,\n \"children\":[child._id for child in node.children()]}\n return dict", "def collect_children_by_id(self):\n self.children_by_id = {}\n self.root_by_id = {}\n self.ns_for_root_id = {}\n\n def recursive_fill_root_id(entry):\n root_id = self.root_by_id.get(entry.mount_id)\n if root_id is not None:\n return root_id\n\n if entry.parent_id == entry.mount_id:\n # self-referencing is a root\n root_id = entry.mount_id\n self.root_by_id[root_id] = root_id\n return root_id\n\n parent_entry = self.items.get(entry.parent_id)\n if parent_entry is None:\n # The parent is unknown, so it is an implicit root\n root_id = entry.mount_id\n self.root_by_id[root_id] = root_id\n return root_id\n\n root_id = recursive_fill_root_id(parent_entry)\n self.root_by_id[entry.mount_id] = root_id\n return root_id\n\n for entry in self.items.values():\n if entry.parent_id not in self.children_by_id:\n self.children_by_id[entry.parent_id] = {}\n self.children_by_id[entry.parent_id][entry.mount_id] = entry.abs_mount_point(no_question=True)\n root_id = recursive_fill_root_id(entry)\n if root_id not in self.ns_for_root_id:\n self.ns_for_root_id[root_id] = set()\n self.ns_for_root_id[root_id].add(entry.mount_ns)\n\n # Sanity check\n assert len(self.items) == len(self.root_by_id)", "def clean(self, data: BaseModel, id: Optional[str] = None) -> Dict[str, Any]:\n r = {}\n for name, value in data.dict().items():\n for fn in self.cleaners[name]:\n value = fn(value)\n r[name] = value\n return r", "def get_descendant_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def _mapping(self, pdb_id, loop_type, normalizer):\n\n mapping = {}\n with self.session() as session:\n query = self.query(session, pdb_id).filter_by(type=loop_type)\n for result in query:\n unit_ids = normalizer(result.unit_ids)\n if unit_ids in mapping:\n self.logger.error(\"Loop %s duplicates %s\",\n result.loop_id, mapping[unit_ids])\n continue\n mapping[unit_ids] = result.loop_id\n return mapping", "def normalize_features(self, data_dict, ind):\n pre_norm_list = []\n for title in data_dict:\n pre_norm_list.append(data_dict[title][ind])\n if self.normalization_method == 'min_max':\n mini, maxi, norm_list = normalize.min_max_normalize(pre_norm_list)\n self.normalization_n.append(mini)\n self.normalization_d.append(maxi - mini)\n elif self.normalization_method == 'z_score':\n mean, var, norm_list = normalize.z_score_normalize(pre_norm_list)\n self.normalization_n.append(mean)\n self.normalization_d.append(var)\n elif self.normalization_method == 'none':\n norm_list = pre_norm_list[:]\n self.normalization_n.append(0)\n self.normalization_d.append(1)\n for i, title in enumerate(data_dict):\n data_dict[title][ind] = norm_list[i]", "def normalization(obj):\n dic = obj.mainfield.para_dict.copy()\n for item in obj.forfield: dic.update(item.para_dict)\n for item in obj.existfield: dic.update(item.para_dict)\n\n global_dic = number_type(dic)\n obj.normal_guards = norm_rep(global_dic, obj.all_sentence)\n\n main_dic = number_type(obj.mainfield.para_dict)\n obj.mainfield.content = norm_rep(main_dic, obj.mainfield.content)\n\n for index in range(len(obj.forfield)):\n obj.forfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # for_dic = number_type(temp_dic)\n obj.forfield[index].content = 
norm_rep(global_dic, obj.forfield[index].content)\n print(global_dic, obj.forfield[index].para_dict)\n obj.forfield[index].para_dict = pair_2_dict(global_dic, obj.forfield[index].para_dict)\n\n for index in range(len(obj.existfield)):\n obj.existfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # exist_dic = number_type(temp_dic)\n obj.existfield[index].content = norm_rep(global_dic, obj.existfield[index].content)\n obj.existfield[index].para_dict = pair_2_dict(global_dic, obj.existfield[index].para_dict)\n\n # change para_dict: {'i':'NODE} -> {'NODE_1', 'NODE'}\n obj.mainfield.para_dict = pair_2_dict(global_dic, obj.mainfield.para_dict)", "def get_list_by_id(driver, id):\n try:\n flat_list = [\n a.get_attribute(\"value\")\n for a in driver.find_element_by_id(id).find_elements_by_tag_name(\"option\")\n ]\n return flat_list\n except Exception:\n return []", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def normalize(ds, config):\n logger.info(\"Applying normalization with config:\\n %s\", _dict_to_logstring(config))\n\n key = config[\"key\"]\n def _normalize(x):\n return dict(x, **{key: features.cmvn(x[key], **config.get(\"kwargs\", {}))})\n\n return (ds.batch(config.get(\"batch_size\", 1))\n .map(_normalize, num_parallel_calls=TF_AUTOTUNE)\n .unbatch())", "def words_to_word_ids(data, word_to_id):\n # if isinstance(data[0], six.string_types):\n # print(type(data[0]))\n # # exit()\n # print(data[0])\n # print(word_to_id)\n # return [word_to_id[str(word)] for word in data]\n # else:\n return [word_to_id[word] for word in data]\n\n # if isinstance(data[0], str):\n # # print('is a string object')\n # return [word_to_id[word] for word in data]\n # else:#if isinstance(s, bytes):\n # # print('is a unicode object')\n # # print(data[0])\n # return [word_to_id[str(word)] f", "def flatten(orig):\n\n\t# Empty dictionary\n\tdata = {}\n\tfor c in orig['tree']['children']:\n\t\t# in operator\n\t\tif 'children' in c:\n\t\t\tfor c2 in c['children']:\n\t\t\t\tif 'children' in c2:\n\t\t\t\t\tfor c3 in c2['children']:\n\t\t\t\t\t\tif 'children' in c3:\n\t\t\t\t\t\t\tfor c4 in c3['children']:\n\t\t\t\t\t\t\t\tif (c4['category'] == 'personality'):\n\t\t\t\t\t\t\t\t\tdata[c4['id']] = c4['percentage']\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif (c3['category'] == 'personality'):\n\t\t\t\t\t\t\t\tdata[c3['id']] = c3['percentage']\n\n\treturn data", "def unflatten_to_tree(df, label_map=None, label_col='label', id_col='id'):\r\n\r\n tf_df = df.filter(like='level')\r\n n_lvls = len(tf_df.columns)\r\n lvl_list = range(n_lvls)\r\n\r\n # Construct all nodes\r\n uniq_ids = pd.Series(pd.unique(tf_df.values.ravel()))\r\n uniq_ids = uniq_ids.dropna()\r\n\r\n if label_map is not None:\r\n assert len(set(uniq_ids)-set(label_map[label_col].unique()))==0, '''\r\n If a label_map is specified, all labels in df must\r\n be present in the map '''\r\n rdict = { r[label_col]: r[id_col] for i, r in label_map.iterrows() }\r\n tf_df = tf_df.replace(rdict)\r\n uniq_ids = pd.Series(pd.unique(tf_df.values.ravel()))\r\n uniq_ids = uniq_ids.dropna()\r\n uniq_ids = 
uniq_ids.astype('int')\r\n\r\n assert len(tf_df['level_0'].unique())==1, '''there can only be\r\n one level_0 id'''\r\n root_id = tf_df['level_0'].unique()[0]\r\n\r\n nodes = {}\r\n for nid in uniq_ids:\r\n nodes[nid] = Node(nid, {}, None)\r\n\r\n # Make relationships\r\n for i in lvl_list:\r\n lvl_col = 'level_%s' % i\r\n nxtlvl_col = 'level_%s' % (i+1)\r\n assert ~tf_df[lvl_col].isin(tf_df.drop(lvl_col, axis=1)).any(), '''\r\n ids cannot span multiple levels'''\r\n\r\n if i<lvl_list[-1]:\r\n for pnid in tf_df[lvl_col].unique():\r\n child_locs = pd.Series(tf_df.ix[tf_df[lvl_col]==pnid,\r\n nxtlvl_col].unique()).dropna()\r\n for cnid in child_locs:\r\n nodes[cnid].parent = nodes[pnid]\r\n nodes[pnid].add_child(nodes[cnid])\r\n\r\n t = Tree(nodes[root_id])\r\n return t", "def get_label_set(corpusID):\n existing_corpus = DBcorpus.query.get_or_404(corpusID)\n corpus_data = fix_corpus_format(CorpusSchema().dump(existing_corpus).data)\n\n results = []\n for label in labels_set(existing_corpus):\n results.append(LabelSchema().dump(label).data)\n\n return {\"corpus\": corpus_data, \"labels\": results }, 200", "def get_descendant_objective_bank_id_terms(self):\n return # osid.search.terms.IdTerm", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def id_forms(self, pydic_id):\n try:\n return self.dictionaries[pydic_id.dict].id_forms(pydic_id)\n except (ValueError, KeyError):\n return None", "def preprocess_data(self):\n\n selected_data = []\n selected_name = []\n quant_norm_applied = []\n\n rgb_color_to_keys = self.get_rgb_items_for_plot()\n for data_key in rgb_color_to_keys.values():\n if data_key in self.dict_to_plot:\n selected_name.append(data_key)\n\n if self.scaler_data is not None:\n if np.count_nonzero(self.scaler_data) == 0:\n logger.warning(\"scaler is zero - scaling was not applied\")\n elif len(self.scaler_data[self.scaler_data == 0]) > 0:\n logger.warning(\"scaler data has zero values\")\n\n for i, k in enumerate(selected_name):\n q_norm_applied = False\n if self.quantitative_normalization:\n # Quantitative normalization\n (\n data_arr,\n q_norm_applied,\n ) = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(\n data_in=self.dict_to_plot[k],\n scaler_dict=self.scaler_norm_dict,\n scaler_name_default=self.get_selected_scaler_name(),\n data_name=k,\n ref_name=self.quantitative_ref_eline,\n name_not_scalable=self.name_not_scalable,\n )\n else:\n # Normalize by the selected scaler in a regular way\n data_arr = normalize_data_by_scaler(\n data_in=self.dict_to_plot[k],\n scaler=self.scaler_data,\n data_name=k,\n name_not_scalable=self.name_not_scalable,\n )\n\n selected_data.append(data_arr)\n quant_norm_applied.append(q_norm_applied)\n\n return selected_data, selected_name, rgb_color_to_keys, quant_norm_applied", "def getAttributsByIdref(self, id) :\n\t\t# if id in self.lid.keys() :\n\t\t# \treturn self.lid[id]\n\t\t# else :\n\t\treturn self._getIdrefs(self.doc.documentElement, id)", "def get_ancestor_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n 
continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def get_translated_ids(id):", "def get_descendant_agency_id_terms(self):\n return # osid.search.terms.IdTerm", "def compute_ranges(self, obj, key, ranges):\n all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element]))\n if obj is None or not self.normalize or all_table:\n return OrderedDict()\n # Get inherited ranges\n ranges = self.ranges if ranges is None else dict(ranges)\n\n # Get element identifiers from current object and resolve\n # with selected normalization options\n norm_opts = self._get_norm_opts(obj)\n\n # Traverse displayed object if normalization applies\n # at this level, and ranges for the group have not\n # been supplied from a composite plot\n return_fn = lambda x: x if isinstance(x, Element) else None\n for group, (axiswise, framewise) in norm_opts.items():\n elements = []\n # Skip if ranges are cached or already computed by a\n # higher-level container object.\n framewise = framewise or self.dynamic\n if group in ranges and (not framewise or ranges is not self.ranges):\n continue\n elif not framewise: # Traverse to get all elements\n elements = obj.traverse(return_fn, [group])\n elif key is not None: # Traverse to get elements for each frame\n frame = self._get_frame(key)\n elements = [] if frame is None else frame.traverse(return_fn, [group])\n if not axiswise or ((not framewise or len(elements) == 1)\n and isinstance(obj, HoloMap)): # Compute new ranges\n self._compute_group_range(group, elements, ranges)\n self.ranges.update(ranges)\n return ranges", "def get_level_id_terms(self):\n return # osid.search.terms.IdTerm", "def labels_set(self):\n if len(self.children) == 0:\n return {self.label}\n else:\n children_labels = set()\n for c in self.children:\n children_labels = children_labels | c.labels_set()\n return set([self.label]) | children_labels", "def get_equivalent_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def prepare_value(self, data):\n\n if data is None:\n data = {}\n\n relations = self.instance.get_related(self.relation_type)\n\n relations = [x for x in relations if x not in data.get('rm', [])]\n\n relations += data.get('add', [])\n\n return [{'label': rel.title, 'value': object_to_urn(rel)} for rel in\n relations if rel]", "def build_hierarchy_from_id_lookup(id_lookup_file=\"idlookups.csv\"):\n df_id_lookups = pd.read_csv(id_lookup_file, index_col=0)\n\n # The naming convention separates layers of the hierarchy with a colon ':', so we can break this into a list of descendents, and calculate the depth of the tree.\n df_id_lookups[\"parsed_name\"] = df_id_lookups.name.apply(lambda s: s.split(\": \"))\n df_id_lookups[\"depth\"] = df_id_lookups.parsed_name.apply(lambda d: len(d))\n\n # The two top nodes \"Biota\" and \"Physical\" are not prepended to their children, so we need to do this manually.\n # Manually define biota and physical children\n biota_kids = [\n \"Worms\",\n \"Sponges\",\n \"Seagrasses\",\n \"Molluscs\",\n \"Macroalgae\",\n \"Jellies\",\n \"Fishes\",\n \"Echinoderms\",\n \"Crustacea\",\n \"Cnidaria\",\n \"Bryozoa\",\n \"Bioturbation\",\n \"Bacterial mats\",\n \"Ascidians\",\n ]\n\n physical_kids = [\"Substrate\"]\n\n # Prepend them to name lists, and add to depth.\n biota_inds = df_id_lookups.parsed_name.apply(lambda d: d[0] in biota_kids)\n df_id_lookups.loc[biota_inds, 
\"depth\"] += 1\n df_id_lookups.loc[biota_inds, \"parsed_name\"] = df_id_lookups.loc[biota_inds, \"parsed_name\"].apply(\n lambda d: [\"Biota\"] + d\n )\n\n physical_inds = df_id_lookups.parsed_name.apply(lambda d: d[0] in physical_kids)\n df_id_lookups.loc[physical_inds, \"depth\"] += 1\n df_id_lookups.loc[physical_inds, \"parsed_name\"] = df_id_lookups.loc[physical_inds, \"parsed_name\"].apply(\n lambda d: [\"Physical\"] + d\n )\n\n # Create columns for ancestor and descendant lists.\n df_id_lookups[\"child_name\"] = df_id_lookups.parsed_name.apply(lambda d: d[-1])\n\n df_id_lookups[\"ancestor_id_list\"] = [get_ancestor_ids(d, df_id_lookups) for d in df_id_lookups.index]\n\n df_id_lookups[\"descendant_id_list\"] = [get_descendant_ids(d, df_id_lookups) for d in df_id_lookups.index]\n\n # Create a multilabel, one hot encoded bit vector for each class, taking into account the hierarchy of ancestors, and unspecified descendants.\n # We now want to represent this class hierarchy as a bit-vector. Each class index has a unique bit in the vector. A root level class will turn on a single bit. A depth 4 class will turn on 4 bits.\n df_id_lookups[\"bit_vector\"] = [get_bit_vector(d, df_id_lookups) for d in df_id_lookups.index]\n df_id_lookups\n\n return df_id_lookups", "def word_ids_to_words(data, id_to_word):\n return [id_to_word[i] for i in data]" ]
[ "0.4882127", "0.48138362", "0.46735653", "0.46150512", "0.45790586", "0.44817707", "0.44758487", "0.4465512", "0.442252", "0.44222006", "0.43991864", "0.43809223", "0.4366402", "0.43599775", "0.4356347", "0.43552524", "0.43511787", "0.42940468", "0.42261603", "0.42139208", "0.41993126", "0.416335", "0.414635", "0.41318846", "0.41266203", "0.41098994", "0.40977052", "0.4061388", "0.40588298", "0.4056097" ]
0.6927882
0
Traverses the supplied object, getting all options in opts for the specified opt_type and specs. Also takes into account the plotting class defaults for plot options. If a keyfn is supplied, the returned options will be grouped by the returned keys.
def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True): def lookup(x): """ Looks up options for object, including plot defaults, keyfn determines returned key otherwise None key is used. """ options = cls.lookup_options(x, opt_type) selected = {o: options.options[o] for o in opts if o in options.options} if opt_type == 'plot' and defaults: plot = Store.registry[cls.backend].get(type(x)) selected['defaults'] = {o: getattr(plot, o) for o in opts if o not in selected and hasattr(plot, o)} key = keyfn(x) if keyfn else None return (key, selected) # Traverse object and accumulate options by key traversed = obj.traverse(lookup, specs) options = defaultdict(lambda: defaultdict(list)) default_opts = defaultdict(lambda: defaultdict(list)) for key, opts in traversed: defaults = opts.pop('defaults', {}) for opt, v in opts.items(): options[key][opt].append(v) for opt, v in defaults.items(): default_opts[key][opt].append(v) # Merge defaults into dictionary if not explicitly specified for key, opts in default_opts.items(): for opt, v in opts.items(): if opt not in options[key]: options[key][opt] = v return options if keyfn else options[None]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _all_opt_infos(self):\n for info in self._opts.values():\n yield info, None\n for group in self._groups.values():\n for info in group._opts.values():\n yield info, group", "def get_plot_kwargs(cfg, option, key=None):\n plot_kwargs = cfg.get(option, {}).get('plot_kwargs', {})\n if key is None:\n return plot_kwargs\n if '_xy' in option:\n additional_plot_kwargs = cfg.get('additional_plot_kwargs_xy_plots', {})\n if key in additional_plot_kwargs:\n return {**plot_kwargs, **additional_plot_kwargs[key]}\n subkey = key.split(SEP)[-1]\n if subkey in additional_plot_kwargs:\n return {**plot_kwargs, **additional_plot_kwargs[subkey]}\n return deepcopy(plot_kwargs)", "def _parse_options(options):\n opts = dict()\n for attr in dir(options):\n if attr.startswith(\"__\"):\n continue\n opts[attr] = getattr(options, attr)\n return opts", "def _all_cli_opts(self):\n for item in self._cli_opts:\n yield item['opt'], item['group']", "def options(self):\n result = []\n for typ in type(self).mro():\n result.extend(k for k, v in typ.__dict__.items()\n if isinstance(v, Option))\n return dict((o, getattr(self, o)) for o in result)", "def get_options(self):\n additional_data = self.get_additional_data()\n options_out = []\n for key, value in additional_data['DIMENSION_VALUES'].items():\n key_label = ' '.join(key.split('_')).strip().title()\n data = {'specification': key_label, 'specification_key': key, 'choices': value}\n options_out.append(data)\n return options_out", "def static_opts(ftype, **kwargs):\n\n opts = dict()\n\n if ftype == 'sequential_feature_selector':\n # check if we got the features\n features = kwargs.pop('features', None)\n if features is not None:\n opts['hidden_layer_sizes'] = (features.shape[0], features.shape[1])\n\n if ftype == 'vote':\n # check if we got the training data\n X_train = kwargs.pop('X_train', None)\n if X_train is not None:\n # TODO: check dimensions!\n opts['hidden_layer_sizes'] = (X_train.shape[1], X_train.shape[1])\n\n return opts", "def lookup(x):\n options = cls.lookup_options(x, opt_type)\n selected = {o: options.options[o]\n for o in opts if o in options.options}\n if opt_type == 'plot' and defaults:\n plot = Store.registry[cls.backend].get(type(x))\n selected['defaults'] = {o: getattr(plot, o) for o in opts\n if o not in selected and hasattr(plot, o)}\n key = keyfn(x) if keyfn else None\n return (key, selected)", "def get_options(self, key):\n if key in self.options.get_option_names():\n return self.options\n\n try:\n scope, scoped_key = key.split('.')\n except ValueError:\n return None\n\n if scope == 'input' and scoped_key in self.input.options.get_option_names():\n return self.input.options\n elif scope == 'output' and scoped_key in self.output.options.get_option_names():\n return self.output.options\n elif scope == 'exploit' and scoped_key in self.exploit.options.get_option_names():\n return self.exploit.options\n else:\n return None", "def __iter__(self):\n for key in itertools.chain(list(self._opts.keys()),\n list(self._groups.keys())):\n yield key", "def get_options(cls):\n for option in cls._general_options.items():\n yield option\n for option in cls._specific_options.items():\n yield option", "def get_options():\n user_options = {}\n user_options['surface'] = {'label': 'Surface',\n 'type': 'stringList',\n 'default': 'bcc100',\n 'values': surface_selections}\n\n user_options['metal'] = {'label': 'Metal',\n 'type': 'string',\n 'default': 'Au'}\n\n user_options['a'] = {'label': 'Lattice Constant',\n 'type': 'float',\n 'precision': 3,\n 'suffix': 'Å'}\n\n 
user_options['size-x'] = {'label': 'Size X',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-y'] = {'label': 'Size Y',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-z'] = {'label': 'Size Z',\n 'type': 'integer',\n 'default': 3}\n\n user_options['vacuum'] = {'label': 'Vacuum distance',\n 'type': 'float',\n 'precision': 1,\n 'suffix': 'Å'}\n\n user_options['orthogonal'] = {'label': 'Orthogonal',\n 'type': 'stringList',\n 'default': 'True',\n 'values': ['True', 'False']}\n\n return {'userOptions': user_options }", "def _get_norm_opts(self, obj):\n norm_opts = {}\n\n # Get all elements' type.group.label specs and ids\n type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False),\n util.label_sanitizer(x.label, escape=False))) \\\n if isinstance(x, Element) else None\n element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn)\n if idspec is not None}\n\n # Group elements specs by ID and override normalization\n # options sequentially\n key_fn = lambda x: -1 if x[0] is None else x[0]\n id_groups = groupby(sorted(element_specs, key=key_fn), key_fn)\n for gid, element_spec_group in id_groups:\n gid = None if gid == -1 else gid\n group_specs = [el for _, el in element_spec_group]\n\n backend = self.renderer.backend\n optstree = Store.custom_options(\n backend=backend).get(gid, Store.options(backend=backend))\n # Get the normalization options for the current id\n # and match against customizable elements\n for opts in optstree:\n path = tuple(opts.path.split('.')[1:])\n applies = any(path == spec[:i] for spec in group_specs\n for i in range(1, 4))\n if applies and 'norm' in opts.groups:\n nopts = opts['norm'].options\n if 'axiswise' in nopts or 'framewise' in nopts:\n norm_opts.update({path: (nopts.get('axiswise', False),\n nopts.get('framewise', False))})\n element_specs = [spec for _, spec in element_specs]\n norm_opts.update({spec: (False, False) for spec in element_specs\n if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))})\n return norm_opts", "def get_all_combinations(param_opt):\n\tif not param_opt:\n\t\treturn {}\n\treturn (dict(zip(param_opt.keys(), x)) for x in itertools.product(*param_opt.values()))", "def options(name, option=None, value=None, opt_dict=None):\n\n if isinstance(name, int):\n name = list(pytplot.data_quants.keys())[name]\n\n if opt_dict is None:\n opt_dict = {option: value}\n else:\n if not isinstance(opt_dict,dict):\n print(\"dict must be a dictionary object. Returning.\")\n return\n\n if not isinstance(name, list):\n name = [name]\n\n for i in name:\n\n for option, value in opt_dict.items():\n\n # Lower case option for consistency\n option = option.lower()\n\n if i not in pytplot.data_quants.keys():\n print(str(i) + \" is currently not in pytplot.\")\n return\n\n if option == 'color':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['extras']['line_color'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['line_color'] = [value]\n\n if option == 'link':\n if isinstance(value, list):\n pytplot.link(i, value[1], value[0])\n\n if option == 'colormap':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['extras']['colormap'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['colormap'] = [value]\n\n if option == 'spec':\n _reset_plots(i)\n if value:\n if 'spec_bins' not in pytplot.data_quants[i].coords:\n print(f\"{i} does not contain coordinates for spectrogram plotting. 
Continuing...\")\n continue\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec'] = value\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec'] = value\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n # Set the default dimension to plot by. All others will be summed over.\n if 'spec_dim_to_plot' not in pytplot.data_quants[i].attrs['plot_options']['extras']:\n if 'v' in pytplot.data_quants[i].coords:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v'\n elif 'v2' in pytplot.data_quants[i].coords:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v2'\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v1'\n\n if option == 'alt':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['alt'] = value\n\n if option == 'map':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['map'] = value\n\n if option == 'legend_names':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['legend_names'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['legend_names'] = [value]\n\n if option == 'xlog_slice':\n if value:\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_axis_type'] = 'linear'\n\n if option == 'ylog':\n negflag = 0 # _ylog_check(data_quants, value, i)\n if negflag == 0 and value:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_axis_type'] = 'linear'\n\n if option == 'ylog_slice':\n if value:\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_axis_type'] = 'linear'\n\n if option == 'zlog':\n # check for negative values and warn the user that they will be ignored\n negflag = _zlog_check(pytplot.data_quants, value, i)\n if negflag != 0 and value:\n print(str(i) + ' contains negative values; setting the z-axis to log scale will cause the negative values to be ignored on figures.')\n\n if value:\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_axis_type'] = 'linear'\n\n if option == 'nodata':\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['visible'] = value\n\n if option == 'line_style':\n if value == 0 or value == 'solid_line':\n to_be = []\n elif value == 1 or value == 'dot':\n to_be = [2, 4]\n elif value == 2 or value == 'dash':\n to_be = [6]\n elif value == 3 or value == 'dash_dot':\n to_be = [6, 4, 2, 4]\n elif value == 4 or value == 'dash_dot_dot_dot':\n to_be = [6, 4, 2, 4, 2, 4, 2, 4]\n elif value == 5 or value == 'long_dash':\n to_be = [10]\n else:\n to_be=value\n\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['line_style'] = to_be\n\n if(value == 6 or value == 'none'):\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['visible'] = False\n\n if option == 'char_size':\n pytplot.data_quants[i].attrs['plot_options']['extras']['char_size'] = value\n\n if option == 'name':\n 
pytplot.data_quants[i].attrs['plot_options']['line_opt']['name'] = value\n\n if option == \"panel_size\":\n if value > 1 or value <= 0:\n print(\"Invalid value. Should be (0, 1]\")\n return\n pytplot.data_quants[i].attrs['plot_options']['extras']['panel_size'] = value\n\n if option == 'basemap':\n pytplot.data_quants[i].attrs['plot_options']['extras']['basemap'] = value\n\n if option == 'alpha':\n if value > 1 or value < 0:\n print(\"Invalid value. Should be [0, 1]\")\n return\n pytplot.data_quants[i].attrs['plot_options']['extras']['alpha'] = value\n\n if option == 'thick':\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['line_width'] = value\n\n if option == 'yrange' or option == 'y_range':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = [value[0], value[1]]\n\n if option == 'zrange' or option == 'z_range':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_range'] = [value[0], value[1]]\n\n if option == 'xrange_slice':\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_range'] = [value[0], value[1]]\n\n if option == 'yrange_slice':\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_range'] = [value[0], value[1]]\n\n if option == 'xtitle':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['axis_label'] = value\n\n if option == 'ytitle':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['axis_label'] = value\n\n if option == 'ztitle':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['axis_label'] = value\n\n if option == 'xsubtitle':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['axis_subtitle'] = value\n\n if option == 'ysubtitle':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['axis_subtitle'] = value\n\n if option == 'zsubtitle':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['axis_subtitle'] = value\n\n if option == 'ybar':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ybar'] = value\n\n if option == 'ybar_color':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ybar'] = value\n\n if option == 'ybar_size':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ysize'] = value\n\n if option == 'plotter':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['plotter'] = value\n\n if option == 'crosshair_x':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['crosshair'] = value\n\n if option == 'crosshair_y':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['crosshair'] = value\n\n if option == 'crosshair_z':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['crosshair'] = value\n\n if option == 'static':\n pytplot.data_quants[i].attrs['plot_options']['extras']['static'] = value\n\n if option == 'static_tavg':\n pytplot.data_quants[i].attrs['plot_options']['extras']['static_tavg'] = [value[0], value[1]]\n\n if option == 't_average':\n pytplot.data_quants[i].attrs['plot_options']['extras']['t_average'] = value\n\n if option == 'spec_dim_to_plot' or option == 'spec_plot_dim':\n if len(pytplot.data_quants[i].values.shape) <= 2:\n print(f\"Must have more than 2 coordinate dimensions to set spec_coord_to_plot for {pytplot.data_quants[i].name}\")\n continue\n\n # Set the 'spec_dim_to_plot' value to either 'v' or 'v1', 'v2', 'v3', etc.\n if isinstance(value, int):\n coord_to_plot = \"v\" + str(value)\n if coord_to_plot not in pytplot.data_quants[i].coords:\n if value == 1:\n coord_to_plot = \"v\"\n if coord_to_plot not in pytplot.data_quants[i].coords:\n 
print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n continue\n else:\n print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n continue\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = coord_to_plot\n elif isinstance(value, str):\n coord_to_plot = value\n if coord_to_plot not in pytplot.data_quants[i].coords:\n print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n continue\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = value\n\n # If we're plotting against different coordinates, we need to change what we consider the \"spec_bins\"\n pytplot.data_quants[i].coords['spec_bins'] = pytplot.data_quants[i].coords[coord_to_plot]\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n if option == 'spec_slices_to_use':\n if not isinstance(value, dict):\n print(\"Must be a dictionary object in the format {'v2':15, 'v3':7}\")\n return\n else:\n for coord in value:\n if coord not in pytplot.data_quants[i].coords:\n print(f\"Dimension {coord} not found in {pytplot.data_quants[i].name}\")\n continue\n\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_slices_to_use'] = value\n\n if option == 'border':\n pytplot.data_quants[i].attrs['plot_options']['extras']['border'] = value\n\n if option == 'var_label_ticks':\n pytplot.data_quants[i].attrs['plot_options']['var_label_ticks'] = value\n\n\n return", "def options(self):\n options_to_report = dict()\n for cls in inspect.getmro(type(self)):\n parameter_names, _, _, defaults, _, _, _ = inspect.getfullargspec(cls.__init__)\n if defaults:\n class_options = {parameter_name: getattr(self, '_' + parameter_name) for\n parameter_name in parameter_names[-len(defaults):]}\n options_to_report.update(class_options)\n options_to_report.pop('mcmc_moves')\n return options_to_report", "def processOptions_(self, opts):\n\n for opt in opts.keys():\n val = opts[opt]\n\n # Skip actions, they are processed later in initializeActions_()\n if opt in self.main_actions:\n self.cfg_params['SKIM.'+opt[1:]] = val\n continue\n if opt in self.aux_actions:\n self.cfg_params['SKIM.'+opt[1:]] = val\n continue\n \n\n elif ( opt == '-cfg' ):\n pass\n\n elif ( opt in ('-continue', '-c') ):\n # Already processed in processContinueOption_()\n pass\n\n elif ( opt == '-Q' ):\n self.flag_quiet = 1\n pass\n\n elif ( opt == '-debug' ):\n if val: self.debug_level = int(val)\n else: self.debug_level = 1\n pass\n\n elif string.find(opt,'.') == -1:\n print common.prog_name+'. 
Unrecognized option '+opt\n usage()\n pass\n\n # Override config parameters from INI-file with cmd-line params\n if string.find(opt,'.') == -1 :\n self.cfg_params['SKIM.'+opt[1:]] = val\n pass\n else:\n # Command line parameters in the form -SECTION.ENTRY=VALUE\n self.cfg_params[opt[1:]] = val\n pass\n pass\n return", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def read_all_options(self, test_case=None):\n args = self.get_parsed_cmd_args(test_case)\n\n Options.validate_methods(args[\"methods\"])\n\n self.read_config_file(args[\"config_file\"])\n\n for option in self.options:\n if args[option] not in [None, []]:\n self.options[option] = args[option]\n\n if option in self.method_options:\n method, method_option = self.method_options[option]\n Options.available_methods()[method].options[method_option] = args[option]\n\n #remove duplicate\n for option in [\"methods\", \"packages\"]:\n self.options[option] = list(set(self.options[option]))\n\n return self.options", "def get_options(cls):\n return {\n \"name\": str,\n ConfigOption(\"install_files\", default=None): Or(None, list),\n ConfigOption(\"timeout\", default=300): int,\n ConfigOption(\"log_regexps\", default=None): Or(None, list),\n ConfigOption(\"stdout_regexps\", default=None): Or(None, list),\n ConfigOption(\"stderr_regexps\", default=None): Or(None, list),\n ConfigOption(\"file_logger\", default=None): Or(None, str),\n ConfigOption(\"async_start\", default=False): bool,\n ConfigOption(\"report_errors_from_logs\", default=False): bool,\n ConfigOption(\"error_logs_max_lines\", default=10): int,\n ConfigOption(\"path_cleanup\", default=True): bool,\n ConfigOption(\"pre_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"pre_stop\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_stop\", default=None): validate_func(\"driver\"),\n }", "def create_options(options, passthru_args=None, fingerprintable_options=None):\n fingerprintable = fingerprintable_options or defaultdict(dict)\n\n class FakeOptions(object):\n def for_scope(self, scope):\n # TODO(John Sirois): Some users pass in A dict of scope -> _FakeOptionValues instead of a\n # dict of scope -> (dict of option name -> value). 
Clean up these usages and kill this\n # accommodation.\n options_for_this_scope = options.get(scope) or {}\n if isinstance(options_for_this_scope, _FakeOptionValues):\n options_for_this_scope = options_for_this_scope.option_values\n\n scoped_options = {}\n if scope:\n scoped_options.update(self.for_scope(enclosing_scope(scope)).option_values)\n scoped_options.update(options_for_this_scope)\n return _FakeOptionValues(scoped_options)\n\n def for_global_scope(self):\n return self.for_scope('')\n\n def passthru_args_for_scope(self, scope):\n return passthru_args or []\n\n def items(self):\n return options.items()\n\n @property\n def scope_to_flags(self):\n return {}\n\n def get_fingerprintable_for_scope(self, bottom_scope, include_passthru=False):\n \"\"\"Returns a list of fingerprintable (option type, option value) pairs for\n the given scope.\n\n Note that this method only collects values for a single scope, NOT from\n all enclosing scopes as in the Options class!\n\n :param str bottom_scope: The scope to gather fingerprintable options for.\n :param bool include_passthru: Whether to include passthru args captured by `bottom_scope` in the\n fingerprintable options.\n \"\"\"\n pairs = []\n if include_passthru:\n pu_args = self.passthru_args_for_scope(bottom_scope)\n pairs.extend((str, arg) for arg in pu_args)\n\n option_values = self.for_scope(bottom_scope)\n for option_name, option_type in fingerprintable[bottom_scope].items():\n pairs.append((option_type, option_values[option_name]))\n return pairs\n\n def __getitem__(self, scope):\n return self.for_scope(scope)\n\n return FakeOptions()", "def create_options_for_optionables(optionables,\n options=None,\n options_fingerprintable=None,\n passthru_args=None):\n all_options = defaultdict(dict)\n fingerprintable_options = defaultdict(dict)\n bootstrap_option_values = None\n\n if options_fingerprintable:\n for scope, opts in options_fingerprintable.items():\n fingerprintable_options[scope].update(opts)\n\n def register_func(on_scope):\n scoped_options = all_options[on_scope]\n scoped_fingerprintables = fingerprintable_options[on_scope]\n register = _options_registration_function(scoped_options, scoped_fingerprintables)\n register.bootstrap = bootstrap_option_values\n register.scope = on_scope\n return register\n\n # TODO: This sequence is a bit repetitive of the real registration sequence.\n\n # Register bootstrap options and grab their default values for use in subsequent registration.\n GlobalOptionsRegistrar.register_bootstrap_options(register_func(GLOBAL_SCOPE))\n bootstrap_option_values = _FakeOptionValues(all_options[GLOBAL_SCOPE].copy())\n\n # Now register the full global scope options.\n GlobalOptionsRegistrar.register_options(register_func(GLOBAL_SCOPE))\n\n for optionable in optionables:\n optionable.register_options(register_func(optionable.options_scope))\n\n if options:\n for scope, opts in options.items():\n all_options[scope].update(opts)\n\n return create_options(all_options,\n passthru_args=passthru_args,\n fingerprintable_options=fingerprintable_options)", "def get_specific_options(cls):\n for option in cls._specific_options.items():\n yield option", "def get_options(self, panel=\"\"):\n return dict()", "def register_opts(self, opts, group=None):\n for opt in opts:\n self.register_opt(opt, group, clear_cache=False)", "def get_create_options(self, ds_options, section, pretty=True):\r\n return_value = None\r\n\r\n if 'datacenter' == section:\r\n datacenters = [loc['keyname']\r\n for loc in ds_options['locations']]\r\n return_value 
= [('datacenter', datacenters)]\r\n elif 'cpu' == section and 'server' in ds_options['categories']:\r\n results = []\r\n\r\n for item in ds_options['categories']['server']['items']:\r\n results.append((\r\n item['description'],\r\n item['price_id']\r\n ))\r\n\r\n return_value = results\r\n elif 'memory' == section and 'ram' in ds_options['categories']:\r\n ram = []\r\n for option in ds_options['categories']['ram']['items']:\r\n ram.append((int(option['capacity']), option['price_id']))\r\n\r\n return_value = [('memory', ram)]\r\n elif 'server_core' == section and \\\r\n 'server_core' in ds_options['categories']:\r\n mem_options = {}\r\n cpu_regex = re.compile(r'(\\d+) x ')\r\n memory_regex = re.compile(r' - (\\d+) GB Ram', re.I)\r\n\r\n for item in ds_options['categories']['server_core']['items']:\r\n cpu = cpu_regex.search(item['description']).group(1)\r\n memory = memory_regex.search(item['description']).group(1)\r\n\r\n if cpu and memory:\r\n if memory not in mem_options:\r\n mem_options[memory] = []\r\n\r\n mem_options[memory].append((cpu, item['price_id']))\r\n\r\n results = []\r\n for memory in sorted(mem_options.keys(), key=int):\r\n key = memory\r\n\r\n if pretty:\r\n key = memory\r\n\r\n results.append((key, mem_options[memory]))\r\n\r\n return_value = results\r\n elif 'os' == section:\r\n os_regex = re.compile(r'(^[A-Za-z\\s\\/\\-]+) ([\\d\\.]+)')\r\n bit_regex = re.compile(r' \\((\\d+)\\s*bit')\r\n extra_regex = re.compile(r' - (.+)\\(')\r\n\r\n os_list = {}\r\n flat_list = []\r\n\r\n # Loop through the operating systems and get their OS codes\r\n for opsys in ds_options['categories']['os']['items']:\r\n if 'Windows Server' in opsys['description']:\r\n os_code = self._generate_windows_code(opsys['description'])\r\n else:\r\n os_results = os_regex.search(opsys['description'])\r\n\r\n # Skip this operating system if it's not parsable\r\n if os_results is None:\r\n continue\r\n\r\n name = os_results.group(1)\r\n version = os_results.group(2)\r\n bits = bit_regex.search(opsys['description'])\r\n extra_info = extra_regex.search(opsys['description'])\r\n\r\n if bits:\r\n bits = bits.group(1)\r\n if extra_info:\r\n extra_info = extra_info.group(1)\r\n\r\n os_code = self._generate_os_code(name, version, bits,\r\n extra_info)\r\n\r\n name = os_code.split('_')[0]\r\n\r\n if name not in os_list:\r\n os_list[name] = []\r\n\r\n os_list[name].append((os_code, opsys['price_id']))\r\n flat_list.append((os_code, opsys['price_id']))\r\n\r\n if pretty:\r\n results = []\r\n for opsys in sorted(os_list.keys()):\r\n results.append(('os (%s)' % opsys, os_list[opsys]))\r\n\r\n return_value = results\r\n else:\r\n return_value = [('os', flat_list)]\r\n\r\n elif 'disk' == section:\r\n disks = []\r\n type_regex = re.compile(r'^[\\d\\.]+[GT]B\\s+(.+)$')\r\n for disk in ds_options['categories']['disk0']['items']:\r\n disk_type = 'SATA'\r\n disk_type = type_regex.match(disk['description']).group(1)\r\n\r\n disk_type = disk_type.replace('RPM', '').strip()\r\n disk_type = disk_type.replace(' ', '_').upper()\r\n disk_type = str(int(disk['capacity'])) + '_' + disk_type\r\n disks.append((disk_type, disk['price_id'], disk['id']))\r\n\r\n return_value = [('disk', disks)]\r\n elif 'nic' == section:\r\n single = []\r\n dual = []\r\n\r\n for item in ds_options['categories']['port_speed']['items']:\r\n if 'dual' in item['description'].lower():\r\n dual.append((str(int(item['capacity'])) + '_DUAL',\r\n item['price_id']))\r\n else:\r\n single.append((str(int(item['capacity'])),\r\n item['price_id']))\r\n\r\n return_value 
= [('single nic', single), ('dual nic', dual)]\r\n elif 'disk_controller' == section:\r\n options = []\r\n for item in ds_options['categories']['disk_controller']['items']:\r\n text = item['description'].replace(' ', '')\r\n\r\n if 'Non-RAID' == text:\r\n text = 'None'\r\n\r\n options.append((text, item['price_id']))\r\n\r\n return_value = [('disk_controllers', options)]\r\n\r\n return return_value", "def iterate(self, compmgr=None, defaults=True):\n options = set()\n name_str = self.name\n for setting in ProductSetting.select(self.env,\n where={'product': self.product,\n 'section': name_str}):\n option = self.optionxform(setting.option)\n options.add(option)\n yield option\n for parent in self.config.parents:\n for option in parent[self.name].iterate(defaults=False):\n loption = self.optionxform(option)\n if loption not in options:\n options.add(loption)\n yield option\n if defaults:\n for section, option in Option.get_registry(compmgr).keys():\n if section == self.name and \\\n self.optionxform(option) not in options:\n yield option", "def options(self):\n options = {\n o.name: getattr(self, o.name)\n for o in _OPTIONS\n }\n return options", "def options(self, **kwds):\n opts = dict(self.opts)\n for k in kwds:\n try:\n # Ensure that the key exists because we want to change\n # existing options, not add new ones.\n _ = opts[k]\n except KeyError:\n raise ValueError(\"invalid option {!r}\".format(k))\n opts[k] = kwds[k]\n return type(self)(self.cls, opts, self.kwargs)", "def __iter__(self):\n for key in self._group._opts.keys():\n yield key" ]
[ "0.5892671", "0.5606828", "0.56063414", "0.550141", "0.54399914", "0.5378535", "0.5304472", "0.52757764", "0.52567726", "0.52553415", "0.52336997", "0.5221113", "0.5217911", "0.51816285", "0.5152726", "0.51514137", "0.5142801", "0.51401424", "0.5134124", "0.5131874", "0.51075584", "0.51026165", "0.5091025", "0.50848156", "0.50646865", "0.50575083", "0.5042416", "0.5036603", "0.5033443", "0.5019094" ]
0.86038965
0
Looks up options for object, including plot defaults, keyfn determines returned key otherwise None key is used.
def lookup(x): options = cls.lookup_options(x, opt_type) selected = {o: options.options[o] for o in opts if o in options.options} if opt_type == 'plot' and defaults: plot = Store.registry[cls.backend].get(type(x)) selected['defaults'] = {o: getattr(plot, o) for o in opts if o not in selected and hasattr(plot, o)} key = keyfn(x) if keyfn else None return (key, selected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True):\n def lookup(x):\n \"\"\"\n Looks up options for object, including plot defaults,\n keyfn determines returned key otherwise None key is used.\n \"\"\"\n options = cls.lookup_options(x, opt_type)\n selected = {o: options.options[o]\n for o in opts if o in options.options}\n if opt_type == 'plot' and defaults:\n plot = Store.registry[cls.backend].get(type(x))\n selected['defaults'] = {o: getattr(plot, o) for o in opts\n if o not in selected and hasattr(plot, o)}\n key = keyfn(x) if keyfn else None\n return (key, selected)\n\n # Traverse object and accumulate options by key\n traversed = obj.traverse(lookup, specs)\n options = defaultdict(lambda: defaultdict(list))\n default_opts = defaultdict(lambda: defaultdict(list)) \n for key, opts in traversed:\n defaults = opts.pop('defaults', {})\n for opt, v in opts.items():\n options[key][opt].append(v)\n for opt, v in defaults.items():\n default_opts[key][opt].append(v)\n\n # Merge defaults into dictionary if not explicitly specified\n for key, opts in default_opts.items():\n for opt, v in opts.items():\n if opt not in options[key]:\n options[key][opt] = v\n return options if keyfn else options[None]", "def get_plot_kwargs(cfg, option, key=None):\n plot_kwargs = cfg.get(option, {}).get('plot_kwargs', {})\n if key is None:\n return plot_kwargs\n if '_xy' in option:\n additional_plot_kwargs = cfg.get('additional_plot_kwargs_xy_plots', {})\n if key in additional_plot_kwargs:\n return {**plot_kwargs, **additional_plot_kwargs[key]}\n subkey = key.split(SEP)[-1]\n if subkey in additional_plot_kwargs:\n return {**plot_kwargs, **additional_plot_kwargs[subkey]}\n return deepcopy(plot_kwargs)", "def _plot_option_logic(plot_options_from_args):\n default_plot_options = copy.deepcopy(DEFAULT_PLOT_OPTIONS)\n file_options = tools.get_config_file()\n session_options = session.get_session_plot_options()\n plot_options_from_args = copy.deepcopy(plot_options_from_args)\n\n # Validate options and fill in defaults w world_readable and sharing\n for option_set in [plot_options_from_args, session_options, file_options]:\n utils.validate_world_readable_and_sharing_settings(option_set)\n utils.set_sharing_and_world_readable(option_set)\n\n user_plot_options = {}\n user_plot_options.update(default_plot_options)\n user_plot_options.update(file_options)\n user_plot_options.update(session_options)\n user_plot_options.update(plot_options_from_args)\n user_plot_options = {\n k: v\n for k, v in user_plot_options.items()\n if k in default_plot_options or k == \"filename\"\n }\n\n return user_plot_options", "def __getitem__(self, key):\n if hasattr(self, key):\n return getattr(self, key)\n else:\n raise KeyError('No such option `{}`.'.format(key))", "def gen_default_plot_options(plot_type, fa_label, fn_label,\r\n plot_title=None):\r\n\r\n plot_opts = OrderedDict([\r\n ('title', \"Performance\" if plot_title is None else plot_title),\r\n ('suptitle', ''),\r\n ('figsize', (8, 6)),\r\n ('title_fontsize', 13),\r\n ('suptitle_fontsize', 11),\r\n ('xlim', [0, 1]),\r\n ('ylim', [0, 1]),\r\n ('xticks_label_size', 'medium'),\r\n ('yticks_label_size', 'medium'),\r\n ('xlabel', \"False Alarm Rate [%]\"),\r\n ('xlabel_fontsize', 11),\r\n ('ylabel_fontsize', 11)])\r\n\r\n if plot_type.lower() == \"det\" or plot_type.lower() == \"detpmthr\":\r\n if plot_type.lower() == \"detpmthr\": ### X-axis is the threshold\r\n plot_opts[\"xscale\"] = \"linear\"\r\n plot_opts[\"xlabel\"] = \"PresenceConf Value\"\r\n 
plot_opts[\"xticks\"] = [0.0, 0.2, 0.4, 0.6, 0.8, 1]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.0\", \"0.2\", \"0.4\", \"0.6\", \"0.8\", \"1.0\"]\r\n \r\n elif (fa_label == \"TFA\"):\r\n plot_opts[\"xscale\"] = \"log\"\r\n plot_opts[\"xlabel\"] = \"Time-based False Alarm\"\r\n plot_opts[\"xticks\"] = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.01\", \"0.02\", \"0.05\", \"0.1\", \"0.2\", \"0.5\", \"1.0\"]\r\n elif (fa_label == \"RFA\"):\r\n plot_opts[\"xscale\"] = \"log\"\r\n plot_opts[\"xlabel\"] = \"Rate of False Alarms (#FAs/minute)\"\r\n plot_opts[\"xticks\"] = [\r\n 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.01\", \"0.02\", \"0.05\", \"0.1\", \"0.2\", \"0.5\", \"1.0\", \"2.0\",\r\n \"5.0\", \"10.0\"]\r\n else:\r\n plot_opts[\"xscale\"] = \"log\"\r\n plot_opts[\"xlabel\"] = \"Prob. of False Alarm\"\r\n plot_opts[\"xticks\"] = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.01\", \"0.02\", \"0.05\", \"0.1\", \"0.2\", \"0.5\", \"1.0\"]\r\n\r\n # Default\r\n plot_opts[\"xlim\"] = (plot_opts[\"xticks\"][0],\r\n plot_opts[\"xticks\"][-1])\r\n plot_opts[\"ylabel\"] = \"Prob. of Miss Detection\"\r\n\r\n plot_opts[\"yticks\"] = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6,\r\n 0.7, 0.8, 0.9, 1.0]\r\n plot_opts[\"yticks_labels\"] = [\r\n '0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7',\r\n '0.8', '0.9', '1.0']\r\n plot_opts[\"ylim\"] = (plot_opts[\"yticks\"][0],\r\n plot_opts[\"yticks\"][-1])\r\n \r\n elif plot_type.lower() == \"roc\":\r\n plot_opts[\"xscale\"] = \"linear\"\r\n plot_opts[\"ylabel\"] = \"Correct Detection Rate [%]\"\r\n plot_opts[\"xticks\"] = [\r\n 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\r\n plot_opts[\"yticks\"] = [\r\n 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\r\n plot_opts[\"yticks_labels\"] = ['0', '10', '20', '30', '40', '50',\r\n '60', '70', '80', '90', '100']\r\n plot_opts[\"xticks_labels\"] = ['0', '10', '20', '30', '40', '50',\r\n '60', '70', '80', '90', '100']\r\n\r\n return plot_opts", "def plot_options(cls, obj, percent_size):\n raise NotImplementedError", "def __getitem__(self, option):\n if option not in self.__dict__.keys():\n raise KeyError(\"Option '{}' not found.\".format(option))\n\n return self.__dict__[option]", "def opt(self, key, default=False):\n if key not in self.options:\n return default\n return self.options.get(key)", "def get_options(self, key):\n if key in self.options.get_option_names():\n return self.options\n\n try:\n scope, scoped_key = key.split('.')\n except ValueError:\n return None\n\n if scope == 'input' and scoped_key in self.input.options.get_option_names():\n return self.input.options\n elif scope == 'output' and scoped_key in self.output.options.get_option_names():\n return self.output.options\n elif scope == 'exploit' and scoped_key in self.exploit.options.get_option_names():\n return self.exploit.options\n else:\n return None", "def get_options():\n user_options = {}\n user_options['surface'] = {'label': 'Surface',\n 'type': 'stringList',\n 'default': 'bcc100',\n 'values': surface_selections}\n\n user_options['metal'] = {'label': 'Metal',\n 'type': 'string',\n 'default': 'Au'}\n\n user_options['a'] = {'label': 'Lattice Constant',\n 'type': 'float',\n 'precision': 3,\n 'suffix': 'Å'}\n\n user_options['size-x'] = {'label': 'Size X',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-y'] = {'label': 'Size Y',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-z'] = {'label': 
'Size Z',\n 'type': 'integer',\n 'default': 3}\n\n user_options['vacuum'] = {'label': 'Vacuum distance',\n 'type': 'float',\n 'precision': 1,\n 'suffix': 'Å'}\n\n user_options['orthogonal'] = {'label': 'Orthogonal',\n 'type': 'stringList',\n 'default': 'True',\n 'values': ['True', 'False']}\n\n return {'userOptions': user_options }", "def _default_options(cls) -> Options:\n options = super()._default_options()\n\n options.curve_fitter = multi_curve_fit\n options.data_processor = None\n options.normalization = False\n options.x_key = \"xval\"\n options.plot = True\n options.axis = None\n options.xlabel = None\n options.ylabel = None\n options.xlim = None\n options.ylim = None\n options.xval_unit = None\n options.yval_unit = None\n options.result_parameters = None\n options.return_data_points = False\n options.curve_plotter = \"mpl_single_canvas\"\n options.style = PlotterStyle()\n\n # automatically populate initial guess and boundary\n fit_params = cls._fit_params()\n options.p0 = {par_name: None for par_name in fit_params}\n options.bounds = {par_name: None for par_name in fit_params}\n\n return options", "def get_option_key(self, iprop, ioption):\n key = _pychidg.f90wrap_get_option_key(self=self._handle, iprop=iprop, \\\n ioption=ioption)\n return key", "def _get_options(self) -> Dict[str, Any]:\n # TODO: handle holidays as well\n return {\n \"growth\": self.growth,\n \"changepoints\": self.changepoints and list(self.changepoints.astype('str')),\n \"n_changepoints\": self.n_changepoints,\n \"changepoint_range\": self.changepoint_range,\n \"changepoint_prior_scale\": self.changepoint_prior_scale,\n \"mcmc_samples\": self.mcmc_samples,\n \"interval_width\": self.interval_width,\n \"uncertainty_samples\": self.uncertainty_samples,\n \"yearly_seasonality\": self.yearly_seasonality,\n \"weekly_seasonality\": self.weekly_seasonality,\n \"daily_seasonality\": self.daily_seasonality,\n \"seasonality_mode\": self.seasonality_mode,\n \"seasonality_prior_scale\": self.seasonality_prior_scale,\n\n \"seasonalities\": self.seasonalities,\n \"extra_regressors\": self.extra_regressors\n }", "def _get_option(self, arg_name: str) -> Any:\n try:\n return getattr(self, f\"__{arg_name}\")\n except AttributeError as ex:\n raise AnalysisError(\n f\"The argument {arg_name} is selected but not defined. 
\"\n \"This key-value pair should be defined in the analysis option.\"\n ) from ex", "def get_option(self, key):\n return self.options[key]", "def get(self, **kws):\n assert len (kws)==1,`kws`\n key, default = kws.items()[0]\n if key not in self.__dict__:\n if VERBOSE:\n print 'Options.get: adding new option: %s=%r' % (key, default)\n self.__dict__[key] = default\n value = self.__dict__[key]\n if value is None:\n value = self.__dict__[key] = default\n return value", "def get_opt(self, widget):\r\n opt = dict()\r\n opt[\"state\"] = widget[\"state\"]\r\n opt[\"fg\"] = widget[\"fg\"]\r\n opt[\"bg\"] = widget[\"bg\"]\r\n return opt", "def get_option_descriptor(self, key):\n return self._options.get(key)", "def translate_options(cls, options):\n used_options = cls.default_options()\n if options is None:\n options = {}\n for key, val in options.items():\n if key not in used_options:\n raise KeyError(f\"Cannot handle key {key}.\")\n used_options[key] = val\n return used_options", "def __getattr__(self, name):\n if name in PLOTTER_NAMES:\n self._plotter = name\n return self\n if name in self.opts:\n return self.opts[name]\n raise AttributeError(sub(\n \"No plotting option or attribute '{}'\", name))", "def get_options(self, panel=\"\"):\n return dict()", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def create_options(self, saving=False):\n self.get_filters(saving)\n\n options = {}\n if(self.calibration_points):\n options['begin_calibration_index'] = self.begin_ind_calibration_entry.get()\n options['end_calibration_index'] = self.end_ind_calibration_entry.get()\n options['known_distance'] = self.known_distance_entry.get()\n else:\n options['begin_calibration_index']=''\n options['end_calibration_index']=''\n options['known_distance']=''\n options['unit_type'] = (re.sub(r'[^A-Za-z0-9_]', '', self.unit_type_entry.get())).capitalize()\n options['begin_index'] = self.begin_ind_entry.get()\n options['end_index'] = self.end_ind_entry.get()\n options['names_list'] = self.names_list_entry.get()\n options['name_column'] = self.name_column_var.get()\n options['x_column'] = self.x_column_var.get()\n options['y_column'] = self.y_column_var.get()\n options['z_column'] = self.z_column_var.get()\n options['filters'] = self.filter_entry_dict\n options['habitat_image'] = self.habitat_image\n options['x_ratio']=self.x_ratio_entry.get()\n options['y_ratio']=self.y_ratio_entry.get()\n options['z_ratio']=self.z_ratio_entry.get()\n options['sheet_name']=self.sheet_name_var.get()\n\n return options", "def _default_options(cls):\n default_options = super()._default_options()\n default_options.data_processor = dp.DataProcessor(\n input_key=\"counts\",\n data_actions=[dp.Probability(\"1\"), dp.BasisExpectationValue()],\n )\n default_options.plotter.set_figure_options(\n xlabel=\"Flat top width\",\n ylabel=\"Pauli expectation values\",\n xval_unit=\"s\",\n ylim=(-1, 1),\n )\n default_options.data_subfit_map = {\n \"x\": {\"meas_basis\": \"x\"},\n \"y\": {\"meas_basis\": \"y\"},\n \"z\": {\"meas_basis\": \"z\"},\n }\n\n return default_options", "def do_opt(self, *_args, **kwargs):\n args = list(_args)\n if not args:\n largest = 0\n keys = [key for key in self.conf if not key.startswith(\"_\")]\n for key in keys:\n largest = max(largest, len(key))\n for key in keys:\n print(\"%s : %s\" % (key.rjust(largest), self.conf[key]))\n return\n option = args.pop(0)\n if not args and not kwargs:\n method = getattr(self, \"getopt_\" + option, None)\n if method is None:\n 
self.getopt_default(option)\n else:\n method()\n else:\n method = getattr(self, \"opt_\" + option, None)\n if method is None:\n print(\"Unrecognized option %r\" % option)\n else:\n method(*args, **kwargs)\n self.save_config()", "def prompt_options_dict(options=None,\n by_descr=True,\n prompt=\"Select from the following options\"):\n if 'Bullet' not in globals():\n raise RuntimeError(\"[-] can't use Bullet on Windows\")\n if options is None:\n raise RuntimeError('[-] no options specified')\n if not isinstance(options[0], dict):\n raise RuntimeError('[-] options is not a list of dictionaries')\n choices = ['<CANCEL>'] + [\n opt['descr']\n if by_descr\n else opt['ident']\n for opt in options\n ]\n cli = Bullet(prompt=f'\\n{prompt}',\n choices=choices,\n indent=0,\n align=2,\n margin=1,\n shift=0,\n bullet=\"→\",\n pad_right=5)\n choice = cli.launch()\n if choice == \"<CANCEL>\":\n logger.info('[-] cancelled selection of choice')\n return None\n selected = find(options,\n 'descr' if by_descr else 'ident',\n choice)\n try:\n return options[selected]['ident']\n except Exception as exc: # noqa\n return None", "def option(self, key):\n if self.integration is None:\n return None\n return self.configuration.get(f'{self.get_config_name()}.{key}')", "def get_option(self, key, default=None):\n current_profile = \"profiles.{}.{}\".format(self.get_profile(), key)\n global_profile = \"profiles.global.{}\".format(key)\n return self.__get_option__(current_profile, self.__get_option__(global_profile, default))", "def get(self, key):\n try:\n if key == key.upper():\n return self.config[key]\n return self.options[key]\n except KeyError:\n return None", "def options(name, option=None, value=None, opt_dict=None):\n\n if isinstance(name, int):\n name = list(pytplot.data_quants.keys())[name]\n\n if opt_dict is None:\n opt_dict = {option: value}\n else:\n if not isinstance(opt_dict,dict):\n print(\"dict must be a dictionary object. Returning.\")\n return\n\n if not isinstance(name, list):\n name = [name]\n\n for i in name:\n\n for option, value in opt_dict.items():\n\n # Lower case option for consistency\n option = option.lower()\n\n if i not in pytplot.data_quants.keys():\n print(str(i) + \" is currently not in pytplot.\")\n return\n\n if option == 'color':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['extras']['line_color'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['line_color'] = [value]\n\n if option == 'link':\n if isinstance(value, list):\n pytplot.link(i, value[1], value[0])\n\n if option == 'colormap':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['extras']['colormap'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['colormap'] = [value]\n\n if option == 'spec':\n _reset_plots(i)\n if value:\n if 'spec_bins' not in pytplot.data_quants[i].coords:\n print(f\"{i} does not contain coordinates for spectrogram plotting. Continuing...\")\n continue\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec'] = value\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec'] = value\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n # Set the default dimension to plot by. 
All others will be summed over.\n if 'spec_dim_to_plot' not in pytplot.data_quants[i].attrs['plot_options']['extras']:\n if 'v' in pytplot.data_quants[i].coords:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v'\n elif 'v2' in pytplot.data_quants[i].coords:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v2'\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v1'\n\n if option == 'alt':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['alt'] = value\n\n if option == 'map':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['map'] = value\n\n if option == 'legend_names':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['legend_names'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['legend_names'] = [value]\n\n if option == 'xlog_slice':\n if value:\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_axis_type'] = 'linear'\n\n if option == 'ylog':\n negflag = 0 # _ylog_check(data_quants, value, i)\n if negflag == 0 and value:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_axis_type'] = 'linear'\n\n if option == 'ylog_slice':\n if value:\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_axis_type'] = 'linear'\n\n if option == 'zlog':\n # check for negative values and warn the user that they will be ignored\n negflag = _zlog_check(pytplot.data_quants, value, i)\n if negflag != 0 and value:\n print(str(i) + ' contains negative values; setting the z-axis to log scale will cause the negative values to be ignored on figures.')\n\n if value:\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_axis_type'] = 'linear'\n\n if option == 'nodata':\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['visible'] = value\n\n if option == 'line_style':\n if value == 0 or value == 'solid_line':\n to_be = []\n elif value == 1 or value == 'dot':\n to_be = [2, 4]\n elif value == 2 or value == 'dash':\n to_be = [6]\n elif value == 3 or value == 'dash_dot':\n to_be = [6, 4, 2, 4]\n elif value == 4 or value == 'dash_dot_dot_dot':\n to_be = [6, 4, 2, 4, 2, 4, 2, 4]\n elif value == 5 or value == 'long_dash':\n to_be = [10]\n else:\n to_be=value\n\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['line_style'] = to_be\n\n if(value == 6 or value == 'none'):\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['visible'] = False\n\n if option == 'char_size':\n pytplot.data_quants[i].attrs['plot_options']['extras']['char_size'] = value\n\n if option == 'name':\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['name'] = value\n\n if option == \"panel_size\":\n if value > 1 or value <= 0:\n print(\"Invalid value. Should be (0, 1]\")\n return\n pytplot.data_quants[i].attrs['plot_options']['extras']['panel_size'] = value\n\n if option == 'basemap':\n pytplot.data_quants[i].attrs['plot_options']['extras']['basemap'] = value\n\n if option == 'alpha':\n if value > 1 or value < 0:\n print(\"Invalid value. 
Should be [0, 1]\")\n return\n pytplot.data_quants[i].attrs['plot_options']['extras']['alpha'] = value\n\n if option == 'thick':\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['line_width'] = value\n\n if option == 'yrange' or option == 'y_range':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = [value[0], value[1]]\n\n if option == 'zrange' or option == 'z_range':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_range'] = [value[0], value[1]]\n\n if option == 'xrange_slice':\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_range'] = [value[0], value[1]]\n\n if option == 'yrange_slice':\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_range'] = [value[0], value[1]]\n\n if option == 'xtitle':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['axis_label'] = value\n\n if option == 'ytitle':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['axis_label'] = value\n\n if option == 'ztitle':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['axis_label'] = value\n\n if option == 'xsubtitle':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['axis_subtitle'] = value\n\n if option == 'ysubtitle':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['axis_subtitle'] = value\n\n if option == 'zsubtitle':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['axis_subtitle'] = value\n\n if option == 'ybar':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ybar'] = value\n\n if option == 'ybar_color':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ybar'] = value\n\n if option == 'ybar_size':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ysize'] = value\n\n if option == 'plotter':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['plotter'] = value\n\n if option == 'crosshair_x':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['crosshair'] = value\n\n if option == 'crosshair_y':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['crosshair'] = value\n\n if option == 'crosshair_z':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['crosshair'] = value\n\n if option == 'static':\n pytplot.data_quants[i].attrs['plot_options']['extras']['static'] = value\n\n if option == 'static_tavg':\n pytplot.data_quants[i].attrs['plot_options']['extras']['static_tavg'] = [value[0], value[1]]\n\n if option == 't_average':\n pytplot.data_quants[i].attrs['plot_options']['extras']['t_average'] = value\n\n if option == 'spec_dim_to_plot' or option == 'spec_plot_dim':\n if len(pytplot.data_quants[i].values.shape) <= 2:\n print(f\"Must have more than 2 coordinate dimensions to set spec_coord_to_plot for {pytplot.data_quants[i].name}\")\n continue\n\n # Set the 'spec_dim_to_plot' value to either 'v' or 'v1', 'v2', 'v3', etc.\n if isinstance(value, int):\n coord_to_plot = \"v\" + str(value)\n if coord_to_plot not in pytplot.data_quants[i].coords:\n if value == 1:\n coord_to_plot = \"v\"\n if coord_to_plot not in pytplot.data_quants[i].coords:\n print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n continue\n else:\n print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n continue\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = coord_to_plot\n elif isinstance(value, str):\n coord_to_plot = value\n if coord_to_plot not in pytplot.data_quants[i].coords:\n print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n 
continue\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = value\n\n # If we're plotting against different coordinates, we need to change what we consider the \"spec_bins\"\n pytplot.data_quants[i].coords['spec_bins'] = pytplot.data_quants[i].coords[coord_to_plot]\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n if option == 'spec_slices_to_use':\n if not isinstance(value, dict):\n print(\"Must be a dictionary object in the format {'v2':15, 'v3':7}\")\n return\n else:\n for coord in value:\n if coord not in pytplot.data_quants[i].coords:\n print(f\"Dimension {coord} not found in {pytplot.data_quants[i].name}\")\n continue\n\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_slices_to_use'] = value\n\n if option == 'border':\n pytplot.data_quants[i].attrs['plot_options']['extras']['border'] = value\n\n if option == 'var_label_ticks':\n pytplot.data_quants[i].attrs['plot_options']['var_label_ticks'] = value\n\n\n return" ]
[ "0.6476717", "0.6211503", "0.57079715", "0.5571008", "0.5534396", "0.547191", "0.54293185", "0.5385394", "0.53531367", "0.53213716", "0.5306876", "0.5284712", "0.5159889", "0.51395476", "0.5116941", "0.51114714", "0.50779635", "0.507736", "0.5073758", "0.5022548", "0.5020879", "0.50104797", "0.49683166", "0.49671754", "0.49628994", "0.49560538", "0.4949455", "0.49383163", "0.4930963", "0.49281263" ]
0.6997502
0
Uses traversal to find the appropriate projection for a nested object. Respects projections set on Overlays before considering Element based settings, before finally looking up the default projection on the plot type. If more than one nonNone projection type is found an exception is raised.
def _get_projection(cls, obj): isoverlay = lambda x: isinstance(x, CompositeOverlay) opts = cls._traverse_options(obj, 'plot', ['projection'], [CompositeOverlay, Element], keyfn=isoverlay) from_overlay = not all(p is None for p in opts[True]['projection']) projections = opts[from_overlay]['projection'] custom_projs = [p for p in projections if p is not None] if len(set(custom_projs)) > 1: raise Exception("An axis may only be assigned one projection type") return custom_projs[0] if custom_projs else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def projectionManip(*args, fitBBox: bool=True, projType: int=0, switchType: bool=True, q=True,\n query=True, **kwargs)->Union[None, Any]:\n pass", "def create_projection_from_projector_element(\n self, element, weight, type, projector_id\n ):\n projection = {\n \"id\": self.projection_id_counter,\n \"stable\": element.get(\"stable\", True),\n \"weight\": weight,\n \"options\": {},\n \"current_projector_id\": None,\n \"preview_projector_id\": None,\n \"history_projector_id\": None,\n \"meeting_id\": 1,\n }\n projection[f\"{type}_projector_id\"] = projector_id\n for k, v in element.items():\n if k not in (\"id\", \"name\", \"stable\"):\n projection[\"options\"][k] = v\n\n collection = element[\"name\"]\n if collection in COLLECTION_MAPPING:\n id = self.to_new_id(collection, element[\"id\"])\n collection = COLLECTION_MAPPING[collection]\n projection[\"content_object_id\"] = f\"{collection}/{id}\"\n projection[\"type\"] = None\n elif collection == \"agenda/item-list\":\n collection = \"meeting\"\n id = 1\n projection[\"content_object_id\"] = \"meeting/1\"\n projection[\"type\"] = \"agenda_item_list\"\n elif collection in (\n \"agenda/current-list-of-speakers\",\n \"agenda/current-list-of-speakers-overlay\",\n ):\n collection = \"meeting\"\n id = 1\n projection[\"content_object_id\"] = \"meeting/1\"\n projection[\"type\"] = \"current_list_of_speakers\"\n elif collection == \"agenda/current-speaker-chyron\":\n collection = \"meeting\"\n id = 1\n projection[\"content_object_id\"] = \"meeting/1\"\n projection[\"type\"] = \"current_speaker_chyron\"\n else:\n raise OS4ExporterException(f\"Unknown slide {collection}\")\n\n if collection != \"user\":\n content_object = self.get_model(collection, id)\n content_object[\"projection_ids\"].append(projection[\"id\"])\n else:\n user = self.get_model(collection, id)\n if not user[\"projection_$_ids\"]:\n user[\"projection_$_ids\"] = [\"1\"]\n user[\"projection_$1_ids\"] = []\n user[\"projection_$1_ids\"].append(projection[\"id\"])\n\n self.projection_id_counter += 1\n self.set_model(\"projection\", projection)\n return projection[\"id\"]", "async def test_entity_nested_projection(self):\n test_name = 'test_entity_nested_projection'\n entity_name = 'TestEntityNestedProjection'\n\n corpus = TestHelper.get_local_corpus(self.tests_subpath, test_name)\n expected_output_path = TestHelper.get_expected_output_folder_path(self.tests_subpath, test_name)\n manifest = await corpus.fetch_object_async('local:/default.manifest.cdm.json')\n\n ent_test_entity_nested_projection = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name), manifest)\n self.assertIsNotNone(ent_test_entity_nested_projection)\n resolved_test_entity_nested_projection = await ProjectionTestUtils.get_resolved_entity(corpus, ent_test_entity_nested_projection, [])\n self.assertIsNotNone(resolved_test_entity_nested_projection)\n await AttributeContextUtil.validate_attribute_context(self, corpus, expected_output_path, entity_name, resolved_test_entity_nested_projection)", "def apply_projection(projection, dataset):\n out = DatasetType(name=dataset.name, attributes=dataset.attributes)\n\n for var in projection:\n target, template = out, dataset\n while var:\n name, slice_ = var.pop(0)\n candidate = template[name]\n \n # apply slice\n if slice_:\n if isinstance(candidate, BaseType):\n candidate.data = candidate[slice_]\n elif isinstance(candidate, SequenceType):\n candidate = candidate[slice_[0]]\n elif isinstance(candidate, GridType):\n candidate = candidate[slice_]\n\n # 
handle structures\n if isinstance(candidate, StructureType):\n # add variable to target\n if name not in target.keys():\n if var:\n # if there are more children to add we need to clear the\n # candidate so it has only explicitly added children; \n # also, Grids are degenerated into Structures\n if isinstance(candidate, GridType):\n candidate = StructureType(candidate.name, candidate.attributes)\n candidate._keys = []\n target[name] = candidate\n target, template = target[name], template[name]\n else:\n target[name] = candidate\n\n # fix sequence data, including only variables that are in the sequence\n for seq in walk(out, SequenceType):\n seq.data = get_var(dataset, seq.id)[tuple(seq.keys())].data\n\n return out", "def Projection(W, TYPE_PROJ = proj_l11ball, ETA = 100, AXIS = 0, ETA_STAR = 100, device = \"cpu\" ): \n \n #global TYPE_PROJ, ETA, ETA_STAR, AXIS, device \n if TYPE_PROJ == 'No_proj':\n W_new = W\n if (TYPE_PROJ == proj_l1ball or TYPE_PROJ == proj_l11ball or TYPE_PROJ == proj_l11ball_line ):\n W_new = TYPE_PROJ(W, ETA, device)\n if TYPE_PROJ == proj_l21ball or TYPE_PROJ == proj_l12ball:\n W_new = TYPE_PROJ(W, ETA, AXIS, device = device)\n if TYPE_PROJ == proj_nuclear:\n W_new = TYPE_PROJ(W, ETA_STAR, device=device)\n return W_new", "def _get_projection(el):\n result = None\n if hasattr(el, 'crs'):\n result = (int(el._auxiliary_component), el.crs)\n return result", "def test_compatible_projections(self):\n\n # Read two layers with compatible projections\n hazard_filename = '%s/donut.shp' % TESTDATA\n exposure_filename = ('%s/pop_merapi_prj_problem.asc' % TESTDATA)\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n # Verify that their projection strings are different\n assert H.get_projection() != E.get_projection()\n assert H.get_projection(proj4=True) != E.get_projection(proj4=True)\n\n # But the InaSAFE comparison does pass\n assert H.projection == E.projection", "async def test_entity_attribute_nested_projection(self):\n test_name = 'test_entity_attribute_nested_projection'\n entity_name = 'TestEntityAttributeNestedProjection'\n\n corpus = TestHelper.get_local_corpus(self.tests_subpath, test_name)\n expected_output_path = TestHelper.get_expected_output_folder_path(self.tests_subpath, test_name)\n manifest = await corpus.fetch_object_async('local:/default.manifest.cdm.json')\n\n ent_test_entity_attribute_nested_projection = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name), manifest)\n self.assertIsNotNone(ent_test_entity_attribute_nested_projection)\n resolved_test_entity_attribute_nested_projection = await ProjectionTestUtils.get_resolved_entity(corpus, ent_test_entity_attribute_nested_projection, [])\n self.assertIsNotNone(resolved_test_entity_attribute_nested_projection)\n await AttributeContextUtil.validate_attribute_context(self, corpus, expected_output_path, entity_name, resolved_test_entity_attribute_nested_projection)", "def test_projection_comparisons(self):\n\n # Although the two test datasets have the same projection,\n # this example failed with the message:\n # The reason was that comparison was done with get_projection()\n # rather than the projection objects themselves.\n\n #Projections must be the same: I got\n #GEOGCS[\"GCS_WGS_1984\",DATUM[\"WGS_1984\",\n # SPHEROID[\"WGS_1984\",6378137,298.257223563]],\n # PRIMEM[\"Greenwich\",0],UNIT[\"Degree\",0.017453292519943295]] and\n #GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",\n # SPHEROID[\"WGS 84\",6378137,298.257223563,\n # 
AUTHORITY[\"EPSG\",\"7030\"]],TOWGS84[0,0,0,0,0,0,0],\n # AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0,\n # AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.01745329251994328,\n # AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4326\"]]\n\n # Name file names for hazard level and exposure\n hazard_filename = ('%s/rw_jakarta_singlepart.shp' % TESTDATA)\n exposure_filename = ('%s/indonesia_highway_sample.shp' % TESTDATA)\n\n # Read\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n Hp = H.projection\n Ep = E.projection\n msg = 'Projections did not match: %s != %s' % (Hp, Ep)\n assert Hp == Ep, msg", "def projection(self):\n pass", "def _convert_projection(keys_only, gae_projection_fields, index_schema):\n if gae_projection_fields:\n # Process projection_fields\n solr_projection_fields = ['id', 'rank', 'language']\n for gae_name in gae_projection_fields:\n # (1) In GAE fields with different type can have the same name,\n # in Solr they are stored as fields with different name (type suffix).\n try:\n solr_projection_fields += [\n solr_field.solr_name for solr_field in\n index_schema.grouped_fields[gae_name]\n ]\n except KeyError:\n logger.warning('Unknown field \"{}\" in projection'.format(gae_name))\n return solr_projection_fields\n elif keys_only:\n # Skip everything but ID.\n return ['id', 'rank', 'language']\n else:\n # Return all fields.\n return None", "def projection(self, point):\n projected_point = self._iterate_over_factors(\"projection\", {\"point\": point})\n return projected_point", "def test_project(self):\n import itertools\n from numpy import array, dot\n from numpy.linalg import det\n\n # our little magic constant\n magic = 0.33377777373737737777\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 1 + magic, -1 - magic):\n \n s = space(curvature=k)\n\n # test line preserving projection\n # 3 points are colinear when\n # | x1 y1 1 |\n # | x2 y2 1 | = 0\n # | x3 y3 1 |\n # let's test this!\n\n for p, q in itertools.permutations((\n (1, 0),\n (3/5, 4/5),\n (-5/13, 12/13),\n (-8/17, -15/17),\n ), 2):\n p = s.make_point(p, magic)\n q = s.make_point(q, magic)\n u = p.project(projection_types.preserve_lines)\n v = (p+q).project(projection_types.preserve_lines)\n w = (p+(-magic)*q).project(projection_types.preserve_lines)\n d = det([[*u, 1],[*v, 1],[*w, 1]])\n self.assertTrue(abs(d) < 1e-9)\n\n # test angle preserving projection\n # map will be conformal, so we do like a secant test\n\n delta = 1e-9\n vi = s.make_point((1, 0, 0), delta)\n vj = s.make_point((0, 1, 0), delta)\n vk = s.make_point((0, 0, 1), delta)\n for p in (\n (1, 0, 0),\n (0, 3/5, 4/5),\n (-5/13, 12/13, 0),\n (2/11, 6/11, 9/11),\n (3/7, 6/7, 2/7)\n ):\n p = s.make_point(p, magic)\n pp = p.project(projection_types.preserve_angles)\n pi, pj, pk = (array((p+v).project(projection_types.preserve_angles)) - pp for v in (vi, vj, vk))\n # should stay orthogonal and same size\n # note that we're doing a secant thing so it's only approximate\n # thus we set a relatively high tolerance\n self.assertTrue(isclose(\n dot(pi, pi),\n dot(pj, pj),\n rel_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pi),\n dot(pk, pk),\n rel_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pj),\n 0,\n abs_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pk),\n 0,\n abs_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pj, pk),\n 0,\n abs_tol = 1e-6\n ))", "def _solve3D_proj_multires(self, simu):\n if self.do_projection(simu):\n self._project()\n self._solve3D_multires()", "def 
test_validation_get_valid_projections(self):\n self.assertIsInstance(api.validation.fetch_projections(), dict)", "async def test_entity_projection(self):\n test_name = 'test_entity_projection'\n entity_name = 'TestEntityProjection'\n\n corpus = TestHelper.get_local_corpus(self.tests_subpath, test_name)\n expected_output_path = TestHelper.get_expected_output_folder_path(self.tests_subpath, test_name)\n manifest = await corpus.fetch_object_async('local:/default.manifest.cdm.json')\n\n ent_test_entity_projection = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name), manifest)\n self.assertIsNotNone(ent_test_entity_projection)\n resolved_test_entity_projection = await ProjectionTestUtils.get_resolved_entity(corpus, ent_test_entity_projection, [])\n self.assertIsNotNone(resolved_test_entity_projection)\n await AttributeContextUtil.validate_attribute_context(self, corpus, expected_output_path, entity_name, resolved_test_entity_projection)", "def set_projection_type(self, p_type):\n self.scenes[self.current_scene].set_projection_type(p_type)", "def _init_projection(self):\n radius = 6370e3\n \n # Spherical latlon used by WRF\n self.latlon_sphere = pyproj.Proj(proj='latlong',\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n # Lambert Conformal Conic used by WRF\n self.lambert_grid = pyproj.Proj(proj='lcc',\n lat_1=self.truelats[0],\n lat_2=self.truelats[1],\n lat_0=self.ref_latlon[0],\n lon_0=self.stand_lon,\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n grid_size_i = (self.domain_size[0] - 2) * self.cell_size[0]\n grid_size_j = (self.domain_size[1] - 2) * self.cell_size[1]\n\n grid_center_i, grid_center_j = pyproj.transform(\n self.latlon_sphere, self.lambert_grid,\n self.ref_latlon[1], self.ref_latlon[0])\n \n self.offset_i = grid_center_i - grid_size_i * .5\n self.offset_j = grid_center_j - grid_size_j * .5", "def projectionContext(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n name: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def read_projection(fname, element, theta_index):\n\n projections = dxchange.read_hdf5(fname, \"MAPS/XRF_roi\")\n theta = float(dxchange.read_hdf5(fname, \"MAPS/extra_pvs_as_csv\")[theta_index].split(b',')[1])\n elements = read_channel_names(fname)\n\n try:\n if find_index(elements, element) != None:\n return projections[find_index(elements, element),:, :], theta\n else:\n raise TypeError\n except TypeError:\n print(\"**** ERROR: Element %s does exist in the file: %s \" % (element, fname))\n return None", "def _project_elem(self, elem, mapping):\r\n\t\tif isinstance(elem, basestring):\r\n\t\t\treturn elem\r\n\t\telif isinstance(elem, xmlmodel.XmlElem):\r\n\t\t\tcls = mapping.get_class_for(elem)\r\n\t\t\tif cls is None:\r\n\t\t\t\traise TypeError, 'Could not determine object class for \\'{0}\\' element for node type {1}'.format(elem.tag, type(self))\r\n\t\t\tif not isinstance(cls, NodeClass):\r\n\t\t\t\tif callable(cls):\r\n\t\t\t\t\tcls = cls()\r\n\t\t\t\telse:\r\n\t\t\t\t\traise TypeError, 'Object class for \\'{0}\\' element for node type {1} is of type {2}, should be a NodeClass or a callable'.format(elem.tag, type(self), type(cls))\r\n\t\t\tnode = self._projection_table.get(elem, cls)\r\n\t\t\tif node is None:\r\n\t\t\t\tnode = cls(self._projection_table, elem)\r\n\t\t\t\tself._projection_table.put(elem, cls, node)\r\n\t\t\t\tnode.node_init()\r\n\t\t\treturn 
node\r\n\t\telse:\r\n\t\t\traise TypeError, 'elem should be a string or an XmlElem'", "def get_chained_proj(self):\n #non recursive call\n N = self.N_in\n projs = []\n for layer in self.layers:\n proj,N_out = self.get_deprojecter(layer,N)\n projs.append(proj)\n N = N_out\n return projs", "def subdAutoProjection(*args, caching: bool=True, nodeState: Union[int, bool]=0,\n constructionHistory: bool=True, layout: Union[int, bool]=0,\n layoutMethod: Union[int, bool]=0, name: AnyStr=\"\", optimize: Union[int,\n bool]=0, percentageSpace: Union[float, bool]=0.0, planes: Union[int,\n bool]=6, scale: Union[int, bool]=0, skipIntersect: bool=True,\n worldSpace: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def projection(poly1, dim, solver=None, abs_tol=ABS_TOL, verbose=0):\n if isinstance(poly1, Region):\n ret = Polytope()\n for i in range(len(poly1.list_poly)):\n p = projection(\n poly1.list_poly[i], dim,\n solver=solver, abs_tol=abs_tol)\n ret = ret + p\n return ret\n # flat ?\n if (poly1.dim < len(dim)) or is_empty(poly1):\n return poly1\n # `poly1` isn't flat\n poly_dim = poly1.dim\n dim = np.array(dim)\n org_dim = range(poly_dim)\n new_dim = dim.flatten() - 1\n del_dim = np.setdiff1d(org_dim, new_dim) # Index of dimensions to remove\n # logging\n logger.debug('polytope dim = ' + str(poly_dim))\n logger.debug('project on dims = ' + str(new_dim))\n logger.debug('original dims = ' + str(org_dim))\n logger.debug('dims to delete = ' + str(del_dim))\n mA, nA = poly1.A.shape\n # fewer rows than dimensions ?\n if mA < poly_dim:\n msg = 'fewer rows in A: ' + str(mA)\n msg += ', than polytope dimension: ' + str(poly_dim)\n logger.warning(msg)\n # enlarge A, b with zeros\n A = poly1.A.copy()\n poly1.A = np.zeros((poly_dim, poly_dim))\n poly1.A[0:mA, 0:nA] = A\n # stack\n poly1.b = np.hstack([poly1.b, np.zeros(poly_dim - mA)])\n logger.debug('m, n = ' + str((mA, nA)))\n # Compute cheby ball in lower dim to see if projection exists\n norm = np.sum(poly1.A * poly1.A, axis=1).flatten()\n norm[del_dim] = 0\n c = np.zeros(len(org_dim) + 1, dtype=float)\n c[len(org_dim)] = -1\n G = np.hstack([poly1.A, norm.reshape(norm.size, 1)])\n h = poly1.b\n sol = lpsolve(c, G, h)\n if sol['status'] != 0:\n # Projection not fulldim\n return Polytope()\n if sol['x'][-1] < abs_tol:\n return Polytope()\n # select projection solver\n if solver == \"esp\":\n return projection_esp(poly1, new_dim, del_dim)\n elif solver == \"exthull\":\n return projection_exthull(poly1, new_dim)\n elif solver == \"fm\":\n return projection_fm(poly1, new_dim, del_dim)\n elif solver == \"iterhull\":\n return projection_iterhull(poly1, new_dim)\n elif solver is not None:\n logger.warning('unrecognized projection solver \"' +\n str(solver) + '\".')\n # `solver` undefined or unknown\n # select method based on dimension criteria\n if len(del_dim) <= 2:\n logger.debug(\"projection: using Fourier-Motzkin.\")\n return projection_fm(poly1, new_dim, del_dim)\n elif len(org_dim) <= 4:\n logger.debug(\"projection: using exthull.\")\n return projection_exthull(poly1, new_dim)\n else:\n logger.debug(\"projection: using iterative hull.\")\n return projection_iterhull(poly1, new_dim)", "def projections(self):\n # backwards compatiblity for naming_scheme key\n conf = self.module.configuration(self.name)\n if \"naming_scheme\" in conf:\n default = {\"all\": conf[\"naming_scheme\"]}\n else:\n default = self.default_projections\n projections = conf.get(\"projections\", default)\n\n # Ensure the named tokens we are expanding 
are allowed, see\n # issue #2884 for reference\n msg = \"some tokens cannot be part of the module naming scheme\"\n for projection in projections.values():\n _check_tokens_are_valid(projection, message=msg)\n\n return projections", "def test_has_projection(self):\n for klass in Event.__subclasses__():\n self.assertTrue(hasattr(klass, 'project'),\n f'{klass.__name__} is missing project() method')\n self.assertTrue(inspect.isfunction(klass.project),\n f'{klass.__name__} is missing project() method')", "def read_gdal_projection(dataset):\n wkt = dataset.GetProjection()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(wkt)\n # src = None\n return srs", "def projection_depth(X, *, pointwise=False):\n\n depth = outlyingness_to_depth(_stagel_donoho_outlyingness)\n\n return depth(X, pointwise=pointwise)", "def get_projection(self):\n return self.projection", "def projection(self):\n self.projection = Projection(self)\n return self.projection" ]
[ "0.61008054", "0.57861453", "0.541124", "0.5349921", "0.5343465", "0.5273637", "0.5211582", "0.51546365", "0.5075276", "0.504999", "0.49605206", "0.49293154", "0.48868015", "0.48221928", "0.48099476", "0.4738513", "0.47346017", "0.47128424", "0.47019666", "0.46675426", "0.4598494", "0.45937777", "0.45818222", "0.45808133", "0.45644972", "0.4564285", "0.45509356", "0.45503727", "0.4542558", "0.45421666" ]
0.6758612
0
Computes the zorder of an element in the NdOverlay, taking into account possible batching of elements.
def get_zorder(self, overlay, key, el):
    spec = util.get_overlay_spec(overlay, key, el)
    try:
        return self.ordering.index(spec)
    except ValueError:
        self.ordering = sorted(self.ordering+[spec])
        return self.ordering.index(spec)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getz_index(self):\n return self._getz_index", "def optimise_z(z, *args):\n x, y, elements, coordinates = args\n window_com = np.array([x, y, z])\n return pore_diameter(elements, coordinates, com=window_com)[0]", "def z(self):\r\n return self.position.z", "def _set_planar_pixel_order(img):\n if img.ndim == 3:\n # C-order increments along the y-axis slowest (0), then x-axis (1),\n # then z-axis (2). We want it to go along the z-axis slowest, then\n # y-axis, then x-axis.\n img = np.swapaxes(img, 1, 2)\n img = np.swapaxes(img, 0, 1)\n\n return img.copy()", "def obtain_depth(self):\n self.z_buffer = image(self.image_plane.width, self.image_plane.height)\n for j in range(self.image_plane.height):\n for i in range(self.image_plane.width):\n single_point = None\n for ray_tracing in self.ray_tracer:\n ray_tracing.ray_direction(i, j)\n ray_tracing.sphere_to_ray()\n ray_tracing.ray_sphere_intersection()\n ray_tracing.hit_pos()\n hit_point = ray_tracing.getHit()\n\n if single_point is None:\n single_point = hit_point\n elif single_point is not None and single_point.z > hit_point.z:\n single_point = hit_point\n self.z_buffer.setColor(single_point, i, j)", "def z_index(self):\n return self._z_index", "def find_layer(z, params):\r\n N = len(params['d_list'])\r\n for i in range(N):\r\n if z <= params['layer_bottom_list'][i]:\r\n return i-1\r\n return N-1", "def n_z(self, level):\n resolution = self.resolution(level)\n return (self.z_extent // resolution + 63) // 64", "def getZ(self):\n\t\treturn self.coords.z", "def _sort_ns(self):\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n", "def sh_order(j):\n l = sh_degree(j)\n return j + l + 1 - dimension(l)", "def naive_order_calculation(self):\n\t\torder = 0\n\t\tfor pt in self.enumerate_points():\n\t\t\torder += 1\n\t\treturn order", "def idx_z(self, zval):\r\n iz = np.around((zval - self.oz) / self.dz)\r\n return int(iz)", "def standard_sorting(cls, zmat):\n if zmat is None:\n return None\n nats = len(zmat)\n ncoords = 3*nats - 6\n if nats < 4:\n return None\n else:\n r_coords = [0, 1, 3]\n a_coords = [2, 4]\n t_coords = [5]\n if nats > 4:\n extra = np.arange(6, ncoords+1)\n r_coords += extra[::4].tolist()\n a_coords += extra[1::4].tolist()\n t_coords += extra[2::4].tolist()\n return np.argsort(np.concatenate([r_coords, a_coords, t_coords]))", "def _derZ(self, w, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha) * self.wxInterpolators[y_pos - 1][z_pos](w, x)\n + alpha * self.wxInterpolators[y_pos][z_pos](w, x)\n )\n - (\n (1 - alpha) * self.wxInterpolators[y_pos - 1][z_pos - 1](w, x)\n + alpha * self.wxInterpolators[y_pos][z_pos - 1](w, x)\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n dfdz[c] = (\n (\n (1 - alpha) * self.wxInterpolators[i - 1][j](w[c], x[c])\n + alpha * 
self.wxInterpolators[i][j](w[c], x[c])\n )\n - (\n (1 - alpha)\n * self.wxInterpolators[i - 1][j - 1](w[c], x[c])\n + alpha * self.wxInterpolators[i][j - 1](w[c], x[c])\n )\n ) / (self.z_list[j] - self.z_list[j - 1])\n return dfdz", "def NN_z(x, y, con_ver, nbr_ver, cellsize):\n gx, gy, elevNNGrid = interpolate_to_grid(con_ver[:, 0], con_ver[:,1], con_ver[:,2], \n interp_type = \"natural_neighbor\", \n hres = cellsize[0])\n elev_NN = elevNNGrid[0, 0]\n if not(np.isnan(elev_NN)):\n elev_i = elev_NN\n else:\n print(\"elev_NN is nan: evaluating else loop\")\n d_nbr = np.zeros(3)\n for n in range(0, 3):\n d_nbr[n] = ((x - nbr_ver[n][0])**2 + (y - nbr_ver[n][1])**2)**0.5\n nearest_ver = nbr_ver[d_nbr.argmax(0)]\n elev_i = nearest_ver[2]\n return elev_i", "def _derZ(self, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha) * self.xInterpolators[y_pos - 1][z_pos](x)\n + alpha * self.xInterpolators[y_pos][z_pos](x)\n )\n - (\n (1 - alpha) * self.xInterpolators[y_pos - 1][z_pos - 1](x)\n + alpha * self.xInterpolators[y_pos][z_pos - 1](x)\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n dfdz[c] = (\n (\n (1 - alpha) * self.xInterpolators[i - 1][j](x[c])\n + alpha * self.xInterpolators[i][j](x[c])\n )\n - (\n (1 - alpha) * self.xInterpolators[i - 1][j - 1](x[c])\n + alpha * self.xInterpolators[i][j - 1](x[c])\n )\n ) / (self.z_list[j] - self.z_list[j - 1])\n return dfdz", "def get_order(order, gt_idx_v):\n o = np.tile(order, (gt_idx_v.shape[0],1))\n g = np.expand_dims(gt_idx_v, 1)\n o = o - g\n l, c = np.where(o==0)\n return c # order[c[0]] = gt_idx_v[0]", "def _derZ(self, w, x, y, z):\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list, w), self.w_n - 1), 1)\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n w_pos = self.wSearchFunc(self.w_list, w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n - 1] = self.w_n - 1\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n alpha = (w - self.w_list[i - 1]) / (self.w_list[i] - self.w_list[i - 1])\n beta = (x - self.x_list[j - 1]) / (self.x_list[j] - self.x_list[j - 1])\n gamma = (y - self.y_list[k - 1]) / (self.y_list[k] - self.y_list[k - 1])\n dfdz = (\n (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.f_values[i - 1, j - 1, k - 1, l]\n + (1 - alpha) * (1 - beta) * gamma * self.f_values[i - 1, j - 1, k, l]\n + (1 - 
alpha) * beta * (1 - gamma) * self.f_values[i - 1, j, k - 1, l]\n + (1 - alpha) * beta * gamma * self.f_values[i - 1, j, k, l]\n + alpha * (1 - beta) * (1 - gamma) * self.f_values[i, j - 1, k - 1, l]\n + alpha * (1 - beta) * gamma * self.f_values[i, j - 1, k, l]\n + alpha * beta * (1 - gamma) * self.f_values[i, j, k - 1, l]\n + alpha * beta * gamma * self.f_values[i, j, k, l]\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.f_values[i - 1, j - 1, k - 1, l - 1]\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.f_values[i - 1, j - 1, k, l - 1]\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.f_values[i - 1, j, k - 1, l - 1]\n + (1 - alpha) * beta * gamma * self.f_values[i - 1, j, k, l - 1]\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.f_values[i, j - 1, k - 1, l - 1]\n + alpha * (1 - beta) * gamma * self.f_values[i, j - 1, k, l - 1]\n + alpha * beta * (1 - gamma) * self.f_values[i, j, k - 1, l - 1]\n + alpha * beta * gamma * self.f_values[i, j, k, l - 1]\n )\n ) / (self.z_list[l] - self.z_list[l - 1])\n return dfdz", "def depth_rendering(ref_view, disparity_map, lf_size = (64, 512, 512, 3)):\n lf_one_way = int(math.floor(math.sqrt(lf_size[0])))\n\n x_indices = np.arange(lf_size[1])\n y_indices = np.arange(lf_size[2])\n b_indices = np.arange(lf_size[0])\n\n #Create a grid of size lf_size[:3] consisting of the pixel co ordinates of each image\n _, x, y = np.meshgrid(b_indices, x_indices, y_indices, indexing= 'ij')\n\n # Create a grid of size (lf_size[0], 2) consiting of the row, col lf positions\n grid = np.meshgrid(np.arange(lf_one_way), np.arange(lf_one_way), indexing= 'ij')\n stacked = np.stack(grid, 2)\n positions = stacked.reshape(-1, 2)\n\n # Compute the distance from each lf position from the reference view\n # Repeat the elements of this to match the size of the disparity map\n ref_pos = np.array(\n [lf_one_way // 2, lf_one_way // 2])\n distance = (np.tile(ref_pos, (lf_size[0], 1)) - positions).T\n dis_repeated = np.repeat(distance, lf_size[1] * lf_size[2], axis = 1)\n dis_repeated = dis_repeated.reshape(2, lf_size[0], lf_size[1], lf_size[2])\n\n\n # Tile the disparity map so that there is one for each lf_position - lf_size[0]\n tiled_map = np.tile(disparity_map, (lf_size[0], 1, 1))\n\n # Compute the shifted pixels\n x_shifted = (x.astype(np.float32) - tiled_map * dis_repeated[0]).flatten()\n y_shifted = (y.astype(np.float32) - tiled_map * dis_repeated[1]).flatten()\n\n #indices for linear interpolation in a square around the central point\n x_low = np.around(x_shifted).astype(int)\n #x_high = x_low + 1\n\n y_low = np.around(y_shifted).astype(int)\n #y_high = y_low + 1\n\n #Place co-ordinates outside the image back into the image\n x_low_clip = np.clip(x_low, 0, ref_view.shape[0] - 1)\n #x_high_clip = np.clip(x_high, 0, ref_view.shape[0] - 1)\n y_low_clip = np.clip(y_low, 0, ref_view.shape[1] - 1)\n #y_high_clip = np.clip(y_high, 0, ref_view.shape[1] - 1)\n\n #Gather the interpolation points\n interp_pts_1 = np.stack((x_low_clip, y_low_clip))\n #interp_pts_2 = np.stack((x_low_clip, y_high_clip))\n #interp_pts_3 = np.stack((x_high_clip, y_low_clip))\n #interp_pts_4 = np.stack((x_high_clip, y_high_clip))\n\n #Index into the images\n desired_shape = lf_size\n res_1 = torch_big_sample(ref_view, interp_pts_1, desired_shape)\n return res_1\n res_2 = torch_big_sample(ref_view, interp_pts_2, desired_shape)\n res_3 = torch_big_sample(ref_view, interp_pts_3, desired_shape)\n res_4 = torch_big_sample(ref_view, interp_pts_4, desired_shape)\n\n #Compute interpolation weights\n 
x_low_f = x_low.astype(np.float32)\n d_x_low = 1.0 - (x_shifted.astype(np.float32) - x_low_f)\n d_x_high = 1.0 - d_x_low\n y_low_f = y_low.astype(np.float32)\n d_y_low = 1.0 - (y_shifted.astype(np.float32) - y_low_f)\n d_y_high = 1.0 - d_y_low\n\n w1 = torch.from_numpy(d_x_low * d_y_low)\n w2 = torch.from_numpy(d_x_low * d_y_high)\n w3 = torch.from_numpy(d_x_high * d_y_low)\n w4 = torch.from_numpy(d_x_high * d_y_high)\n\n #THEY AGREE AT THIS POINT\n weighted_1 = torch.mul(repeat_weights(w1, desired_shape), res_1)\n weighted_2 = torch.mul(repeat_weights(w2, desired_shape), res_2)\n weighted_3 = torch.mul(repeat_weights(w3, desired_shape), res_3)\n weighted_4 = torch.mul(repeat_weights(w4, desired_shape), res_4)\n\n novel_view = torch.add(torch.add(weighted_1, weighted_2), weighted_3)\n torch.add(novel_view, weighted_4, out=novel_view)\n return novel_view", "def __get_z__(self):\n return self.Direction['z']", "def calc_ply_order(constraints, targets):\r\n if constraints.sym:\r\n ply_order = np.arange(targets.n_plies // 2 + targets.n_plies % 2)\r\n return ply_order\r\n\r\n order_before_sorting = np.arange(targets.n_plies)\r\n ply_order = np.zeros((targets.n_plies,), int)\r\n ply_order[0::2] = order_before_sorting[\r\n :targets.n_plies // 2 + targets.n_plies % 2]\r\n ply_order[1::2] = order_before_sorting[\r\n targets.n_plies // 2 + targets.n_plies % 2:][::-1]\r\n return ply_order", "def N_z(self) -> int:\n return self.params.N_z", "def pz_fn(self, z):\n pass", "def _derZ(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.y_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos](w)\n + (1 - alpha)\n * beta\n * self.wInterpolators[x_pos - 1][y_pos][z_pos](w)\n + alpha\n * (1 - beta)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos](w)\n + alpha * beta * self.wInterpolators[x_pos][y_pos][z_pos](w)\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1](w)\n + (1 - alpha)\n * beta\n * self.wInterpolators[x_pos - 1][y_pos][z_pos - 1](w)\n + alpha\n * (1 - beta)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1](w)\n + alpha * beta * self.wInterpolators[x_pos][y_pos][z_pos - 1](w)\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n beta = (y[c] - self.y_list[j - 1]) / (\n self.y_list[j] - self.y_list[j - 1]\n )\n dfdz[c] = (\n (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[i - 1][j - 1][k](w[c])\n + (1 - alpha)\n * beta\n * self.wInterpolators[i - 1][j][k](w[c])\n + alpha\n * (1 - beta)\n * self.wInterpolators[i][j - 
1][k](w[c])\n + alpha * beta * self.wInterpolators[i][j][k](w[c])\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[i - 1][j - 1][k - 1](w[c])\n + (1 - alpha)\n * beta\n * self.wInterpolators[i - 1][j][k - 1](w[c])\n + alpha\n * (1 - beta)\n * self.wInterpolators[i][j - 1][k - 1](w[c])\n + alpha\n * beta\n * self.wInterpolators[i][j][k - 1](w[c])\n )\n ) / (self.z_list[k] - self.z_list[k - 1])\n return dfdz", "def cells_z(self):\n if self.is_depth:\n return list(reversed(self._cells[2]))\n return self._cells[2]", "def _derZ(self, x, y, z):\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha) * (1 - beta) * self.f_values[x_pos - 1, y_pos - 1, z_pos]\n + (1 - alpha) * beta * self.f_values[x_pos - 1, y_pos, z_pos]\n + alpha * (1 - beta) * self.f_values[x_pos, y_pos - 1, z_pos]\n + alpha * beta * self.f_values[x_pos, y_pos, z_pos]\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * self.f_values[x_pos - 1, y_pos - 1, z_pos - 1]\n + (1 - alpha) * beta * self.f_values[x_pos - 1, y_pos, z_pos - 1]\n + alpha * (1 - beta) * self.f_values[x_pos, y_pos - 1, z_pos - 1]\n + alpha * beta * self.f_values[x_pos, y_pos, z_pos - 1]\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n return dfdz", "def testGetOrderedLayers(self):\n container_obj = self.explorer_object.GetContainer(\n 'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c')\n layers = container_obj.GetOrderedLayers()\n self.assertEqual(2, len(layers))\n self.assertEqual(\n '1cee97b18f87b5fa91633db35f587e2c65c093facfa2cbbe83d5ebe06e1d9125',\n layers[0])", "def userToPlotZ(z): \n return dislin.nzposn(z)", "def _get_level_ordering(self):\n # First, get a level for all layers:\n levels = {}\n for layer in self._layers:\n level = max(\n [levels[lay.name] for lay in self.incoming_layers(layer.name)] + [-1]\n )\n levels[layer.name] = level + 1\n max_level = max(levels.values())\n ordering = []\n for i in range(max_level + 1): # input to output\n layer_names = [\n layer.name for layer in self._layers if levels[layer.name] == i\n ]\n ordering.append(\n [\n (name, False, [x.name for x in self.incoming_layers(name)])\n for name in layer_names\n ]\n ) # (going_to/layer_name, anchor, coming_from)\n # promote all output banks to last row:\n for level in range(len(ordering)): # input to output\n tuples = ordering[level]\n index = 0\n for (name, anchor, none) in tuples[:]:\n if self._get_layer_type(name) == \"output\":\n # move it to last row\n # find it and remove\n ordering[-1].append(tuples.pop(index))\n else:\n index += 1\n # insert anchor points for any in next level\n # that doesn't go to a bank in this level\n # order_cache = {}\n for level in range(len(ordering)): # input to output\n tuples = ordering[level]\n for (name, anchor, fname) in tuples:\n if anchor:\n # is this in next? 
if not add it\n next_level = [\n (n, anchor) for (n, anchor, hfname) in ordering[level + 1]\n ]\n if (\n name,\n False,\n ) not in next_level: # actual layer not in next level\n ordering[level + 1].append(\n (name, True, fname)\n ) # add anchor point\n else:\n pass # finally!\n else:\n # if next level doesn't contain an outgoing\n # connection, add it to next level as anchor point\n for layer in self.outgoing_layers(name):\n next_level = [\n (n, anchor) for (n, anchor, fname) in ordering[level + 1]\n ]\n if (layer.name, False) not in next_level:\n ordering[level + 1].append(\n (layer.name, True, name)\n ) # add anchor point\n ordering = self._optimize_ordering(ordering)\n return ordering" ]
[ "0.57028675", "0.55984366", "0.5478423", "0.54625237", "0.5441317", "0.537248", "0.5289451", "0.52060914", "0.5195802", "0.51892036", "0.51678", "0.5121228", "0.51193726", "0.50830746", "0.507015", "0.5063056", "0.5060691", "0.5056381", "0.5056132", "0.50311744", "0.5008574", "0.49935734", "0.4985095", "0.49817", "0.4966024", "0.49506593", "0.4943147", "0.4935495", "0.4928189", "0.49159983" ]
0.72016543
0
Gets the extents for the axes from the current View. The globally computed ranges can optionally override the extents.
def get_extents(self, view, ranges):
    ndims = len(view.dimensions())
    num = 6 if self.projection == '3d' else 4
    if self.apply_ranges:
        if ranges:
            dims = view.dimensions()
            x0, x1 = ranges[dims[0].name]
            if ndims > 1:
                y0, y1 = ranges[dims[1].name]
            else:
                y0, y1 = (np.NaN, np.NaN)
            if self.projection == '3d':
                if len(dims) > 2:
                    z0, z1 = ranges[dims[2].name]
                else:
                    z0, z1 = np.NaN, np.NaN
        else:
            x0, x1 = view.range(0)
            y0, y1 = view.range(1) if ndims > 1 else (np.NaN, np.NaN)
            if self.projection == '3d':
                z0, z1 = view.range(2)
        if self.projection == '3d':
            range_extents = (x0, y0, z0, x1, y1, z1)
        else:
            range_extents = (x0, y0, x1, y1)
    else:
        range_extents = (np.NaN,) * num
    if self.apply_extents:
        norm_opts = self.lookup_options(view, 'norm').options
        if norm_opts.get('framewise', False) or self.dynamic:
            extents = view.extents
        else:
            extent_list = self.hmap.traverse(lambda x: x.extents, [Element])
            extents = util.max_extents(extent_list, self.projection == '3d')
    else:
        extents = (np.NaN,) * num
    return tuple(l1 if l2 is None or not np.isfinite(l2) else l2
                 for l1, l2 in zip(range_extents, extents))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extents(self):\n self._updateExtents()\n return self.mExtents", "def extents(self):\n x0, y0, width, height = self._rect_bbox\n xmin, xmax = sorted([x0, x0 + width])\n ymin, ymax = sorted([y0, y0 + height])\n return xmin, xmax, ymin, ymax", "def extents(self):\n\n return self._local", "def extents(self):\n if self.direction == 'horizontal':\n vmin = self._selection_artist.get_x()\n vmax = vmin + self._selection_artist.get_width()\n else:\n vmin = self._selection_artist.get_y()\n vmax = vmin + self._selection_artist.get_height()\n return vmin, vmax", "def extent(self):\n return self._ax.extent", "def get_extent(self):\n pass", "def extent(self):\n return np.array(self._extent)", "def _get_extent_axes(self, x):\n if not hasattr(self, 'get_subplotspec'):\n return [self]\n y = ('y' if x == 'x' else 'x')\n idx = (0 if x == 'x' else 1)\n argfunc = (np.argmax if x == 'x' else np.argmin)\n irange = self._range_gridspec(x)\n axs = [ax for ax in self.figure._axes_main\n if ax._range_gridspec(x) == irange]\n if not axs:\n return [self]\n else:\n pax = axs.pop(argfunc([ax._range_gridspec(y)[idx] for ax in axs]))\n return [pax, *axs]", "def return_extents(self):\n\n return [qm.tree.mins, qm.tree.maxs]", "def GetExtents(self, transform=None):\n # Prepare GDAL functions to compute extents\n x_size, y_size = self.RasterXSize, self.RasterYSize\n\n # Compute four corners in destination projection\n upper_left = self.PixelCoordinates(0, 0,\n transform=transform)\n upper_right = self.PixelCoordinates(x_size, 0,\n transform=transform)\n lower_left = self.PixelCoordinates(0, y_size,\n transform=transform)\n lower_right = self.PixelCoordinates(x_size, y_size,\n transform=transform)\n x_values, y_values = list(zip(upper_left, upper_right,\n lower_left, lower_right))\n\n # Return lower-left and upper-right extents\n return Extents(lower_left=XY(min(x_values), min(y_values)),\n upper_right=XY(max(x_values), max(y_values)))", "def extent(self):\n ulx, uly, lrx, lry = self.ul_lr\n return ulx, lry, lrx, uly", "def extent(self):\r\n if not hasattr(self, '_extent'):\r\n self._extent = conf.lib.clang_getCursorExtent(self)\r\n\r\n return self._extent", "def extent(self):\n return self._extent", "def extent(self):\n rx0 = gxapi.float_ref()\n ry0 = gxapi.float_ref()\n rz0 = gxapi.float_ref()\n rx1 = gxapi.float_ref()\n ry1 = gxapi.float_ref()\n rz1 = gxapi.float_ref()\n self.gxvox.get_area(rx0, ry0, rz0, rx1, ry1, rz1)\n if self.is_depth:\n return gxgm.Point2(((rx0.value, ry0.value, -rz1.value), (rx1.value, ry1.value, -rz0.value)))\n return gxgm.Point2(((rx0.value, ry0.value, rz0.value), (rx1.value, ry1.value, rz1.value)),\n self.coordinate_system)", "def geoextent(self):\r\n return self.series_extent", "def extent(self):\n return self.index.max() - self.index.min(), self.columns.max() - self.columns.min()", "def compute_axes(self):\n mini, maxi = self._get_extremes()\n self.y_axis.min = mini\n self.y_axis.max = maxi\n self.y_axis._max_min()\n\n if not None in [s.xvalues for s in self]:\n mini, maxi = self._get_extremes('xvalues')\n self.x_axis.min = mini\n self.x_axis.max = maxi\n self.x_axis._max_min()", "def get_extent(self):\n geot = self.geotransform()\n return (geot[0], geot[3] + self.YSize() * geot[5],\n geot[0] + self.XSize() * geot[1], geot[3])", "def extent(self):\n\n x = np.array([0, self.nx]) * self.dx + self.corner_grid.x0\n ypoint = [0, self.ny] if self.origin == 'lower-left' else [self.ny, 0]\n y = np.array(ypoint) * self.dy + self.corner_grid.y0\n\n return [x[0], x[1], y[0], y[1]]", "def 
extent(self):\n left = self.transform[0]\n right = left + self.transform[1] * self.shape[1]\n top = self.transform[3]\n bottom = top + self.transform[5] * self.shape[0]\n return left, right, bottom, top", "def _getAxesExtent(\n self,\n x0: float,\n y0: float,\n x1: float,\n y1: float,\n enabledAxes: Optional[EnabledAxes] = None,\n ) -> AxesExtent:\n if enabledAxes is None:\n enabledAxes = self.enabledAxes\n\n y2_0, y2_1 = y0, y1\n left, top, width, height = self.plot.getPlotBoundsInPixels()\n\n if not all(enabledAxes) and not self.plot.isKeepDataAspectRatio():\n # Handle axes disabled for zoom if plot is not keeping aspec ratio\n if not enabledAxes.xaxis:\n x0, x1 = left, left + width\n if not enabledAxes.yaxis:\n y0, y1 = top, top + height\n if not enabledAxes.y2axis:\n y2_0, y2_1 = top, top + height\n\n if self.plot.isKeepDataAspectRatio() and height != 0 and width != 0:\n ratio = width / height\n xextent, yextent = math.fabs(x1 - x0), math.fabs(y1 - y0)\n if xextent != 0 and yextent != 0:\n if xextent / yextent > ratio:\n areaHeight = xextent / ratio\n center = 0.5 * (y0 + y1)\n y0 = center - numpy.sign(y1 - y0) * 0.5 * areaHeight\n y1 = center + numpy.sign(y1 - y0) * 0.5 * areaHeight\n else:\n areaWidth = yextent * ratio\n center = 0.5 * (x0 + x1)\n x0 = center - numpy.sign(x1 - x0) * 0.5 * areaWidth\n x1 = center + numpy.sign(x1 - x0) * 0.5 * areaWidth\n\n # Convert to data space\n x0, y0 = self.plot.pixelToData(x0, y0, check=False)\n x1, y1 = self.plot.pixelToData(x1, y1, check=False)\n y2_0 = self.plot.pixelToData(None, y2_0, axis=\"right\", check=False)[1]\n y2_1 = self.plot.pixelToData(None, y2_1, axis=\"right\", check=False)[1]\n\n return AxesExtent(\n min(x0, x1),\n max(x0, x1),\n min(y0, y1),\n max(y0, y1),\n min(y2_0, y2_1),\n max(y2_0, y2_1),\n )", "def getExtentUnits(self):\n return _libsbml.Model_getExtentUnits(self)", "def get_data_extent(self):\n xs, ys = self.xs, self.ys\n xmin, xmax = min(xs), max(xs)\n ymin, ymax = min(xy), max(ys)\n w = maxx - minx\n h = maxy - miny\n return xmin, ymax, w, h", "def axes(self):\n return self._axes", "def axes(self):\n return self._axes", "def bounds(self, axis, view=None):\n if view is None:\n view = self\n if axis not in self._vshare.bounds:\n self._vshare.bounds[axis] = self._compute_bounds(axis, view)\n return self._vshare.bounds[axis]", "def getCurrentExtent(self):\n if not self.currentBox:\n extent = None\n else:\n extent = boxToExtent(self.currentBox)\n return extent", "def get_extent(fpath):\n extents = []\n with h5py.File(fpath, mode='r') as f:\n for key, value in f['label/label-0'].attrs.items():\n if key.lower().endswith('extent') and isinstance(value, np.ndarray):\n extents.append(value)\n \n extents = np.stack(extents, axis=0)\n maxs = np.max(extents, axis=0)\n mins = np.min(extents, axis=0)\n axis_slices = []\n for min_, max_ in zip(mins[::2], maxs[1::2]):\n axis_slices.append(slice(min_, max_, 1))\n return tuple(axis_slices)", "def GetExtent(vDataSet):\r\n return [vDataSet.GetExtendMinX(),vDataSet.GetExtendMaxX(),\r\n vDataSet.GetExtendMinY(),vDataSet.GetExtendMaxY(),\r\n vDataSet.GetExtendMinZ(),vDataSet.GetExtendMaxZ()]", "def extent(self):\n if self.x is not None:\n if self.y is not None:\n if self.z is not None:\n return (self.x.min(), self.x.max(),\n self.y.min(), self.y.max(),\n self.z.min(), self.z.max())\n return (self.x.min(), self.x.max(),\n self.y.min(), self.y.max())\n return (self.x.min(), self.x.max())\n\n elif self.r is not None and self.t is not None:\n if self.z is not None:\n return (self.z.min(), 
self.z.max(),\n self.r.min(), self.r.max(),\n self.t.min(), self.t.max())\n return (self.r.min(), self.r.max(),\n self.t.min(), self.t.max())\n\n return ()" ]
[ "0.72161704", "0.6875994", "0.6859144", "0.67906785", "0.65129864", "0.63847166", "0.63576", "0.6300293", "0.6264915", "0.6224861", "0.61757976", "0.61531943", "0.61249256", "0.61061025", "0.60827875", "0.6021081", "0.5992878", "0.599001", "0.59846133", "0.5962699", "0.5962071", "0.59305775", "0.5904698", "0.5868598", "0.5868598", "0.5862479", "0.5849482", "0.58306336", "0.5829935", "0.58241355" ]
0.7218878
0
Given a HoloMap, compute the appropriate (mapwise or framewise) ranges in order to apply the Compositor collapse operations in display mode (data collapse should already have happened).
def _apply_compositor(self, holomap, ranges=None, keys=None, dimensions=None):
    # Compute framewise normalization
    defaultdim = holomap.ndims == 1 and holomap.kdims[0].name != 'Frame'

    if keys and ranges and dimensions and not defaultdim:
        dim_inds = [dimensions.index(d) for d in holomap.kdims]
        sliced_keys = [tuple(k[i] for i in dim_inds) for k in keys]
        frame_ranges = OrderedDict([(slckey, self.compute_ranges(holomap, key, ranges[key]))
                                    for key, slckey in zip(keys, sliced_keys)
                                    if slckey in holomap.data.keys()])
    else:
        mapwise_ranges = self.compute_ranges(holomap, None, None)
        frame_ranges = OrderedDict([(key, self.compute_ranges(holomap, key, mapwise_ranges))
                                    for key in holomap.keys()])
    ranges = frame_ranges.values()
    return Compositor.collapse(holomap, (ranges, frame_ranges.keys()), mode='display')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _modify_map_size(self, merged_map):\n pos_x_white, pos_y_white = np.where(merged_map == 255)\n pos_x_black, pos_y_black = np.where(merged_map == 0)\n\n pos_x_M = np.amax(np.hstack((pos_x_black, pos_x_white)))\n pos_x_m = np.amin(np.hstack((pos_x_black, pos_x_white)))\n pos_y_M = np.amax(np.hstack((pos_y_black, pos_y_white)))\n pos_y_m = np.amin(np.hstack((pos_y_black, pos_y_white)))\n\n reduced_map = merged_map[pos_x_m-5:pos_x_M+5, pos_y_m-5:pos_y_M+5]\n\n return reduced_map", "def disp_map(disp):\n map = np.array([\n [0,0,0,114],\n [0,0,1,185],\n [1,0,0,114],\n [1,0,1,174],\n [0,1,0,114],\n [0,1,1,185],\n [1,1,0,114],\n [1,1,1,0]\n ])\n # grab the last element of each column and convert into float type, e.g. 114 -> 114.0\n # the final result: [114.0, 185.0, 114.0, 174.0, 114.0, 185.0, 114.0]\n bins = map[0:map.shape[0]-1,map.shape[1] - 1].astype(float)\n\n # reshape the bins from [7] into [7,1]\n bins = bins.reshape((bins.shape[0], 1))\n\n # accumulate element in bins, and get [114.0, 299.0, 413.0, 587.0, 701.0, 886.0, 1000.0]\n cbins = np.cumsum(bins)\n\n # divide the last element in cbins, e.g. 1000.0\n bins = bins / cbins[cbins.shape[0] -1]\n\n # divide the last element of cbins, e.g. 1000.0, and reshape it, final shape [6,1]\n cbins = cbins[0:cbins.shape[0]-1] / cbins[cbins.shape[0] -1]\n cbins = cbins.reshape((cbins.shape[0], 1))\n\n # transpose disp array, and repeat disp 6 times in axis-0, 1 times in axis-1, final shape=[6, Height*Width]\n ind = np.tile(disp.T, (6,1))\n tmp = np.tile(cbins, (1, disp.size))\n\n # get the number of disp's elements bigger than each value in cbins, and sum up the 6 numbers\n b = (ind > tmp).astype(int)\n s = np.sum(b, axis=0)\n\n bins = 1 / bins\n\n # add an element 0 ahead of cbins, [0, cbins]\n t = cbins\n cbins = np.zeros((cbins.size+1,1))\n cbins[1:] = t\n\n # get the ratio and interpolate it\n disp = (disp - cbins[s]) * bins[s]\n disp = map[s,0:3] * np.tile(1 - disp,(1,3)) + map[s + 1,0:3] * np.tile(disp,(1,3))\n\n return disp", "def disp_map(disp):\n map = np.array([\n [0, 0, 0, 114],\n [0, 0, 1, 185],\n [1, 0, 0, 114],\n [1, 0, 1, 174],\n [0, 1, 0, 114],\n [0, 1, 1, 185],\n [1, 1, 0, 114],\n [1, 1, 1, 0]\n ])\n # grab the last element of each column and convert into float type, e.g. 114 -> 114.0\n # the final result: [114.0, 185.0, 114.0, 174.0, 114.0, 185.0, 114.0]\n bins = map[0:map.shape[0] - 1, map.shape[1] - 1].astype(float)\n\n # reshape the bins from [7] into [7,1]\n bins = bins.reshape((bins.shape[0], 1))\n\n # accumulate element in bins, and get [114.0, 299.0, 413.0, 587.0, 701.0, 886.0, 1000.0]\n cbins = np.cumsum(bins)\n\n # divide the last element in cbins, e.g. 1000.0\n bins = bins / cbins[cbins.shape[0] - 1]\n\n # divide the last element of cbins, e.g. 
1000.0, and reshape it, final shape [6,1]\n cbins = cbins[0:cbins.shape[0] - 1] / cbins[cbins.shape[0] - 1]\n cbins = cbins.reshape((cbins.shape[0], 1))\n\n # transpose disp array, and repeat disp 6 times in axis-0, 1 times in axis-1, final shape=[6, Height*Width]\n ind = np.tile(disp.T, (6, 1))\n tmp = np.tile(cbins, (1, disp.size))\n\n # get the number of disp's elements bigger than each value in cbins, and sum up the 6 numbers\n b = (ind > tmp).astype(int)\n s = np.sum(b, axis=0)\n\n bins = 1 / bins\n\n # add an element 0 ahead of cbins, [0, cbins]\n t = cbins\n cbins = np.zeros((cbins.size + 1, 1))\n cbins[1:] = t\n\n # get the ratio and interpolate it\n disp = (disp - cbins[s]) * bins[s]\n disp = map[s, 0:3] * np.tile(1 - disp, (1, 3)) + map[s + 1, 0:3] * np.tile(disp, (1, 3))\n\n return disp", "def test_change_min_max(self):\n\n datarange = self.colormap.range\n\n # Perform a dummy mapping.\n a = ArrayDataSource(array([0.0, 0.5, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n\n # Update the min_value.\n datarange.low = -1.0\n\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, 0.0, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n # Update the max_value.\n datarange.high = 0.0\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, -0.5, 0.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. 
Got %s\" % (expected, b[:,:1]))\n\n\n return", "def project_ranges(cb, msg, attributes):\n if skip(cb, msg, attributes):\n return msg\n\n plot = get_cb_plot(cb)\n x0, x1 = msg.get('x_range', (0, 1000))\n y0, y1 = msg.get('y_range', (0, 1000))\n extents = x0, y0, x1, y1\n x0, y0, x1, y1 = project_extents(extents, plot.projection,\n plot.current_frame.crs)\n coords = {'x_range': (x0, x1), 'y_range': (y0, y1)}\n return {k: v for k, v in coords.items() if k in attributes}", "def __init__(self, colmaps, min_value, max_value):\n \n self.colmaps = colmaps\n self.anz_seg = len(self.colmaps)\n \n self.xmin = []\n self.xmax = []\n self.colmap = []\n \n # min_value being smaller than the smallest min value\n # of a segment is not allowed (same for max_value)\n if min_value < self.colmaps[0][0]:\n min_value = self.colmaps[0][0]\n \n if max_value > self.colmaps[self.anz_seg-1][1]:\n max_value = self.colmaps[self.anz_seg-1][1]\n \n # scale segment borders to interval [0,1]\n for i in xrange(self.anz_seg):\n x = colmaps[i][0]\n self.xmin.append((x-min_value)/(max_value-min_value))\n \n x = colmaps[i][1]\n self.xmax.append((x-min_value)/(max_value-min_value))\n \n self.colmap.append(colmaps[i][2])\n \n print self.xmin, self.xmax", "def draw_composite_map(date_obj, t850, u200, v200, u500, v500, mslp, gh500, u850, v850, pwat):\n \n #Get lat and lon arrays for this dataset:\n lat = t850.lat.values\n lon = t850.lon.values\n\n #========================================================================================================\n # Create a Basemap plotting figure and add geography\n #========================================================================================================\n\n #Create a Plate Carree projection object\n proj_ccrs = ccrs.Miller(central_longitude=0.0)\n\n #Create figure and axes for main plot and colorbars\n fig = plt.figure(figsize=(18,12),dpi=125)\n gs = gridspec.GridSpec(12, 36, figure=fig) #[ytop:ybot, xleft:xright]\n ax = plt.subplot(gs[:, :-1],projection=proj_ccrs) #main plot\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax2 = plt.subplot(gs[:4, -1]) #top plot\n ax2.set_xticklabels([])\n ax2.set_yticklabels([])\n ax3 = plt.subplot(gs[4:8, -1]) #bottom plot\n ax3.set_xticklabels([])\n ax3.set_yticklabels([])\n ax4 = plt.subplot(gs[8:, -1]) #bottom plot\n ax4.set_xticklabels([])\n ax4.set_yticklabels([])\n\n #Add political boundaries and coastlines\n ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidths=1.2)\n ax.add_feature(cfeature.BORDERS.with_scale('50m'), linewidths=1.2)\n ax.add_feature(cfeature.STATES.with_scale('50m'), linewidths=0.5)\n\n #Add land/lake/ocean masking\n land_mask = cfeature.NaturalEarthFeature('physical', 'land', '50m',\n edgecolor='face', facecolor='#e6e6e6')\n sea_mask = cfeature.NaturalEarthFeature('physical', 'ocean', '50m',\n edgecolor='face', facecolor='#ffffff')\n lake_mask = cfeature.NaturalEarthFeature('physical', 'lakes', '50m',\n edgecolor='face', facecolor='#ffffff')\n ax.add_feature(sea_mask,zorder=0)\n ax.add_feature(land_mask,zorder=0)\n ax.add_feature(lake_mask,zorder=0)\n\n #========================================================================================================\n # Fill contours\n #========================================================================================================\n\n #--------------------------------------------------------------------------------------------------------\n # 850-hPa temperature\n 
#--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(-40,40,1)\n cmap = plt.get_cmap('jet')\n extend = \"both\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,t850,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.1)\n\n #--------------------------------------------------------------------------------------------------------\n # PWAT\n #--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(20,71,0.5)\n\n #Define a color gradient for PWAT\n pwat_colors = gradient([[(255,255,255),0.0],[(255,255,255),20.0]],\n [[(205,255,205),20.0],[(0,255,0),34.0]],\n [[(0,255,0),34.0],[(0,115,0),67.0]])\n cmap = pwat_colors.get_cmap(clevs)\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,pwat,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.9)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax2,shrink=0.75,pad=0.01,ticks=[20,30,40,50,60,70])\n\n #--------------------------------------------------------------------------------------------------------\n # 250-hPa wind\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n wind = calc.wind_speed(u200, v200)\n\n #Specify contour settings\n clevs = [40,50,60,70,80,90,100,110]\n cmap = col.ListedColormap(['#99E3FB','#47B6FB','#0F77F7','#AC97F5','#A267F4','#9126F5','#E118F3','#E118F3'])\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,wind,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax3,shrink=0.75,pad=0.01,ticks=clevs)\n\n #--------------------------------------------------------------------------------------------------------\n # 500-hPa smoothed vorticity\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n dx,dy = calc.lat_lon_grid_deltas(lon,lat)\n vort = calc.vorticity(u500, v500, dx=dx, dy=dy)\n smooth_vort = smooth(vort, 5.0) * 10**5\n\n #Specify contour settings\n clevs = np.arange(2,20,1)\n cmap = plt.get_cmap('autumn_r')\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,smooth_vort,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.3)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax4,shrink=0.75,pad=0.01,ticks=clevs[::2])\n \n #========================================================================================================\n # Contours\n #========================================================================================================\n\n #--------------------------------------------------------------------------------------------------------\n # MSLP\n #--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(960,1040+4,4)\n style = 'solid' #Plot solid lines\n color = 'red' #Plot lines as gray\n width = 0.8 #Width of contours 0.25\n\n #Contour this variable\n cs = ax.contour(lon,lat,mslp,clevs,colors=color,linewidths=width,linestyles=style,transform=proj_ccrs,alpha=0.9)\n\n #Include value 
labels\n ax.clabel(cs, inline=1, fontsize=9, fmt='%d')\n\n #--------------------------------------------------------------------------------------------------------\n # Geopotential heights\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n gh500 = gh500 / 10.0\n\n #Specify contour settings\n clevs = np.arange(480,612,4)\n style = 'solid' #Plot solid lines\n color = 'black' #Plot lines as gray\n width = 2.0 #Width of contours\n\n #Contour this variable\n cs = ax.contour(lon,lat,gh500,clevs,colors=color,linewidths=width,linestyles=style,transform=proj_ccrs)\n\n #Include value labels\n ax.clabel(cs, inline=1, fontsize=12, fmt='%d')\n\n #--------------------------------------------------------------------------------------------------------\n # Surface barbs\n #--------------------------------------------------------------------------------------------------------\n\n #Plot wind barbs\n _ = ax.quiver(lon, lat, u850.values, v850.values, transform=proj_ccrs, regrid_shape=(38,30), scale=820, alpha=0.5)\n\n #--------------------------------------------------------------------------------------------------------\n # Label highs & lows\n #--------------------------------------------------------------------------------------------------------\n\n #Label highs and lows\n add_mslp_label(ax, proj_ccrs, mslp, lat, lon)\n\n #========================================================================================================\n # Step 6. Add map boundary, legend, plot title, then save image and close\n #========================================================================================================\n\n #Add china province boundary\n add_china_map_2cartopy(ax, name='province')\n\n #Add custom legend\n from matplotlib.lines import Line2D\n custom_lines = [Line2D([0], [0], color='#00A123', lw=5),\n Line2D([0], [0], color='#0F77F7', lw=5),\n Line2D([0], [0], color='#FFC000', lw=5),\n Line2D([0], [0], color='k', lw=2),\n Line2D([0], [0], color='k', lw=0.1, marker=r'$\\rightarrow$', ms=20),\n Line2D([0], [0], color='r', lw=0.8),]\n\n ax.legend(custom_lines, ['PWAT (mm)', '200-hPa Wind (m/s)', '500-hPa Vorticity', '500-hPa Height (dam)', '850-hPa Wind (m/s)', 'MSLP (hPa)'], loc=2, prop={'size':12})\n\n #Format plot title\n title = \"Synoptic Composite \\nValid: \" + dt.datetime.strftime(date_obj,'%Y-%m-%d %H%M UTC')\n st = plt.suptitle(title,fontweight='bold',fontsize=16)\n st.set_y(0.92)\n\n #Return figuration\n return(fig)", "def _plot_one_value(\n data_matrix, grid_metadata_dict, colour_map_object, min_colour_value,\n max_colour_value, plot_cbar_min_arrow, plot_cbar_max_arrow,\n log_scale=False):\n\n figure_object, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres = (\n _get_basemap(grid_metadata_dict)\n )\n\n num_grid_rows = data_matrix.shape[0]\n num_grid_columns = data_matrix.shape[1]\n x_spacing_metres = (\n (basemap_x_matrix_metres[0, -1] - basemap_x_matrix_metres[0, 0]) /\n (num_grid_columns - 1)\n )\n y_spacing_metres = (\n (basemap_y_matrix_metres[-1, 0] - basemap_y_matrix_metres[0, 0]) /\n (num_grid_rows - 1)\n )\n\n data_matrix_at_edges, edge_x_coords_metres, edge_y_coords_metres = (\n grids.xy_field_grid_points_to_edges(\n field_matrix=data_matrix,\n x_min_metres=basemap_x_matrix_metres[0, 0],\n y_min_metres=basemap_y_matrix_metres[0, 0],\n x_spacing_metres=x_spacing_metres,\n 
y_spacing_metres=y_spacing_metres)\n )\n\n data_matrix_at_edges = numpy.ma.masked_where(\n numpy.isnan(data_matrix_at_edges), data_matrix_at_edges\n )\n\n # data_matrix_at_edges[numpy.isnan(data_matrix_at_edges)] = -1\n\n plotting_utils.plot_coastlines(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_countries(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_states_and_provinces(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_parallels(\n basemap_object=basemap_object, axes_object=axes_object,\n num_parallels=NUM_PARALLELS)\n\n plotting_utils.plot_meridians(\n basemap_object=basemap_object, axes_object=axes_object,\n num_meridians=NUM_MERIDIANS)\n\n basemap_object.pcolormesh(\n edge_x_coords_metres, edge_y_coords_metres,\n data_matrix_at_edges, cmap=colour_map_object,\n vmin=min_colour_value, vmax=max_colour_value, shading='flat',\n edgecolors='None', axes=axes_object, zorder=-1e12)\n\n colour_bar_object = plotting_utils.plot_linear_colour_bar(\n axes_object_or_matrix=axes_object, data_matrix=data_matrix,\n colour_map_object=colour_map_object, min_value=min_colour_value,\n max_value=max_colour_value, orientation_string='horizontal',\n extend_min=plot_cbar_min_arrow, extend_max=plot_cbar_max_arrow,\n padding=0.05)\n\n tick_values = colour_bar_object.get_ticks()\n\n if log_scale:\n tick_strings = [\n '{0:d}'.format(int(numpy.round(10 ** v))) for v in tick_values\n ]\n elif numpy.nanmax(data_matrix) >= 6:\n tick_strings = [\n '{0:d}'.format(int(numpy.round(v))) for v in tick_values\n ]\n else:\n tick_strings = ['{0:.2f}'.format(v) for v in tick_values]\n\n colour_bar_object.set_ticks(tick_values)\n colour_bar_object.set_ticklabels(tick_strings)\n\n return figure_object, axes_object", "def decode(self, heatmaps, offsets):\n posemap = self._offset_to_pose(offsets)\n inst_indexes, inst_scores = self._get_maximum_from_heatmap(heatmaps[:, :1])\n poses = posemap.view(posemap.size(1), -1)[..., inst_indexes]\n poses = poses.view(self.num_joints, 2, -1).permute(2, 0, 1).contiguous()\n inst_scores = inst_scores.unsqueeze(1).unsqueeze(2).expand(poses.size())\n poses = torch.cat((poses, inst_scores), dim=2)\n return poses.clone()", "def updateMap(self,map):\n if not self.opened:\n col = int( self.world_rect.left / map.header_data['tilewidth'])\n row = int( self.world_rect.top / map.header_data['tileheight'])\n layerIndex = len(map.layer_data)-1\n while(layerIndex > 0):\n layer = map.layer_data[layerIndex]\n if(layer[row][col] > 1):\n layer[row][col] = 0\n break\n layerIndex -= 1\n for g in self.groups():\n g.remove(self)", "def _splineloc(self, coa_map, win=5, upscale=10):\n\n # Get shape of 3-D coalescence map\n nx, ny, nz = coa_map.shape\n n = np.array([nx, ny, nz])\n\n # Find maximum coalescence location in grid\n mx, my, mz = np.unravel_index(np.nanargmax(coa_map), coa_map.shape)\n i = np.array([mx, my, mz])\n\n # Determining window about maximum value and trimming coa grid\n w2 = (win - 1)//2\n x1, y1, z1 = np.clip(i - w2, 0 * n, n)\n x2, y2, z2 = np.clip(i + w2 + 1, 0 * n, n)\n\n # If subgrid is not close to the edge\n if (x2 - x1) == (y2 - y1) == (z2 - z1):\n coa_map_trim = coa_map[x1:x2, y1:y2, z1:z2]\n\n # Defining the original interpolation function\n xo = np.linspace(0, coa_map_trim.shape[0] - 1,\n coa_map_trim.shape[0])\n yo = np.linspace(0, coa_map_trim.shape[1] - 1,\n coa_map_trim.shape[1])\n zo = 
np.linspace(0, coa_map_trim.shape[2] - 1,\n coa_map_trim.shape[2])\n xog, yog, zog = np.meshgrid(xo, yo, zo)\n interpgrid = Rbf(xog.flatten(), yog.flatten(), zog.flatten(),\n coa_map_trim.flatten(),\n function=\"cubic\")\n\n # Creating the new interpolated grid\n xx = np.linspace(0, coa_map_trim.shape[0] - 1,\n (coa_map_trim.shape[0] - 1) * upscale + 1)\n yy = np.linspace(0, coa_map_trim.shape[1] - 1,\n (coa_map_trim.shape[1] - 1) * upscale + 1)\n zz = np.linspace(0, coa_map_trim.shape[2] - 1,\n (coa_map_trim.shape[2] - 1) * upscale + 1)\n xxg, yyg, zzg = np.meshgrid(xx, yy, zz)\n\n # Interpolate spline function on new grid\n coa_map_int = interpgrid(xxg.flatten(), yyg.flatten(),\n zzg.flatten()).reshape(xxg.shape)\n\n # Calculate max coalescence location on interpolated grid\n mxi, myi, mzi = np.unravel_index(np.nanargmax(coa_map_int),\n coa_map_int.shape)\n mxi = mxi/upscale + x1\n myi = myi/upscale + y1\n mzi = mzi/upscale + z1\n self.output.log(\"\\t\\tGridded loc: {} {} {}\".format(mx, my, mz), self.log)\n self.output.log(\"\\t\\tSpline loc: {} {} {}\".format(mxi, myi, mzi), self.log)\n\n # Run check that spline location is within grid-cell\n if (abs(mx - mxi) > 1) or (abs(my - myi) > 1) or \\\n (abs(mz - mzi) > 1):\n msg = \"\\tSpline warning: spline location outside grid cell\"\n msg += \"with maximum coalescence value\"\n self.output.log(msg, self.log)\n\n xyz = self.lut.xyz2loc(np.array([[mxi, myi, mzi]]), inverse=True)\n loc_spline = self.lut.xyz2coord(xyz)[0]\n\n # Run check that spline location is within window\n if (abs(mx - mxi) > w2) or (abs(my - myi) > w2) or \\\n (abs(mz - mzi) > w2):\n msg = \"\\t !!!! Spline error: location outside interpolation \"\n msg += \"window !!!!\\n\\t\\t\\tGridded Location returned\"\n self.output.log(msg, self.log)\n\n xyz = self.lut.xyz2loc(np.array([[mx, my, mz]]), inverse=True)\n loc_spline = self.lut.xyz2coord(xyz)[0]\n\n else:\n msg = \"\\t !!!! 
Spline error: interpolation window crosses edge of \"\n msg += \"grid !!!!\\n\\t\\t\\tGridded Location returned\"\n self.output.log(msg, self.log)\n\n xyz = self.lut.xyz2loc(np.array([[mx, my, mz]]), inverse=True)\n loc_spline = self.lut.xyz2coord(xyz)[0]\n\n return loc_spline", "def uk_map(fig1, indata, clevs, datlons, datlats, mtitle, munits, maskswitch):\n\t\n\tfrom mpl_toolkits import basemap as bm\n\timport matplotlib.cm as cm\n\tfrom mpl_toolkits.basemap import shiftgrid \n\tfrom netCDF4 import Dataset\n\tfrom matplotlib.colors import LightSource\n\timport matplotlib.pyplot as plt\n\timport numpy as np\n\timport hillshade\n\timport set_shade\n\timport colour_map\n\t\n\tif maskswitch==1:\n\t\t# import missing data map for masking out of oceans \n\t\tmissdata = Dataset('/exports/work/geos_cxc/users/ahardin4/output/amibatch/afixa/miss.nc', 'r', format='NETCDF3_CLASSIC')\n\t\t\n\t# create the figure and axes instances.\n\tax = fig1.add_axes([0.1,0.1,0.8,0.8])\n\tm = bm.Basemap(llcrnrlon=-9.5,llcrnrlat=49.5,urcrnrlon=2.5,urcrnrlat=59,rsphere=(6378137.00,6356752.3142),\\\n \tresolution='f',area_thresh=1000.,projection='laea', lat_0=54.5,lon_0=-2.75,ax=ax)\n\tm.drawcoastlines()\n\t\n\t# read in etopo5 topography/bathymetry.\n\turl = 'http://ferret.pmel.noaa.gov/thredds/dodsC/data/PMEL/etopo5.nc'\n\tetopodata = Dataset(url)\n\ttopoin = etopodata.variables['ROSE'][:]\n\tlons = etopodata.variables['ETOPO05_X'][:]\n\tlats = etopodata.variables['ETOPO05_Y'][:]\n\t\n\t# shift data so lons go from -180 to 180 instead of 00 to 360.\n\ttopoin,lons = shiftgrid(180.,topoin,lons,start=False)\n\n\t# transform coordinates\n\tx,y=m(datlons[:,:],datlats[:,:])\n\t# transform to nx x ny regularly spaced 5km native projection grid\n\tnx = int((m.xmax-m.xmin)/5000.)+1; ny = int((m.ymax-m.ymin)/5000.)+1\n\ttopodat = m.transform_scalar(topoin,lons,lats,nx,ny)\n\t\n\t# create light source object for topography\n\tls = LightSource(azdeg = 0, altdeg = 2)\n\t# use set_shade function (also available)\n\trgb = set_shade(topodat)\n\n\t# plot image over map with imshow.\n\tim = m.imshow(rgb)\n\t\n\t# apply function to colormap pointers, can be any function at all, as long as\n\t# 0 remains 0, 1 remains 1, and values increase from one to the other.\n\t\n\t# x^4 is good for pseudo-log plots of rainfall:\n\t#log_jet=cmap_xmap(lambda x: (x*x*x*x), cm.hsv)\n\t\n\t#set to lambda x: x for no change:\n\tlog_jet=cmap_xmap(lambda x: (x), cm.jet)\n\t\n\t#apply function to colormap if desired to make whole scale 'hotter' or 'colder'\n\t#example makes colourmap significantly hotter by confining values to upper quarter:\t\n\t#log_jet=cmap_map(lambda x: x/4+0.75, cm.gist_rainbow)\n\t\n\t# mask out oceans, but not lakes. 
Useful when plotting or comparing against observed\n\tif maskswitch==1:\n\t\tmissmap=missdata.variables['land_map']\n\t\tmissmap2=missdata.variables['land_map']\n\t\t# cut from big mask to small mask if necessary\n\t\t#smallmap=missmap[0,6:46,0:34]\n\t\tsmallmap=missmap[0,:,:]\n\t\tsmallmap2=missmap2[0,:,:]\n\t\t# expand out by one to take into account interpolation\n\t\t\n\t\tfor i in range(1,39):\n\t\t\tfor j in range(1,33):\n\t\t\t\tif smallmap[i,j] == 0.0:\n\t\t\t\t\tsmallmap2[i-1,j]=0.0 \n\t\t\t\t\tsmallmap2[i,j-1]=0.0\n\t\t\t\t\tsmallmap2[i+1,j]=0.0 \n\t\t\t\t\tsmallmap2[i,j+1]=0.0\n\t\t\n\t\t# perform masking\n\t\tindata=np.ma.masked_array(indata,mask=(smallmap2<-0.5))\n\t\tprint smallmap2[0,0], smallmap2[36,0], smallmap2[20,20]\n\t\t#indata[indata<=0.1]=np.nan\n\t# produce semi-transparent contour map\n\tcontourmap=m.contourf(x,y,indata,clevs,cmap=cm.get_cmap(log_jet,len(clevs)-1),extend='both',\n\t\talpha=0.5,origin='lower',rasterized=True)\n\t\t\n\t# produce simple block plot\n\t#contourmap=m.pcolor(x,y,indata,shading='interp',cmap=cm.get_cmap(log_jet,len(clevs)-1),\n\t#\talpha=0.5)\n\t\t\n\t# place colour bar on right\n\tcb = m.colorbar(contourmap,\"right\", size=\"5%\", pad='3%')\n\t# configure colour bar labeling\n\tcl = plt.getp(cb.ax, 'ymajorticklabels')\n\tcontourmap=plt.setp(cl, fontsize=14)\n\n\t# draw parallels and meridians so as not to clash with colour bar placement\n\t# labels = [left,right,top,bottom]\n\tm.drawparallels(np.arange(-70.,80,1.), labels=[1,0,0,1], fontsize=13)\n\tm.drawmeridians(np.arange(351.,362.,2.),labels=[1,0,0,1], fontsize=13)\n\t\n\t# configure title and units\n\tcb.ax.set_xlabel(munits, fontsize=12)\n\tcontourmap=plt.title(mtitle, fontsize=14)", "def calculate_min_max_tiles(self):", "def is_map_obstacle_in_screen_range(self):\n raise NotImplementedError", "def split_simcc_xy(self, heatmap: Union[np.ndarray, torch.Tensor]):\n size = heatmap.size()\n k = size[0] if size[0] <= 20 else 20\n maps = []\n for _ in range(k):\n xy_dict = {}\n single_heatmap = heatmap[_]\n xy_dict['x'], xy_dict['y'] = self.merge_maps(single_heatmap)\n maps.append(xy_dict)\n return maps, k", "def update_histo_frame():\n min_histo.text = str(MIN_RANGE_F) # Display the legend\n max_histo.text = str(MAX_RANGE_F)\n\n histogram = np.zeros(GRID_AXIS) # Clear histogram accumulation array\n # Collect camera data and calculate the histogram\n for _row in range(0, GRID_AXIS):\n for _col in range(0, GRID_AXIS):\n histo_index = int(map_range(GRID_DATA[_col, _row], 0, 1, 0, GRID_AXIS - 1))\n histogram[histo_index] = histogram[histo_index] + 1\n\n histo_scale = np.max(histogram) / (GRID_AXIS - 1)\n if histo_scale <= 0:\n histo_scale = 1\n\n # Display the histogram\n for _col in range(0, GRID_AXIS):\n for _row in range(0, GRID_AXIS):\n if histogram[_col] / histo_scale > GRID_AXIS - 1 - _row:\n image_group[((_row * GRID_AXIS) + _col)].fill = index_to_rgb(\n round((_col / GRID_AXIS), 3)\n )\n else:\n image_group[((_row * GRID_AXIS) + _col)].fill = BLACK", "def CC_2Dfilter(\n h5path_labels,\n map_propnames,\n criteria,\n h5path_int='',\n slicedim=0,\n usempi=False,\n outputfile='',\n protective=False,\n ):\n\n (min_area,\n max_area,\n max_intensity_mb,\n max_eccentricity,\n min_solidity,\n min_euler_number,\n min_extent) = criteria\n\n # prepare mpi\n mpi_info = utils.get_mpi_info(usempi)\n\n # TODO: check output path\n\n # open data for reading\n h5file_mm, ds_mm, _, _ = utils.h5_load(h5path_labels, comm=mpi_info['comm'])\n if h5path_int:\n h5file_mb, ds_mb, _, _ = 
utils.h5_load(h5path_int, comm=mpi_info['comm'])\n else:\n ds_mb = None\n # mask used as intensity image in mean_intensity criterium\n\n # get the maximum labelvalue in the input\n root = h5path_labels.split('.h5')[0]\n maxlabel = get_maxlabel(root, ds_mm)\n\n # prepare mpi\n n_slices = ds_mm.shape[slicedim]\n series = np.array(range(0, n_slices), dtype=int)\n if mpi_info['enabled']:\n series = utils.scatter_series(mpi_info, series)[0]\n if mpi_info['rank'] == 0:\n fws_reduced = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n else:\n fws_reduced = None\n\n fws = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n\n mapall = criteria.count(None) == len(criteria)\n\n # pick labels observing the constraints\n go2D = ((max_eccentricity is not None) or\n (min_solidity is not None) or\n (min_euler_number is not None) or\n mapall)\n if go2D:\n\n for i in series:\n slcMM = utils.get_slice(ds_mm, i, slicedim)\n if h5path_int:\n slcMB = utils.get_slice(ds_mb, i, slicedim) # , 'bool'\n else:\n slcMB = None\n fws = check_constraints(slcMM, fws, map_propnames,\n criteria, slcMB, mapall)\n if mpi_info['enabled']:\n mpi_info['comm'].Reduce(fws, fws_reduced, op=MPI.MAX, root=0)\n else:\n fws_reduced = fws\n\n else:\n\n if mpi_info['rank'] == 0:\n fws = check_constraints(ds_mm, fws, map_propnames,\n criteria, ds_mb, mapall)\n fws_reduced = fws\n\n # write the forward maps to a numpy vector\n if mpi_info['rank'] == 0:\n slc = int(n_slices/2)\n slcMM = ds_mm[slc, :, :]\n slcMB = ds_mb[slc, :, :] if h5path_int else None\n datatypes = get_prop_datatypes(slcMM, map_propnames, slcMB)\n for i, propname in enumerate(map_propnames):\n root = outputfile.split('.h5')[0]\n nppath = '{}_{}.npy'.format(root, propname)\n outarray = np.array(fws_reduced[:, i], dtype=datatypes[i])\n np.save(nppath, outarray)\n\n # close and return\n h5file_mm.close()\n if h5path_int:\n h5file_mb.close()\n\n if mpi_info['rank'] == 0:\n return outarray", "def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except ValueError:\n return", "def mollview(map=None,fig=None,plot=False,filenme=None,\n\t\t\t rot=None,coord=None,unit='',\n\t\t\t xsize=800,title='Mollweide view',nest=False,\n\t\t\t min=None,max=None,flip='astro',\n\t\t\t remove_dip=False,remove_mono=False,\n\t\t\t gal_cut=0,\n\t\t\t format='%g',format2='%g',\n\t\t\t cbar=True,cmap=None, notext=False,\n\t\t\t norm=None,hold=False,margins=None,sub=None,\n\t\t\t return_projected_map=False):\n\ttry:\n\t\tfrom healpy import pixelfunc, projaxes as PA\n\texcept ImportError:\n\t\twarnings.warn(\n\t\t\t\"Could not load healpy package. 
If you want to use this feature, \"\n\t\t\t\"plaese install the healpy package from here: http://healpy.readthedocs.io/en/latest/\"\n\t\t\t\"or via pip or conda.\", RuntimeWarning)\n\t\treturn\n\n\t# Create the figure\n\n\tif not (hold or sub):\n\t\tif fig == None:\n\t\t\tf=plt.figure(figsize=(8.5,5.4))\n\t\t\textent = (0.02,0.05,0.96,0.9)\n\t\telse:\n\t\t\tf=fig\n\t\t\textent = (0.02,0.05,0.96,0.9)\n\telif hold:\n\t\tf=plt.gcf()\n\t\tleft,bottom,right,top = np.array(f.gca().get_position()).ravel()\n\t\textent = (left,bottom,right-left,top-bottom)\n\t\tf.delaxes(f.gca())\n\telse: # using subplot syntax\n\t\tf=plt.gcf()\n\t\tif hasattr(sub,'__len__'):\n\t\t\tnrows, ncols, idx = sub\n\t\telse:\n\t\t\tnrows, ncols, idx = sub//100, (sub%100)//10, (sub%10)\n\t\tif idx < 1 or idx > ncols*nrows:\n\t\t\traise ValueError('Wrong values for sub: %d, %d, %d'%(nrows,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ncols,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t idx))\n\t\tc,r = (idx-1)%ncols,(idx-1)//ncols\n\t\tif not margins:\n\t\t\tmargins = (0.01,0.0,0.0,0.02)\n\t\textent = (c*1./ncols+margins[0],\n\t\t\t 1.-(r+1)*1./nrows+margins[1],\n\t\t\t 1./ncols-margins[2]-margins[0],\n\t\t\t 1./nrows-margins[3]-margins[1])\n\t\textent = (extent[0]+margins[0],\n\t\t\t extent[1]+margins[1],\n\t\t\t extent[2]-margins[2]-margins[0],\n\t\t\t extent[3]-margins[3]-margins[1])\n\n\t# Starting to draw : turn interactive off\n\twasinteractive = plt.isinteractive()\n\tplt.ioff()\n\ttry:\n\t\tif map is None:\n\t\t\tmap = np.zeros(12)+np.inf\n\t\t\tcbar=False\n\t\tmap = pixelfunc.ma_to_array(map)\n\t\tax=PA.HpxMollweideAxes(f,extent,coord=coord,rot=rot,\n\t\t\t\t\t\t format=format2,flipconv=flip)\n\t\tf.add_axes(ax)\n\t\tif remove_dip:\n\t\t\tmap=pixelfunc.remove_dipole(map,gal_cut=gal_cut,\n\t\t\t\t\t\t\t\t\tnest=nest,copy=True,\n\t\t\t\t\t\t\t\t\tverbose=True)\n\t\telif remove_mono:\n\t\t\tmap=pixelfunc.remove_monopole(map,gal_cut=gal_cut,nest=nest,\n\t\t\t\t\t\t\t\t\t copy=True,verbose=True)\n\t\timg = ax.projmap(map,nest=nest,xsize=xsize,coord=coord,vmin=min,vmax=max,\n\t\t\t cmap=cmap,norm=norm)\n\t\tif cbar:\n\t\t\tim = ax.get_images()[0]\n\t\t\tb = im.norm.inverse(np.linspace(0,1,im.cmap.N+1))\n\t\t\tv = np.linspace(im.norm.vmin,im.norm.vmax,im.cmap.N)\n\t\t\tif matplotlib.__version__ >= '0.91.0':\n\t\t\t\tcb=f.colorbar(im,ax=ax,\n\t\t\t\t\t\t orientation='horizontal',\n\t\t\t\t\t\t shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),\n\t\t\t\t\t\t pad=0.05,fraction=0.1,boundaries=b,values=v,\n\t\t\t\t\t\t format=format)\n\t\t\telse:\n\t\t\t\t# for older matplotlib versions, no ax kwarg\n\t\t\t\tcb=f.colorbar(im,orientation='horizontal',\n\t\t\t\t\t\t shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),\n\t\t\t\t\t\t pad=0.05,fraction=0.1,boundaries=b,values=v,\n\t\t\t\t\t\t format=format)\n\t\t\tcb.solids.set_rasterized(True)\n\t\tax.set_title(title)\n\t\tif not notext:\n\t\t\tax.text(0.86,0.05,ax.proj.coordsysstr,fontsize=14,\n\t\t\t\tfontweight='bold',transform=ax.transAxes)\n\t\tif cbar:\n\t\t\tcb.ax.text(0.5,-1.0,unit,fontsize=14,\n\t\t\t\t transform=cb.ax.transAxes,ha='center',va='center')\n\t\tf.sca(ax)\n\tfinally:\n\t\tif plot:\n\t\t\tplt.draw()\n\t\tif wasinteractive:\n\t\t\tplt.ion()\n\t\t\t#plt.show()\n\tif return_projected_map:\n\t\treturn img", "def drawMap(self):\n world_map = folium.Map(location=[25, 10], zoom_start=3)\n totals_column = 'total_' + self.map_type.lower()\n top10 = self.covid_df.sort_values(totals_column, axis=0, ascending=False)['location'][:10]\n scale, units = self.unitsDetector(self.covid_df[totals_column].max())\n 
\n color_scheme = {'Cases': 'YlOrRd', 'Deaths': 'PuRd'}[self.map_type]\n bins = list(np.linspace(0, np.ceil(self.covid_df[totals_column].max() / scale) * scale, 6))\n legend_name = 'Total Number of COVID-19 ' + self.map_type\n map_file_name = self.generateFileName()\n \n folium.Choropleth(geo_data=self.geo_data,\n data=self.covid_df,\n columns=['location', totals_column],\n key_on='feature.properties.ADMIN',\n fill_color=color_scheme,\n bins=bins,\n legend_name=legend_name,\n highlight=True\n ).add_to(world_map)\n \n for i in range(10):\n country = top10.iloc[i]\n cases = self.covid_df[self.covid_df['location'] == country][totals_column] / scale\n \n # Centroid coordinates for each country labelled by its ISO-2 code\n lat = self.countries_centroids.loc[self.name_iso2_mapping[country]]['latitude']\n long = self.countries_centroids.loc[self.name_iso2_mapping[country]]['longitude']\n popup = f\"{country}: {cases.values[0]:.2f}{units} total {self.map_type.lower()}\"\n \n folium.Marker(location=[lat, long],\n popup=folium.Popup(popup, \n max_width=1000)\n ).add_to(world_map)\n \n world_map.save(map_file_name)", "def undistort_rectify_map(self):\n return cv.initUndistortRectifyMap(self._k, self._dist, np.eye(3), self._k, self.frame_size[::-1], cv.CV_16SC2)", "def compute_ranges(self, obj, key, ranges):\n all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element]))\n if obj is None or not self.normalize or all_table:\n return OrderedDict()\n # Get inherited ranges\n ranges = self.ranges if ranges is None else dict(ranges)\n\n # Get element identifiers from current object and resolve\n # with selected normalization options\n norm_opts = self._get_norm_opts(obj)\n\n # Traverse displayed object if normalization applies\n # at this level, and ranges for the group have not\n # been supplied from a composite plot\n return_fn = lambda x: x if isinstance(x, Element) else None\n for group, (axiswise, framewise) in norm_opts.items():\n elements = []\n # Skip if ranges are cached or already computed by a\n # higher-level container object.\n framewise = framewise or self.dynamic\n if group in ranges and (not framewise or ranges is not self.ranges):\n continue\n elif not framewise: # Traverse to get all elements\n elements = obj.traverse(return_fn, [group])\n elif key is not None: # Traverse to get elements for each frame\n frame = self._get_frame(key)\n elements = [] if frame is None else frame.traverse(return_fn, [group])\n if not axiswise or ((not framewise or len(elements) == 1)\n and isinstance(obj, HoloMap)): # Compute new ranges\n self._compute_group_range(group, elements, ranges)\n self.ranges.update(ranges)\n return ranges", "def make_pm_maps(input_file, input_pm_file, output_file, num_cones, num_bins=80, titles=None, mincount=0, maxcount=40, cut=None):\n # get titles for each subplot and dwarf proper motions\n titles, dwarf_pmra, dwarf_pmdec, = load_dwarf_info(input_file, titles)\n\n # load stellar pm values\n ra, dec, pmra, pmdec, parallax, parallax_error = load_gaia_search_info(input_pm_file)\n\n # from table 2 in\n # if titles is not None:\n # titles = fix_names(titles)\n # for i, title, dpmra, dpmdec in enumerate(zip(titles, dwarf_pmra, dwarf_pmdec)):\n # dwarf_pmra[i], dwarf_pmdec[i] = fix_pms(title, dpmra, dpmdec)\n # # dwarf_pmra[5] = 1.81\n # # dwarf_pmra[8] = -1.21\n # # dwarf_pmra[11] = 0.22\n # # dwarf_pmdec[5] = 0.14\n # # dwarf_pmdec[8] = -0.92\n # # dwarf_pmdec[11] = -1.41\n\n # set fig size and shape\n d = len(titles)\n rows = 3\n cols = int(np.ceil(d/rows))\n 
fig, axs = plot_setup(rows, cols, d)\n max_count = [0, 0]\n\n # plot each dwarf in separate subplots\n for ax, title, dwarfpmra, dwarfpmdec, *data in zip(axs, titles, dwarf_pmra, dwarf_pmdec, ra, dec, pmra, pmdec, parallax, parallax_error):\n counts, xedges, yedges, im = pm_histogram(fig, ax, data, title, dwarf_pmra=dwarfpmra, dwarf_pmdec=dwarfpmdec, cut=cut)\n\n # make labels across all subplots\n universal_plot_labels(fig, r\"Proper motion, right ascension [mas/yr]\", r\"Proper motion, declination [mas/yr]\")\n\n # add a universal colorbar, change cmap in hist2d above\n # fig.colorbar(im, ax=axs.ravel().tolist())\n\n fig.savefig(output_file, bbox_inches='tight')", "def adjust_map(map_, n_codes):\n assert np.size(map_, 1) <= n_codes, \"Map does not fit in number of codes\"\n margin = n_codes - np.size(map_, 1)\n left = margin // 2\n right = margin - left\n return np.concatenate((map_[:, 0:1, :],)*left + (map_,) + (map_[:,-1:, :],)*right, axis=1)", "def example_SegmentedColorMapping(min_value, max_value):\n \n colmap1 = ColorMapper(\"red2\")\n colmap1.exponent = 0.7\n \n colmap2 = ColorMapper(\"green\")\n \n colmap3 = ColorMapper(\"green\")\n colmap3.invert = True\n \n colmap4 = ColorMapper(\"blue2\")\n colmap4.invert = True\n colmap4.exponent = 0.5\n \n colmap = SegmentedColorMapping([ (-4.0, -2.0, colmap1), (-2.0, 0.0, colmap2),\n (0.0, 2.0, colmap3), (2.0, 4.0, colmap4)],\n min_value, max_value)\n \n return colmap", "def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()", "def map_area( m ):\n \n \n m.drawcoastlines( linewidth = 1.5, linestyle = 'solid', color = [ 75./255., 75/255., 75/255. ] )\t\n # ------draw parallels----------------\n circles = np.arange( -90., 90. + 30, 30. ) #delat = 30.\n m.drawparallels( circles, labels = [ 1, 0, 0, 0 ] )\n \n # -------draw meridians---------------\n meridians = np.arange( 0., 360, 60. 
) #delon = 60.\n m.drawmeridians( meridians, labels = [ 0, 0, 0, 1 ] )", "def _fcn_minmax_roi(self):\n self.roi._update_cbar_minmax()\n self.cbqt.cbobjs._objs['roi']['clim'] = self.roi._clim\n kwargs = self.cbqt.cbobjs._objs['roi'].to_kwargs(True)\n self.roi.update_from_dict(kwargs)\n self.roi._update_cbar()", "def gate_out(self, *dim_ranges):\n relevant_data = self.get_points(*[r.dim for r in dim_ranges])\n mins = np.array([r.min for r in dim_ranges])\n maxes = np.array([r.max for r in dim_ranges])\n test1 = np.any(relevant_data < mins, axis=1)\n test2 = np.any(relevant_data > maxes, axis=1)\n final = np.logical_or(test1, test2) \n return DataTable(self.data[final], self.dims, self.legends, self.tags.copy())", "def make_map_slider(self,root, col,color_range, **kwargs):\n \n #Identify all the temporal options for the specified fill setting\n shpfieldslist = [item[0] for item in self.shps[0].fields]\n self.datefieldlist[col] = [item for item in shpfieldslist if item.startswith(color_range)]\n datestrlist = [item[-6:] for item in self.datefieldlist[col]]\n self.datenumlist[col] = [int(item) for item in datestrlist]\n \n #Set bounds of scale\n datemax = max(self.datenumlist[col])\n datemin = min(self.datenumlist[col])\n self.map_temporalflag[col] = 1\n \n #Generate the scale\n self.mapslider_list[col] = tk.Scale(root, \n from_=datemin, to=datemax, \n orient='horizontal',\n tickinterval = 0,\n length = self.screenwidth*0.25\n )\n \n #If a slideval was provided, set the scale to that value\n if 'slideval' in kwargs:\n slideval = kwargs.pop('slideval')\n else:\n slideval = datemax\n if not slideval:\n slideval = datemax\n self.mapslider_list[col].set(slideval)\n \n \n #Make label\n mapslider_label = tk.Label(root, text=self.translate('Map')+': ',\n bg=self.default_background)\n mapslider_label.grid(column=0, row=2)\n self.mapslider_label_list[col] = (mapslider_label)\n \n #Bind controls to scale and plae on grid\n self.mapslider_list[col].bind(\"<ButtonRelease-1>\", lambda e: self.mapslide(col, factor=self.mapslider_list[col].get()))\n self.mapslider_list[col].grid(column=1, row=2, columnspan=3)\n \n #Store the field setting to be used for generating the map\n dateindex = self.datenumlist[col].index(slideval)\n datefield = self.datefieldlist[col][dateindex]\n self.date_setting_list[col] = datefield\n color_range = self.date_setting_list[col]\n \n return color_range" ]
[ "0.53061557", "0.5123476", "0.50569475", "0.5041872", "0.4931829", "0.49258262", "0.4896154", "0.4868902", "0.4862584", "0.48051956", "0.48001003", "0.47694784", "0.47593305", "0.47555342", "0.4747587", "0.47454724", "0.47438157", "0.47292355", "0.47042343", "0.46995413", "0.46734232", "0.46683073", "0.46559906", "0.4649211", "0.46427724", "0.46302462", "0.46177948", "0.4608795", "0.4604277", "0.46033227" ]
0.77395713
0
This Function is used to control the Movement of the Snake
def Movement(): keys = pygame.key.get_pressed() if keys[pygame.K_LEFT] and not snake.ang==90: snake.x_change = -snake.vel snake.y_change = 0 snake.left = True snake.right = False snake.up = False snake.down = False snake.ang = -90 elif keys[pygame.K_RIGHT] and not snake.ang==-90: snake.x_change = snake.vel snake.y_change = 0 snake.left = False snake.right = True snake.up = False snake.down = False snake.ang = 90 elif keys[pygame.K_UP] and not snake.ang==0: snake.x_change = 0 snake.y_change = -snake.vel snake.left = False snake.right = False snake.up = True snake.down = False snake.ang = 180 elif keys[pygame.K_DOWN] and not snake.ang==180: snake.x_change = 0 snake.y_change = snake.vel snake.left = False snake.right = False snake.up = False snake.down = True snake.ang = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self):\r\n piece = []\r\n if self.direction == \"UP\":\r\n piece = [self.body[0][0], self.body[0][1] - self.POS_CHANGE] # create piece at new coordinates\r\n elif self.direction == \"DOWN\":\r\n piece = [self.body[0][0], self.body[0][1] + self.POS_CHANGE]\r\n elif self.direction == \"LEFT\":\r\n piece = [self.body[0][0] - self.POS_CHANGE, self.body[0][1]]\r\n elif self.direction == \"RIGHT\":\r\n piece = [self.body[0][0] + self.POS_CHANGE, self.body[0][1]]\r\n\r\n if piece:\r\n if piece in self.body: # Lose game if snake touches itself\r\n self.alive = False\r\n else:\r\n self.body.insert(0, piece) # insert new piece at head of snake\r\n if len(self.body) > self.length:\r\n self.body.pop() # delete last piece of snake, if length isnt increased\r\n\r\n self.draw_snake()", "def movement_handler(new_direction, snake):\n #Get the head of the snake\n new_head = snake[0].copy()\n #Update the new head position based on where the snake moved\n if new_direction == INPUT.LEFT:\n new_head[0] -= CELL_SIZE\n elif new_direction == INPUT.UP:\n new_head[1] -= CELL_SIZE\n elif new_direction == INPUT.RIGHT:\n new_head[0] += CELL_SIZE\n else: #new_direction == INPUT.DOWN:\n new_head[1] += CELL_SIZE\n\n #We will update the position of the snake's head\n new_x, new_y = new_head\n if len(snake) >= 2:\n old_x, old_y = snake[1]\n else:\n old_x, old_y = [-1, -1]\n\n #If the player is running into themselves, reverse their INPUTection\n if new_x == old_x and new_y == old_y:\n if new_direction == INPUT.LEFT:\n new_head[0] += CELL_SIZE*2\n elif new_direction == INPUT.UP:\n new_head[1] += CELL_SIZE*2\n elif new_direction == INPUT.RIGHT:\n new_head[0] -= CELL_SIZE*2\n else: #new_direction == INPUT.DOWN:\n new_head[1] -= CELL_SIZE*2\n return new_head", "def event_handler(event):\n if event.type == pygame.QUIT: # click close button at top corner of the screen\n pygame.quit()\n quit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP and snake_direction[Y] == 0: # occur when up key is pressed\n snake_direction[X] = 0\n snake_direction[Y] = UP\n elif event.key == pygame.K_DOWN and snake_direction[Y] == 0: \n snake_direction[X] = 0\n snake_direction[Y] = DOWN\n elif event.key == pygame.K_LEFT and snake_direction[X] == 0: \n snake_direction[X] = LEFT\n snake_direction[Y] = 0\n elif event.key == pygame.K_RIGHT and snake_direction[X] == 0: \n snake_direction[X] = RIGHT\n snake_direction[Y] = 0\n # implement other directions here", "def __snake_move(self):\n self.__eat_candy()\n # move tail and body\n n_snake = len(self.__snake)\n for i in range(1, n_snake):\n s2 = self.__snake[n_snake - i]\n s1 = self.__snake[n_snake - i - 1]\n s2.move(s1.pos())\n # move head\n pos = self.__snake[0].pos()\n tmp_snake = Snake(self, direction=self.__h_direction, position=pos)\n h_pos = self.__get_next_head_pos(tmp_snake)\n tmp_snake.remove()\n icon = f'resources/{self.__directions[self.__h_direction]}.svg'\n new_head = Snake(self, icon, self.__h_direction, self.__cell_edge, h_pos)\n old_head = self.__snake[0]\n self.__snake[0] = new_head\n old_head.remove()\n self.__crash_check()", "def movement(self):", "def move(self): \n # range(start, stop, step)\n for seg_num in range(len(self.segments) - 1, 0, -1):\n new_x_position = self.segments[seg_num - 1].xcor()\n new_y_position = self.segments[seg_num - 1].ycor()\n self.segments[seg_num].goto(new_x_position, new_y_position)\n\n # moving first snake's segment 20 spaces and updating last_direction\n self.head.forward(MOVE_DISTANCE)\n self.last_direction = 
self.head.heading()", "def movement(self):\n self.rect.left -= self.speedx #to move the asteroid to the left", "def snake_move(snake, direction):\n head = snake[0].copy()\n\n if direction == RIGHT:\n head[0] = head[0] + 1\n elif direction == LEFT:\n head[0] = head[0] - 1\n elif direction == UP:\n head[1] = head[1] - 1\n elif direction == DOWN:\n head[1] = head[1] + 1\n else:\n return snake\n \n snake.insert(0,head)\n snake.pop()\n \n return snake", "def move_player(self, pressed_keys):\n # Arrow-key movement\n if pressed_keys[K_UP]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_DOWN]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_LEFT]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_RIGHT]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n # WASD movement\n if pressed_keys[K_w]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_s]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_a]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_d]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n #Boundary\n if self.player.rect.left < 0:\n self.player.rect.left = 0\n if self.player.rect.right > self.board.screen_width:\n self.player.rect.right = self.board.screen_width\n if self.player.rect.top <= 0:\n self.player.rect.top = 0\n if self.player.rect.bottom >= self.board.screen_height:\n self.player.rect.bottom = self.board.screen_height", "def step(self):\n if not self._is_game_over:\n self._move_snake()\n self._is_game_over = self.is_snake_collides()", "def snakeSetup(self,display):\n if display:\n self.screen = pygame.display.set_mode(windowSize)\n pygame.display.set_caption('Snake!')\n pygame.init()\n self.clock = pygame.time.Clock()\n self.dir = left #round(3 * random.random())\n self.s = snake(playerColor, unitSize,self.dir)\n self.setup = True", "def snake_move(self,param=(),duration = None,ignore_error_handle = False):\n message = {};\n step = 'draw a snake on device with ' + str(len(param)) + ' joint';\n try:\n touch_action = TouchAction(self.driver);\n touch_action.snake_move(param,duration).release().perform();\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def change_movement(self, action):\r\n if action == \"diagonal\" and self.movement != \"diagonal\":\r\n self.movement = \"diagonal\"\r\n self.x_speed = 3\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_diagonal)\r\n elif action == \"horizontal\" and self.movement != \"horizontal\":\r\n self.movement = \"horizontal\"\r\n self.x_speed = 3\r\n self.y_speed = 
0\r\n self.canvas.after(50, self.move_horizontal)\r\n elif action == \"vertical\" and self.movement != \"vertical\":\r\n self.movement = \"vertical\"\r\n self.x_speed = 0\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_vertical)\r\n elif action == \"inward_outward\":\r\n self.movement = \"inward_outward\"\r\n self.canvas.after(50, self.move_inward_outward)", "def control(self, keyCode):\n if (keyCode == DOWN and (self.on_left or self.on_right)):\n if self.on_left:\n self.x = self.maze.LEFT_VERT\n else:\n self.x = self.maze.RIGHT_VERT\n self.rot_begin = self.MOUTH_DOWN_BEGIN_ANGLE\n self.rot_end = self.MOUTH_DOWN_END_ANGLE\n self.x_add = 0\n self.y_add = self.velocity\n elif (keyCode == UP and (self.on_left or self.on_right)):\n if self.on_left:\n self.x = self.maze.LEFT_VERT\n else:\n self.x = self.maze.RIGHT_VERT\n self.rot_begin = self.MOUTH_UP_BEGIN_ANGLE\n self.rot_end = self.MOUTH_UP_END_ANGLE\n self.x_add = 0\n self.y_add = -(self.velocity)\n elif (keyCode == LEFT and (self.on_top or self.on_bottom)):\n if self.on_top:\n self.y = self.maze.TOP_HORIZ\n else:\n self.y = self.maze.BOTTOM_HORIZ\n self.rot_begin = self.MOUTH_LEFT_BEGIN_ANGLE\n self.rot_end = self.MOUTH_LEFT_END_ANGLE\n self.x_add = -(self.velocity)\n self.y_add = 0\n elif (keyCode == RIGHT and (self.on_top or self.on_bottom)):\n if self.on_top:\n self.y = self.maze.TOP_HORIZ\n else:\n self.y = self.maze.BOTTOM_HORIZ\n self.rot_begin = self.MOUTH_RIGHT_BEGIN_ANGLE\n self.rot_end = self.MOUTH_RIGHT_END_ANGLE\n self.x_add = self.velocity\n self.y_add = 0", "def move(self):\r\n\r\n # Randomizes movement after 40 steps and flips sprite \\\r\n # (if x-value of speed variable changes from positive to negative)\r\n if step == 40 and 0 < hunger < 205 and thirst < 175 and self.speed[0] not in range(-1000, 0):\r\n self.speed[0] = random.randint(-5, -1)\r\n self.speed[1] = random.randint(-7, 7)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Randomizes movement after 40 steps, but doesn't flip sprite because \\\r\n # x-value of speed variable doesn't change from positive to negative\r\n elif step == 40 and 0 < hunger < 205 and thirst < 175 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = random.randint(-5, -1)\r\n self.speed[1] = random.randint(-7, 7)\r\n\r\n # Randomizes movement after 80 steps and flips sprite \\\r\n # (if x-value of speed variable changes from negative to positive)\r\n if step == 80 and 0 < hunger < 205 and thirst < 175 and self.speed[0] not in range(0, 1000):\r\n self.speed[0] = random.randint(1, 5)\r\n self.speed[1] = random.randint(-7, 7)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Randomizes movement after 80 steps, but doesn't flip sprite \\\r\n # because x-value of speed variable doesn't change from positive to negative\r\n elif step == 80 and 0 < hunger < 205 and thirst < 175 and self.speed[0] in range(0, 1000):\r\n self.speed[0] = random.randint(1, 5)\r\n self.speed[1] = random.randint(-7, 7)\r\n\r\n # Flips the dino sprite when it hits the left or right side of the enclosure \\\r\n # and reverses dino's speed\r\n if self.rect.right > 818 or self.rect.left < 182:\r\n # Keeps sprite from getting stuck on wall in an endless cycle of flipping\r\n if step != 40 and step != 80 and 0 < hunger < 205 and thirst < 175:\r\n self.speed[0] = - self.speed[0]\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Reverses the dino's speed if it hits the top or bottom side of the enclosure\r\n if self.rect.top < 55 or self.rect.bottom > 542:\r\n # 
Keeps sprite from getting stuck on wall in an endless cycle of flipping\r\n if step != 40 and step != 80 and 0 < hunger < 205 and thirst < 175:\r\n self.speed[1] = - self.speed[1]\r\n\r\n # Causes dinosaur to go to the tree when hunger is high enough\r\n if hunger >= 205:\r\n if step != 40 and step != 80 and 0 < thirst < 175:\r\n if self.rect.left > 300 and self.speed[0] not in range(-1000, 0):\r\n # Speed must be rounded so that speed[0] and speed[1] is in the range functions above \\\r\n # (range function doesn't take decimal point numbers)\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left > 300 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n if self.rect.left < 300 and self.speed[0] not in range(1, 1000):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left < 300 and self.speed[0] in range(1, 1000):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n\r\n # Causes dinosaur to go to the pond when thirst is high enough\r\n if thirst == 175:\r\n if step != 40 and step != 80:\r\n if self.rect.left > 540 and self.speed[0] not in range(-1000, 0):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left > 540 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n if self.rect.left < 540 and self.speed[0] not in range(1, 1000):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left < 540 and self.speed[0] in range(1, 1000):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n\r\n # Sets rectangle surrounding dino sprite to new position based on its speed\r\n newpos = self.rect.move(self.speed)\r\n self.rect = newpos", "def reset(self):\r\n self.body = [[int(self.x_pos/2), int(self.y_pos/2)]] # initial snake starts at center of screen\r\n self.direction = \"UP\"\r\n self.length = 1\r\n self.alive = True\r\n self.speed = 10", "def interact():\n env = SnakeEnv()\n done = False\n r = 0\n action = random.randrange(4)\n delay_time = 0.2\n\n # After the first run of the method env.render()\n # env.renderer.viewer obtains an attribute 'window'\n # which is a pyglet.window.Window object\n env.render(mode='human')\n # Use the arrows to control the snake's movement direction\n @env.renderer.viewer.window.event\n def on_text_motion(motion):\n \"\"\"\n Events to actions mapping\n \"\"\"\n\n nonlocal action\n if motion == MOTION_UP:\n action = 0\n elif motion == MOTION_DOWN:\n action = 2\n elif motion == MOTION_LEFT:\n action = 3\n elif motion == MOTION_RIGHT:\n action = 1\n\n while not done:\n time.sleep(delay_time)\n obs, reward, done, info = env.step(action)\n env.render(mode='human')\n if reward:\n r += reward\n # Speeding up snake after eating food\n delay_time -= 1/6 * delay_time\n\n return r", "def move():\n Robot.move()", "def player_movement(self):", "def example_move(self):\n 
self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def left(event):\n if event.action == sense_hat.ACTION_RELEASED:\n snake.changeDirection(LEFT)", "def __new_snake(self):\n self._snake = self.Snake(Direction.RIGHT, Position(4, 4), Position(3, 4), Position(2, 4))", "def update(self, pressed_keys):\r\n # read key presses in event log and change position accordingly\r\n if pressed_keys[K_UP]:\r\n if self.direction == \"down\":\r\n pass\r\n else:\r\n self.yChange = -block\r\n self.xChange = 0\r\n self.direction = \"up\"\r\n self.surf = pygame.transform.scale(self.image[0], (block, block))\r\n if pressed_keys[K_DOWN]:\r\n if self.direction == \"up\":\r\n pass\r\n else:\r\n self.yChange = block\r\n self.xChange = 0\r\n self.direction = \"down\"\r\n self.surf = self.imgD\r\n if pressed_keys[K_LEFT]:\r\n if self.direction == \"right\":\r\n pass\r\n else:\r\n self.xChange = -block\r\n self.yChange = 0\r\n self.direction = \"left\"\r\n self.surf = self.imgL\r\n if pressed_keys[K_RIGHT]:\r\n if self.direction == \"left\":\r\n pass\r\n else:\r\n self.xChange = block\r\n self.yChange = 0\r\n self.direction = \"right\"\r\n self.surf = self.imgR\r\n\r\n # when snake passes the boundaries of the screen it will loop through to the opposite side\r\n if self.x >= dis_width:\r\n self.x = 0\r\n if self.x < 0:\r\n self.x = dis_width\r\n if self.y >= dis_height:\r\n self.y = 0\r\n if self.y < 0:\r\n self.y = dis_height\r\n\r\n # add the direction change based on button press\r\n self.x += self.xChange\r\n self.y += self.yChange\r\n\r\n self.head = []\r\n self.head.append(self.x)\r\n self.head.append(self.y)\r\n self.head.append(self.direction)\r\n self.list.append(self.head)\r\n\r\n #if list has more items than the length of snake delete first item in list\r\n if len(self.list) > self.length:\r\n del self.list[0]", "def play_step(self, action):\n self.players[0].moving_left = False\n self.players[0].moving_right = False\n if action == MOVE_LEFT:\n self.players[0].moving_left = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_left = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == MOVE_RIGHT:\n self.players[0].moving_right = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_right = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == SHOOT:\n if self.dead_player or not self.players[0].is_alive:\n self.update(is_a_star=True)\n return\n if not self.players[0].weapon.is_active:\n self.players[0].shoot()\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n if self.dead_player or not self.players[0].is_alive:\n return", "def on_key_press(self, key, modifiers):\n if key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n elif key == arcade.key.UP:\n self.player.change_y 
= MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED", "def play_move(plateau, sens):\n if not (sens.upper() == \"B\" or sens.upper() == \"H\" or sens.upper() == \"D\" or sens.upper() == \"G\"):\n return \"Erreur !\"\n # mouvement des colonne\n if sens.upper() == \"B\":\n columns_move(plateau, 0)\n\n elif sens.upper() == \"H\":\n columns_move(plateau, 1)\n\n # mouvement des lignes\n elif sens.upper() == \"D\":\n lines_move(plateau, 0)\n\n elif sens.upper() == \"G\":\n lines_move(plateau, 1)", "def up(event):\n if event.action == sense_hat.ACTION_RELEASED:\n snake.changeDirection(UP)", "def move(self):\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_w]:\n self.y -= self.vel\n if keys[pygame.K_a]:\n self.x -= self.vel\n if keys[pygame.K_s]:\n self.y += self.vel\n if keys[pygame.K_d]:\n self.x += self.vel", "def automove(self):\n if self.x < self.end_cinematic_x_pos:\n self.x += self.SHIP_SPEED\n if self.x > self.end_cinematic_x_pos:\n self.x -= self.SHIP_SPEED\n if self.y < self.end_cinematic_y_pos:\n self.y += self.SHIP_SPEED\n if self.y > self.end_cinematic_y_pos:\n self.y -= self.SHIP_SPEED" ]
[ "0.7399674", "0.6836237", "0.6778097", "0.6765576", "0.67256254", "0.66972667", "0.65777445", "0.6498739", "0.6492528", "0.6492464", "0.64858997", "0.6451853", "0.64459854", "0.640347", "0.64023644", "0.6400139", "0.6397963", "0.6363882", "0.6341559", "0.63338673", "0.63338673", "0.6331024", "0.6329555", "0.6322084", "0.6315082", "0.6307919", "0.6303197", "0.6290147", "0.6255717", "0.6253231" ]
0.78153515
0
This Function Calculates distance between Food and Snake
def Distance(foodx,foody): di = ((snake.x - foodx)**2) + ((snake.y - foody)**2) d = int(math.sqrt(di)) return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def measure_distance(self):\n # set Trigger to HIGH\n GPIO.output(self.GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # save StartTime\n while GPIO.input(self.GPIO_ECHO) == 0:\n start_time = time.time()\n\n # save time of arrival\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop_time = time.time()\n\n # time difference between start and arrival\n time_elapsed = stop_time - start_time\n # multiply with the sonic speed (343.00 m/s)\n # and divide by 2, because there and back\n distance = (time_elapsed * 343.00) / 2\n\n return distance", "def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)", "def distance(self) -> int:\n return 0", "def distance_between_wheels():", "def distance(self, first_tape, second_tape):\n pairs = zip(first_tape, second_tape)\n return math.sqrt(abs(sum(map((lambda n: self.subsq(*n)), pairs))))", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)", "def get_distance(self):\n print(\"voici la distance à l'obstacle\")", "def distances(self):", "def manhatam_distance(self) -> int:\n raise NotImplementedError", "def get_distance(self) -> int:\n return self.get_measurement_data().distance", "def distance(self):\n _, _, costs = self.calculate_costs()\n return np.sum(costs)", "def _get_distance_betweenitems(self, item_no1, item_no2):\n\n try:\n if item_no1 >= 0 and item_no2 >= 0:\n loc_current = self.page_current.item_onscreenlocs[item_no1]\n loc_potential = self.page_current.item_onscreenlocs[item_no2]\n distance = abs(loc_potential - loc_current)\n else:\n distance = 0\n\n except IndexError:\n distance = 0\n\n return distance", "def calc_distance(self, observation):\n actual_obs = observation[0]\n scrn_player = actual_obs.observation.feature_screen.player_relative\n scrn_select = actual_obs.observation.feature_screen.selected\n scrn_density = actual_obs.observation.feature_screen.unit_density\n\n state_added = scrn_select + scrn_density\n\n marine_center = np.mean(self.xy_locs(scrn_player == 1), axis=0).round()\n\n # first step\n if np.sum(scrn_select) == 0:\n marine_center = np.mean(self.xy_locs(scrn_player == 1), axis=0).round()\n # marine behind beacon\n if isinstance(marine_center, float):\n marine_center = np.mean(self.xy_locs(state_added == 2), axis=0).round()\n else:\n # normal navigation\n marine_center = np.mean(self.xy_locs(state_added == 2), axis=0).round()\n if isinstance(marine_center, float):\n marine_center = np.mean(self.xy_locs(state_added == 3), axis=0).round()\n\n beacon_center = np.mean(self.xy_locs(scrn_player == 3), axis=0).round()\n #\n # print(state_added)\n # print(\"---- Marine {} | {} Beacon ----\".format(marine_center, beacon_center))\n # time.sleep(0.2)\n distance = math.hypot(beacon_center[0] - marine_center[0],\n beacon_center[1] - marine_center[1])\n\n return beacon_center, marine_center, distance", "def _calculate_distance(self, passenger, driver):\n londriver, latdriver = driver['lon'], driver['lat']\n lonpassenger, latpassenger = passenger['lon'], passenger['lat']\n lon_p, lat_p, lon_d, lat_d = map(radians,\n [float(lonpassenger), float(latpassenger), float(londriver), float(latdriver)])\n 
lon_distance = lon_d - lon_p\n lat_distance = lat_d - lat_p\n a = sin(lat_distance / 2) ** 2 + cos(lat_p) * cos(lat_d) * sin(lon_distance / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def _calculate_distance(self, passenger, driver):\n londriver, latdriver = driver['lon'], driver['lat']\n lonpassenger, latpassenger = passenger['lon'], passenger['lat']\n lon_p, lat_p, lon_d, lat_d = map(radians,\n [float(lonpassenger), float(latpassenger), float(londriver), float(latdriver)])\n lon_distance = lon_d - lon_p\n lat_distance = lat_d - lat_p\n a = sin(lat_distance / 2) ** 2 + cos(lat_p) * cos(lat_d) * sin(lon_distance / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def calculate_distance(atom1,atom2): #dot string to show when you go into the help doc of this function\n x_distance = atom1[0]-atom2[0]\n y_distance = atom1[1]-atom2[1]\n z_distance = atom1[2]-atom2[2]\n distance = numpy.sqrt(x_distance**2+ y_distance**2+z_distance**2)\n return distance", "def distance():\n return str(us.get_distance())", "def manhatam_distance(self) -> int:\n return abs(self.north) + abs(self.east)", "def distance(self, keyOne, keyTwo):", "def distance(self, other_pt, is_lla=True):\n return 0.0", "def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)", "def get_distance(self, star):\n if self.centroid == star.centroid:\n print(\"distance for same star\")\n return 0\n\n unitary_a = self.get_unitary_vector()\n unitary_b = star.get_unitary_vector()\n dab = math.degrees(math.acos(unitary_a[0] * unitary_b[0] +\n unitary_a[1] * unitary_b[1] +\n unitary_a[2] * unitary_b[2]))\n return dab", "def manhatam_distance(self) -> int:\n return abs(self.position[0]) + abs(self.position[1])", "def get_distance(self, star):\n if self == star:\n return 0\n\n a_car = self.get_cartesian_coords()\n b_car = star.get_cartesian_coords()\n dab = math.degrees(math.acos(a_car[0] * b_car[0] +\n a_car[1] * b_car[1] +\n a_car[2] * b_car[2]))\n return dab", "async def distance(self):\n return round(await self._rpc.distance(), 2)", "def getDistance(pos1, pos2):\r\n return ((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2) ** 0.5", "def calcDistance(self, left, right):\n\n return math.fabs(right-left)", "def getDistance(self):\n return sqrt(self.state[0] * self.state[0] + self.state[2] * self.state[2])", "def get_distance(route, dists):\n cost = 0\n if route[0] != route[-1]:\n route.append(route[0])\n\n for i in range(len(route)-1):\n cost += dists[route[i], route[i+1]]\n # cost += dists[route[-1], route[0]]\n return cost" ]
[ "0.6700603", "0.6521232", "0.6514296", "0.63147444", "0.62802625", "0.6160367", "0.61424226", "0.6136788", "0.61203325", "0.6113802", "0.6087659", "0.6070107", "0.6063656", "0.6058297", "0.60548335", "0.60548335", "0.60385036", "0.6023612", "0.60140026", "0.5991973", "0.59842086", "0.59711033", "0.5960644", "0.5938914", "0.5929656", "0.5924037", "0.59235173", "0.59184366", "0.590708", "0.5900615" ]
0.8156581
0
Updates the value of a leaf node and all the sums above it. Idx expected in the [0, capacity] range.
def update(self, idx, value): idx = self.__capacity - 1 + idx self.__tree[idx] = value self.__update(idx)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def __setitem__(self, idx: int, val: float) -> None:\n assert 0 <= idx < self.capacity, f\"idx={idx} capacity={self.capacity}\"\n\n # Index of the leaf to insert into (always insert in \"second half\"\n # of the tree, the first half is reserved for already calculated\n # reduction-values).\n idx += self.capacity\n self.value[idx] = val\n\n # Recalculate all affected reduction values (in \"first half\" of tree).\n idx = idx >> 1 # Divide by 2 (faster than division).\n while idx >= 1:\n update_idx = 2 * idx # calculate only once\n # Update the reduction value at the correct \"first half\" idx.\n self.value[idx] = self.operation(\n self.value[update_idx], self.value[update_idx + 1]\n )\n idx = idx >> 1 # Divide by 2 (faster than division).", "def set(self, node_index, value):\n if value < 0.0:\n raise ValueError(\n 'Sum tree values should be nonnegative. Got {}'.format(value))\n self.highest_set = max(node_index, self.highest_set)\n node_index = node_index + self.low_idx\n self.max_recorded_priority = max(value, self.max_recorded_priority)\n\n delta_value = value - self.nodes[node_index]\n\n # Now traverse back the tree, adjusting all sums along the way.\n for _ in reversed(range(self.depth)):\n # Note: Adding a delta leads to some tolerable numerical inaccuracies.\n self.nodes[node_index] += delta_value\n node_index = (node_index - 1) // 2\n\n self.nodes[node_index] += delta_value\n assert node_index == 0, ('Sum tree traversal failed, final node index '\n 'is not 0.')", "def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 1) // 2", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value) # - leaf_value because the MCTS tree is a max-min tree\n self.update(leaf_value)", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits. wtf ??? 
this line is rigth but kind of wired !\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def update(self, leaf_value,visits_count=1):\n # Count visit.\n self._n_visits += visits_count\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits\n\n if self.is_root():\n self.last_leafvalue = leaf_value", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def _update_node(node, value):\n node.N += 1\n node.W += value\n node.Q = node.W / node.N", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.", "def fn(node, x):\n if not node: return x\n x = fn(node.right, x) # sum of right subtree\n x += node.val \n node.val = x\n return fn(node.left, x)", "def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i]\n while i <= self.size:\n self.BITree[i] += v\n\n # Update index to next set bit in binary representation\n i += i & (-i)", "def updateTree(self, i, val, cur):\n start, end = cur.start, cur.end\n if start == end == i:\n cur.val = val\n return\n mid = start+(end-start)/2\n if i <= mid:\n cur.val -= cur.left.val\n self.updateTree(i, val, cur.left)\n cur.val += cur.left.val\n else:\n cur.val -= cur.right.val\n self.updateTree(i, val, cur.right)\n cur.val += cur.right.val", "def include_final_offset(node, offset):\n for leaf in node.leaves:\n leaf.value = leaf.value * offset", "def __init__(self, capacity):\n assert isinstance(capacity, int)\n if capacity <= 0:\n raise ValueError(\n 'Sum tree capacity should be positive. 
Got: {}'.format(capacity))\n\n self.nodes = []\n self.depth = int(np.ceil(np.log2(capacity)))\n self.low_idx = (2**self.depth) - 1 # pri_idx + low_idx -> tree_idx\n self.high_idx = capacity + self.low_idx\n self.nodes = np.zeros(2**(self.depth + 1) - 1) # Double precision.\n self.capacity = capacity\n\n self.highest_set = 0\n\n self.max_recorded_priority = 1.0", "def heap_update(self):\n print 'SumTree pre-update:', self.replay.tree[0].sum\n last_ixs = self.replay.last_ixs(True)\n while True:\n if len(last_ixs) == 0:\n break\n if len(last_ixs) < 10000:\n ixs = last_ixs\n last_ixs = []\n else:\n ixs = last_ixs[:10000]\n last_ixs = last_ixs[10000:]\n batch = [self.replay.tree[ix].pointer for ix in ixs]\n delta = self.get_delta(batch)\n self.get_p_weights(delta, batch, ixs)\n print 'SumTree post-update:', self.replay.tree[0].sum\n print 'SumTree updated'", "def backup_nodes(self, value: float, backup_until=None):\n current = self.current\n parent = current.parent\n sum_from_leaf = value\n\n while parent is not backup_until:\n parent.visits += 1\n sum_from_leaf += current.score - parent.score\n\n # average rewards\n parent.reward += (sum_from_leaf - parent.reward) / parent.visits\n current, parent = parent, parent.parent", "def value(d,o):\n # return memoized value if possible\n if (d,o) in v:\n return v[(d,o)]\n\n thisitem = int(t[d][o])\n # the total of a subtree that starts at the leaf, is just the value of the leaf\n if d == maxdepth:\n val = thisitem\n else:\n val = thisitem + max(value(d+1,o),value(d+1,o+1))\n\n v[(d,o)]=val\n return val", "def get(self, subtree_sum):\n idx = 0\n while True:\n # if idx is a leaf node return the idx and the value\n if idx >= self.__capacity - 1:\n return (idx - self.__capacity + 1, self.__tree[idx])\n\n # else continue down\n left = 2 * idx + 1\n right = 2 * idx + 2\n left_sum = self.__tree[left]\n if left_sum >= subtree_sum:\n idx = left\n else:\n idx = right\n subtree_sum -= left_sum", "def include_final_offset(node, offset):\n for leaf in node.leaves:\n leaf.value = leaf.value + offset", "def include_final_offset(node, offset):\n if offset != 0.0:\n for leaf in node.leaves:\n leaf.value = leaf.value + offset", "def refresh(self):\n node, ans = self.list_head.next.next, 0\n # first update key_nodes in even positions\n while node:\n ans += 1\n node = node.next.next\n # then update tree_nodes's current_btree_node in odd positions\n node = self.list_head.next\n while node:\n node.current_btree_node = self\n if node.next:\n node = node.next.next\n else:\n break\n self.size = ans", "def backup(self, value):\n current = self\n while current.parent is not None:\n value *= -1\n current.number_visits += 1\n current.total_value += value\n current = current.parent", "def increment(self):\n if self.is_empty():\n return 0\n else:\n self.get_root().value += 1\n if self.get_left():\n self.get_left().increment()\n if self.get_right():\n self.get_right().increment()", "def capacity_enlarge(self, k):\n count = 0\n idx = self.capacity - 1\n while count < k:\n left = self.tree[idx]\n right = priorityNode(0, None)\n insert_pos = self.tree.shape[0]\n self.tree = np.insert(self.tree, insert_pos, [left,right])\n idx += 1\n count += 1\n\n self.last_capacity = self.capacity # mark down the last capacity for adding operation\n self.capacity += k # Update the value of capacity", "def _update_value_at(self, index, value):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n node.value = value" ]
[ "0.74209356", "0.6767374", "0.6753369", "0.6640915", "0.62751794", "0.6238642", "0.61967045", "0.61868465", "0.618199", "0.6152516", "0.6152516", "0.6152516", "0.6084542", "0.5986752", "0.5953413", "0.5944968", "0.58795923", "0.581223", "0.5790687", "0.57693315", "0.57603663", "0.570496", "0.5703129", "0.56980103", "0.5690032", "0.5679459", "0.56651527", "0.56528497", "0.5648007", "0.5642546" ]
0.74235
0
Receives the idx of a leaf node and updates the sums on all the nodes above it based on its current value.
def __update(self, idx): parent = (idx - 1) // 2 while parent >= 0: left, right = 2 * parent + 1, 2 * parent + 2 self.__tree[parent] = self.__tree[left] + self.__tree[right] parent = (parent - 1) // 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def set(self, node_index, value):\n if value < 0.0:\n raise ValueError(\n 'Sum tree values should be nonnegative. Got {}'.format(value))\n self.highest_set = max(node_index, self.highest_set)\n node_index = node_index + self.low_idx\n self.max_recorded_priority = max(value, self.max_recorded_priority)\n\n delta_value = value - self.nodes[node_index]\n\n # Now traverse back the tree, adjusting all sums along the way.\n for _ in reversed(range(self.depth)):\n # Note: Adding a delta leads to some tolerable numerical inaccuracies.\n self.nodes[node_index] += delta_value\n node_index = (node_index - 1) // 2\n\n self.nodes[node_index] += delta_value\n assert node_index == 0, ('Sum tree traversal failed, final node index '\n 'is not 0.')", "def update(self, idx, value):\n idx = self.__capacity - 1 + idx\n self.__tree[idx] = value\n self.__update(idx)", "def fn(node, x):\n if not node: return x\n x = fn(node.right, x) # sum of right subtree\n x += node.val \n node.val = x\n return fn(node.left, x)", "def _fix_up_to_root(self, idx):\n combine_fn = self._combine_fn\n while idx >= 1:\n # self.data[idx] = combine_fn(self.data[self._left(idx)], self.data[self._right(idx)])\n self.data[idx] = combine_fn(self.data[2 * idx], self.data[2 * idx + 1])\n # idx = self._parent(idx)\n idx = idx >> 1", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits. wtf ??? 
this line is rigth but kind of wired !\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def __setitem__(self, idx: int, val: float) -> None:\n assert 0 <= idx < self.capacity, f\"idx={idx} capacity={self.capacity}\"\n\n # Index of the leaf to insert into (always insert in \"second half\"\n # of the tree, the first half is reserved for already calculated\n # reduction-values).\n idx += self.capacity\n self.value[idx] = val\n\n # Recalculate all affected reduction values (in \"first half\" of tree).\n idx = idx >> 1 # Divide by 2 (faster than division).\n while idx >= 1:\n update_idx = 2 * idx # calculate only once\n # Update the reduction value at the correct \"first half\" idx.\n self.value[idx] = self.operation(\n self.value[update_idx], self.value[update_idx + 1]\n )\n idx = idx >> 1 # Divide by 2 (faster than division).", "def update(self, leaf_value,visits_count=1):\n # Count visit.\n self._n_visits += visits_count\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits\n\n if self.is_root():\n self.last_leafvalue = leaf_value", "def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i]\n while i <= self.size:\n self.BITree[i] += v\n\n # Update index to next set bit in binary representation\n i += i & (-i)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits", "def updateTree(self, i, val, cur):\n start, end = cur.start, cur.end\n if start == end == i:\n cur.val = val\n return\n mid = start+(end-start)/2\n if i <= mid:\n cur.val -= cur.left.val\n self.updateTree(i, val, cur.left)\n cur.val += cur.left.val\n else:\n cur.val -= cur.right.val\n self.updateTree(i, val, cur.right)\n cur.val += cur.right.val", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.", "def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n vals.append(ans)\n return ans", "def include_final_offset(node, offset):\n for leaf in node.leaves:\n leaf.value = leaf.value * offset", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value) # - leaf_value because the MCTS tree is a max-min tree\n self.update(leaf_value)", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total", "def update_tree(root, executed_acts, total_rew):\n root.value = 
max(total_rew, root.value)\n root.visits += 1\n new_nodes = 0\n\n node = root\n for step, act in enumerate(executed_acts):\n if act not in node.children:\n node.children[act] = Node()\n new_nodes += 1\n node = node.children[act]\n node.value = max(total_rew, node.value)\n node.visits += 1\n\n return new_nodes", "def find_prefixsum_idx(self, prefixsum):\n if isinstance(prefixsum, float):\n prefixsum = np.array([prefixsum])\n assert 0 <= np.min(prefixsum)\n assert np.max(prefixsum) <= self.sum() + 1e-5\n assert isinstance(prefixsum[0], float)\n\n idx = np.ones(len(prefixsum), dtype=int)\n cont = np.ones(len(prefixsum), dtype=bool)\n\n while np.any(cont): # while not all nodes are leafs\n idx[cont] = 2 * idx[cont]\n prefixsum_new = np.where(\n self._value[idx] <= prefixsum, prefixsum - self._value[idx], prefixsum\n )\n # prepare update of prefixsum for all right children\n idx = np.where(\n np.logical_or(self._value[idx] > prefixsum, np.logical_not(cont)),\n idx,\n idx + 1,\n )\n # Select child node for non-leaf nodes\n prefixsum = prefixsum_new\n # update prefixsum\n cont = idx < self._capacity\n # collect leafs\n return idx - self._capacity", "def include_final_offset(node, offset):\n for leaf in node.leaves:\n leaf.value = leaf.value + offset", "def include_final_offset(node, offset):\n if offset != 0.0:\n for leaf in node.leaves:\n leaf.value = leaf.value + offset", "def update(self, index: int, x: int):\n index += self.n2\n self.tree[index] = self.binary(self.tree[index], x)\n while index > 1:\n # (index ^ 1) はiと1の排他的論理和(XOR)\n x = self.binary(x, self.tree[index ^ 1])\n index >>= 1 # 右ビットシフトで親ノードのインデックスへ移動\n self.tree[index] = self.binary(self.tree[index], x)", "def get(self, subtree_sum):\n idx = 0\n while True:\n # if idx is a leaf node return the idx and the value\n if idx >= self.__capacity - 1:\n return (idx - self.__capacity + 1, self.__tree[idx])\n\n # else continue down\n left = 2 * idx + 1\n right = 2 * idx + 2\n left_sum = self.__tree[left]\n if left_sum >= subtree_sum:\n idx = left\n else:\n idx = right\n subtree_sum -= left_sum", "def fn(node, val):\n if not node: return 0\n val = 10*val + node.val\n if not node.left and not node.right: return val \n return fn(node.left, val) + fn(node.right, val)", "def sum(self) -> int:\n return self.root.sum", "def getSum(root, level, h):\n if root == None:\n return\n \n h[level] = root.data\n \n getSum(root.left, level+1, h)\n getSum(root.right, level+1, h)", "def find_sum(root, desired_sum, level=0, buffer_list=None, result=[]):\n if not buffer_list:\n buffer_list = []\n\n if not root:\n return result\n\n buffer_list.append(root.key)\n temp = desired_sum\n\n for i in range(level, -1, -1):\n temp -= buffer_list[i]\n\n if temp == 0:\n result.append(buffer_list[i:level + 1])\n\n find_sum(root.left, desired_sum, level + 1, buffer_list[:], result)\n find_sum(root.right, desired_sum, level + 1, buffer_list[:], result)\n\n return result" ]
[ "0.70218706", "0.6763772", "0.65720445", "0.6464258", "0.634624", "0.62440336", "0.62352777", "0.6203399", "0.6152715", "0.61511225", "0.61511225", "0.61511225", "0.61307853", "0.6130343", "0.6128592", "0.61171615", "0.6116346", "0.60709554", "0.60432565", "0.60112834", "0.59804213", "0.5979022", "0.59768355", "0.5935433", "0.58912987", "0.5850242", "0.58430946", "0.5835884", "0.5799235", "0.5785629" ]
0.75952435
0
Given an arbitrary number of functions, we create a pipeline where the output is piped between functions. You can also specify a tuple of arguments that should be passed to the functions in the pipeline. The first argument is always the output of the previous function. This version uses the reduce builtin instead of using recursion.
def ReducePipeline(*funcs, **kwargs):
    def accum(val, func):
        funcArgs = kwargs.get(func.__name__, tuple())
        if hasattr(val, "__call__"):
            return func(val(), *funcArgs)
        else:
            return func(val, *funcArgs)
    def wrapper(*data):
        newFuncs = (partial(funcs[0], *data),) + funcs[1:]
        return reduce(accum, newFuncs)
    return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pipe(*functions):\n\n return reduce(compose, functions, identity)", "def make_pipeline(steps):\n def compose2(f, g):\n return lambda x: g(f(x))\n return functools.reduce(compose2, steps)", "def compose(*fns):\n return functools.reduce(lambda f,g: lambda x: f(g(x)), fns)", "def compose(*functions):\n return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)", "def chain_funcs(funcs):\n return lambda x: reduce(lambda f1, f2: f2(f1), funcs, x)", "def compose(*fs) -> Callable:\n return lambda x: reduce(flip(funcall), reversed(fs), x)", "def tee_pipe(*funcs: Tuple[Callable[[GT], GS], ...]) -> Callable[[GT], GT]:\n\n piped = compose(*funcs)\n\n def _tee_pipe(arr):\n a, b = itertools.tee(arr)\n piped(a)\n return b\n\n return _tee_pipe", "def compose(*funcs: Callable[[T], T]) -> Callable[[T], T]:\n return functools.reduce(lambda g, f: lambda x: f(g(x)), funcs, lambda x: x)", "def compose(*funcs):\n def _compose(g, f):\n return lambda *args, **kwargs: g(f(*args, **kwargs))\n return reduce(_compose, funcs)", "def compose(*funcs):\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError(\"Composition of empty sequence not supported.\")", "def compose(*funcs):\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')", "def compose_many(*fs):\n return reduce(compose, fs)", "def compose(*funcs):\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')", "def compose(*funcs):\n return reduce(lambda f, g: lambda x: f(g(x)), funcs[::-1])", "def compose(*fns):\n import functools\n\n def _apply(x, f):\n if isinstance(x, tuple):\n return f(*x)\n else:\n return f(x)\n\n def comp(*args):\n return functools.reduce(_apply, fns, args)\n\n return comp", "def composition(func_list):\n return reduce(\n lambda (f1, args1), (f2, args2) : (lambda x : f1(f2(x, *args2), *args1)), \n func_list,\n lambda x : x\n )", "def pipeline(\n first: Callable[[Any], Any],\n second: Callable[[Any], Any],\n *rest: Callable[[Any], Any]\n) -> Callable[[Any], Any]:\n return compose(*reversed(rest), second, first)", "def compose(*functions):\n head, *tail = functions\n return head if not tail else lambda *args, **kwargs: head(compose(*tail)(*args, **kwargs))", "def pipeline(self, *funcs) -> \"fn\":\n return self._mod.pipeline(self, *funcs)", "def PipeLine(*funcs, **kwargs):\n def wrapper(*data):\n if len(funcs) == 1:\n combinedArgs = data + kwargs.get(funcs[-1].__name__, tuple())\n return funcs[-1](combinedArgs)\n else:\n combinedArgs = kwargs.get(funcs[-1].__name__, tuple())\n if combinedArgs != ():\n del kwargs[funcs[-1].__name__]\n return funcs[-1](PipeLine(*funcs[:-1], **kwargs)(*data), *combinedArgs)\n return wrapper", "def _pipe_and_accumulate(val, fns):\n for fn in fns:\n val = fn(val)\n yield val", "def compose(*funcs):\n if not funcs:\n return identity\n\n def wrapper(*args, **kwargs):\n fst, *rest = funcs\n ret = fst(*args, **kwargs)\n\n for f in rest:\n ret = f(ret)\n\n return ret\n\n return wrapper", "def pipeline(filters):\n pipe = partial(reduce, lambda acc, f: f(acc), filters)\n bil = bilateral()\n\n def procme(img):\n img = bil(img)\n return pipe(img)\n\n return lambda img: map(procme, [img[:, :, 0], img[:, :, 1], img[:, :, 2]])", "def 
compose(*fns):\n\n if len(fns) == 0:\n raise ValueError(\"At least one function must be provided\")\n\n def composite(*args):\n x = fns[-1](*args)\n for fn in reversed(fns[0:-1]):\n x = fn(x)\n return x\n\n return composite", "def compose1(*functions: _ComposeArg[_T]) -> _Transform[_T]:\n def composition(arg, **kwargs):\n for f in reversed(functions):\n if isinstance(f, tuple):\n f, kws = f\n arg = f(arg, **{kw: kwargs[kw] for kw in kws})\n else:\n arg = f(arg)\n return arg\n return composition", "def m_pipe(val, *fns, **kwargs):\n kw = kwargs\n _val = val\n for fn in fns:\n _val = fn(_val, **kw)\n return _val", "def compose(*funcs):\n if not funcs:\n return identity\n else:\n f0 = funcs[0]\n def composed(_):\n # f_1 o f_2 o ... o f_n\n pre_composed = compose(*funcs[1:])\n return f0(pre_composed(_))\n return composed", "def reducer(functions, init_value):\n return reduce(lambda res, func: func(res), functions, init_value)", "def merge_n_reduce(\n function: typing.Callable, arity: int, data: list\n) -> typing.Any:\n while len(data) > 1:\n data_chunk = data[:arity]\n data = data[arity:]\n data.append(function(*data_chunk))\n return data[0]", "def chain_layer_functions(input_layer, functions):\n return reduce(lambda layer, func: func(layer), functions, input_layer)" ]
[ "0.85493785", "0.79058933", "0.7589019", "0.75349104", "0.7516276", "0.7404753", "0.7371548", "0.7317679", "0.7314664", "0.7298046", "0.7296629", "0.7294338", "0.7245193", "0.719107", "0.71188146", "0.7108474", "0.710505", "0.7088272", "0.69122714", "0.6900706", "0.68673223", "0.67737937", "0.6764257", "0.6689317", "0.66435003", "0.65857357", "0.65198576", "0.647352", "0.6399837", "0.6360639" ]
0.8107064
1
Check move possibility using observation window
def is_move_possible(obs_window, displacement):
    pos_row = obs_window.shape[0] // 2
    pos_col = obs_window.shape[1] // 2
    new_pos_row = pos_row + displacement[0]
    new_pos_col = pos_col + displacement[1]
    is_traversable = obs_window[new_pos_row, new_pos_col] == 0
    is_shortcut = obs_window[new_pos_row, pos_col] or obs_window[pos_row, new_pos_col]
    return is_traversable and (not is_shortcut)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ismoving(self):\n return self.dp.state()==PyTango.DevState.MOVING", "def move_window():\n\tif SLIDING_WINDOW:\n\t\t# get the chosen predicates\n\t\tpred = Predicate.objects.filter(pk__in=[p+1 for p in toggles.CHOSEN_PREDS])\n\n\t\t# handle window properties\n\t\tfor p in pred:\n\t\t\tp.move_window()", "def test_window():\n # Generate observations with random times\n timeline = random_timed_observation_timeline()\n\n # Defaults to one hour\n for window in timeline.windows():\n # Gotta be a tuple, though we don't know the length\n assert isinstance(window, tuple)\n assert len(window) > 0\n\n # Check the types\n for o in window:\n assert isinstance(o, Observation)\n\n # Double check that Observations in the window are sorted (for fun)\n for o1, o2 in zip(window, window[1:]):\n assert o1 < o2\n\n # Make sure each member is within an hour of the first.\n # We know they're sorted, so just check first and last.\n assert (window[0].time + timedelta(hours=1)) > window[-1].time", "def ismoving(self):\n return not self.get_par(\"done_moving\")", "def is_moving(self):\n is_moving = self.get_raw_status() & self.STATUS_MOVING\n return bool(is_moving)", "def does_move_win(self, x, y):\n me = self.board[x][y]\n for (dx, dy) in [(0, +1), (+1, +1), (+1, 0), (+1, -1)]:\n p = 1\n while self.is_on_board(x+p*dx, y+p*dy) and self.board[x+p*dx][y+p*dy] == me:\n p += 1\n n = 1\n while self.is_on_board(x-n*dx, y-n*dy) and self.board[x-n*dx][y-n*dy] == me:\n n += 1\n\n if p + n >= (self.connect + 1): # want (p-1) + (n-1) + 1 >= 4, or more simply p + n >- 5\n return True\n\n return False", "def CheckPaneMove(self, pane):\r\n\r\n win_rect = pane.frame.GetRect()\r\n win_rect.x, win_rect.y = pane.floating_pos\r\n \r\n if win_rect == self._last_rect:\r\n return False\r\n\r\n # skip the first move event\r\n if self._last_rect.IsEmpty():\r\n self._last_rect = wx.Rect(*win_rect)\r\n return False\r\n\r\n # skip if moving too fast to avoid massive redraws and\r\n # jumping hint windows\r\n if abs(win_rect.x - self._last_rect.x) > 10 or \\\r\n abs(win_rect.y - self._last_rect.y) > 10:\r\n self._last_rect = wx.Rect(*win_rect)\r\n return False\r\n\r\n return True", "def is_moving(self):\n return self.steps < self.max_steps", "def move_valid(move):\n return True", "def is_moving(self):\n return self.gripper_io.get_signal_value(\"is_moving\")", "def _ispinnedmove(self, from_, to_):\n return False", "def is_moving(self):\n response = self.__send_and_receive(protocol.GET_IS_MOVE)\n value = self.__gen_response_value(response)\n if value:\n # printf(\"\".join(value[1:]))\n if \"\".join(value)[1:] == \"1\":\n return True\n else:\n return False\n else:\n return False", "def is_new_move(my_board, x, y):\n return my_board[x, y] == CLOSED", "def is_moving(self) -> bool:\n return self.orders and self.orders[0].ability.id is AbilityId.MOVE", "def _onmove(self, event):", "def has_moved(self):\n return self.move_count > 0", "def _check_window(x: int, y: int, z: int) -> bool:\n return (x + y) == z", "def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the y-direction\r\n\r\n # checks to see if the player is still within the bounds\r", "def 
move_atoms(self):\n return self.abivars.ionmov != 0", "def ev_windowmoved(self, event: WindowMoved) -> None:", "def move_of_king_and_rook(self, from_row, from_col, to_row, to_col): \n #provjere da li su kraljevi ili topovi inicirali pomijeranje\n if(from_row == 7 and from_col == 0):\n self.wrl_moved = True\n elif(from_row == 7 and from_col == 7):\n self.wrr_moved = True\n elif(from_row == 7 and from_col == 4):\n self.wk_moved = True\n elif(from_row == 0 and from_col == 0):\n self.brl_moved = True\n elif(from_row == 0 and from_col == 7):\n self.brr_moved = True\n elif(from_row == 0 and from_col == 4):\n self.bk_moved = True\n \n #provjera da li je neko pojeo topove\n if(to_row == 7 and to_col == 0):\n self.wrl_moved = True\n elif(to_row == 7 and to_col == 7):\n self.wrr_moved = True\n elif(to_row == 0 and to_col == 0):\n self.brl_moved = True\n elif(to_row == 0 and to_col == 7):\n self.brr_moved = True", "def OnMoveEvent(self, event):\r\n\r\n win_rect = self.GetRect()\r\n\r\n if win_rect == self._last_rect:\r\n return\r\n\r\n # skip the first move event\r\n if self._last_rect.IsEmpty(): \r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n \r\n # skip if moving too fast to avoid massive redraws and\r\n # jumping hint windows\r\n if abs(win_rect.x - self._last_rect.x) > 3 or abs(win_rect.y - self._last_rect.y) > 3:\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n\r\n # prevent frame redocking during resize\r\n if self._last_rect.GetSize() != win_rect.GetSize():\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n\r\n if _VERSION_STRING < \"2.9\":\r\n leftDown = wx.GetMouseState().LeftDown()\r\n else:\r\n leftDown = wx.GetMouseState().LeftIsDown()\r\n\r\n if not leftDown:\r\n return\r\n\r\n if not self._moving: \r\n self.OnMoveStart(event)\r\n self._moving = True\r\n\r\n if self._last3_rect.IsEmpty():\r\n return\r\n\r\n self.OnMoving(event)", "def check_win(self, board, move):\n for i, j, k in self.winning_cases:\n if board[i] == move and board[j] == move and board[k] == move:\n return True\n return False", "def Active(self):\n return self.NMove > 0", "def no_more_move(self):\n if (self.p_no_move + self.c_no_move == 2):\n return True\n return False", "def isArmWithinWindow(armPos, window):\n for pos in armPos:\n if(pos[0][0]<0 or pos[0][0]>window[0] or pos[1][0]<0 or pos[1][0]>window[0]):\n return False\n if(pos[0][1]<0 or pos[0][1]>window[1] or pos[1][1]<0 or pos[1][1]>window[1]):\n return False\n\n\n return True", "def findPlacesToMove():\n movesDestinations = [];\n \n curY = curBlank[0];\n curX = curBlank[1];\n\n if(curY-1 >= 1): #UP\n movesDestinations.append((curY-1, curX));\n if(curY+1 <= n): #DOWN\n movesDestinations.append((curY+1, curX));\n if(curX-1 >= 1): #LEFT\n movesDestinations.append((curY, curX-1));\n if(curX+1 <= n): #RIGHT\n movesDestinations.append((curY, curX+1));\n \n return movesDestinations;", "def legal_move_on(draw, board):\n start, _ = draw(strategies.sampled_from(sorted(board.pieces)))\n end = draw(strategies.sampled_from(sorted(board.movable_from(start))))\n return start, end", "def test_move():\n human = Human()\n coordinates = [2, 1]\n dimensions = [3, 4]\n\n new_coordinates = human.move(coordinates, 
dimensions)\n\n possible_new_coordinates = [[2, 0], [3, 0], [3, 1], [3, 2], [2, 2], [1, 2], [1, 1], [1, 0]]\n\n assert new_coordinates in possible_new_coordinates", "def test_sliding_window(self):\n frame_length = 512\n step = 100\n x_trainlist = [np.zeros((25187,9)) for b in range(78)]\n y_trainlist = [np.zeros((12,9)) for b in range(78)]\n x_train, y_train = tutorial_pamap2.sliding_window(frame_length, step, x_trainlist, y_trainlist)\n test = len(x_train) == 19266\n assert test" ]
[ "0.6529597", "0.6286072", "0.6259327", "0.6203149", "0.6197683", "0.6197294", "0.6170365", "0.601616", "0.5998505", "0.5931079", "0.5923095", "0.5921639", "0.5878888", "0.5877249", "0.58698124", "0.585277", "0.5812267", "0.5797952", "0.5775176", "0.57483536", "0.5737459", "0.5729434", "0.57093906", "0.5701173", "0.5668545", "0.56511736", "0.5650913", "0.5649598", "0.5646812", "0.56445885" ]
0.7049266
0
Returns the CPModule from within the loaded Python module. m - an imported module. Returns the CPModule class.
def find_cpmodule(m):
    for v, val in list(m.__dict__.items()):
        if isinstance(val, type) and issubclass(val, cellprofiler_core.module.Module):
            return val
    raise ValueError(
        "Could not find cellprofiler_core.module.Module class in %s" % m.__file__
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _module(self):\n if self._module_cache is None:\n self._module_cache = load_module(self._name, self._path)\n return self._module_cache", "def base_module(self) -> nn.Module:\n return getattr(__import__(\"src.modules\", fromlist=[\"\"]), self.name)", "def get_module(self):\n return self.module", "def get_module(cls, module_name):\n if cls.module_dict is None:\n # Init the module_dict once.\n cls.module_dict = {mod.name: mod for mod in cls.get_pb().modules}\n return cls.module_dict.get(module_name)", "def get_module_class(module):\n try:\n for name, obj in inspect.getmembers(module):\n # must check for parent module name (should be beacon/codec/etc) as to avoid imported class objects\n if inspect.isclass(obj) and obj.__module__ == module.__name__:\n return obj\n # have it instantiate the object? depends where I decide to use this method: obj_() creates an instance.\n except Exception, e:\n print \"Error getting class from %s module\" % (module.__name__)\n raise", "def get_module(cls, module=None):\n return module or sys.modules[cls.module_name()]", "def exposed_getmodule(self, name):\n return __import__(name, None, None, \"*\")", "def get_module(module_name):\n module = __import__(module_name)\n components = module_name.split('.')\n for comp in components[1:]:\n module = getattr(module,comp)\n return module", "def module(self):\n return self.lib.module", "def get_module(module):\n return getattr(sys.modules, module, importlib.import_module(module))", "def my_import(module_name, class_name):\n\n\t# load the module, will raise ImportError if module cannot be loaded\n\tm = importlib.import_module(module_name)\n\n\t# get the class, will raise AttributeError if class cannot be found\n\tc = getattr(m, class_name)\n\n\treturn c", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def get_module(self):\n module = self.__class__.__module__.split('.')\n module = \".\".join(module[:-1])\n module = module + \".\" + self._get_valid_version().module\n return module", "def _get_module(self, name):\n module = self._modules.get(name)\n if not module:\n module = importlib.import_module(name)\n self._modules[name] = module\n return module", "def get_class(fileName):\n module = __import__(fileName)\n return getattr(module, fileName)", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def load_module(file_name):\n path = temp.relpath(file_name)\n m = _load_module(path)\n logger.info(\"load_module %s\", path)\n return m", "def get_module(name) -> Module:\n if isinstance(name, str):\n obj = get_object(name)\n else:\n obj = name\n\n name = obj.__name__\n if name in modules:\n return modules[name]\n else:\n module = Module(obj)\n modules[name] = module\n return module", "def __import_from(localization, member_name, module_name=\"__builtin__\"):\n module = import_python_module(localization, module_name)\n if isinstance(module, TypeError):\n return module, None\n\n try:\n return module, module.get_type_of_member(localization, member_name)\n except Exception as exc:\n return module, TypeError(localization,\n \"Could not load member '{0}' from module '{1}': {2}\".format(member_name, module_name,\n str(exc)))", "def _import_module(name):\n __import__(name)\n return 
sys.modules[name]", "def _import_module(name):\n __import__(name)\n return sys.modules[name]", "def get_module(self, name: str) -> ModuleInstance:\n return self.modules[name]", "def _load(self):\n # Import the target module and insert it into the parent's namespace\n module = importlib.import_module(self.__name__)\n self._parent_module_globals[self._local_name] = module\n\n # Emit a warning if one was specified\n if self._warning:\n logging.warning(self._warning)\n # Make sure to only warn once.\n self._warning = None\n\n # Update this object's dict so that if someone keeps a reference to the\n # LazyLoader, lookups are efficient (__getattr__ is only called on lookups\n # that fail).\n self.__dict__.update(module.__dict__)\n\n return module", "def get_compss_module(self):\n return self.compss_module" ]
[ "0.6648748", "0.64653724", "0.64607435", "0.6405533", "0.63600814", "0.6347188", "0.63392264", "0.63091576", "0.625608", "0.6237327", "0.61681557", "0.6154078", "0.6154078", "0.6154078", "0.6154078", "0.6154078", "0.6122566", "0.6057319", "0.6050425", "0.6026159", "0.6026159", "0.6026159", "0.60214496", "0.6015974", "0.5982699", "0.596684", "0.596684", "0.59576523", "0.5933951", "0.5933264" ]
0.7351052
0
CPU mnist test for TF Training. Instance Type: c5.4xlarge. Given above parameters, registers a task with family named after this test, runs the task, and waits for the task to be stopped before doing teardown operations of instance and cluster.
def test_ecs_tensorflow_training_mnist_cpu(cpu_only, ecs_container_instance, tensorflow_training, training_cmd, ecs_cluster_name):
    instance_id, cluster_arn = ecs_container_instance
    ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def run_sm_perf_test(image_uri, num_nodes, region):\n _, framework_version = get_framework_and_version_from_tag(image_uri)\n if framework_version.startswith(\"1.\"):\n pytest.skip(\"Skipping benchmark test on TF 1.x images.\")\n\n processor = \"gpu\" if \"gpu\" in image_uri else \"cpu\"\n device_cuda_str = (\n f\"{processor}-{get_cuda_version_from_tag(image_uri)}\" if processor == \"gpu\" else processor\n )\n\n ec2_instance_type = \"p3.16xlarge\" if processor == \"gpu\" else \"c5.18xlarge\"\n\n py_version = \"py2\" if \"py2\" in image_uri else \"py37\" if \"py37\" in image_uri else \"py3\"\n\n time_str = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n commit_info = os.getenv(\"CODEBUILD_RESOLVED_SOURCE_VERSION\")\n target_upload_location = os.path.join(\n BENCHMARK_RESULTS_S3_BUCKET,\n \"tensorflow\",\n framework_version,\n \"sagemaker\",\n \"training\",\n device_cuda_str,\n py_version,\n )\n training_job_name = f\"tf{framework_version[0]}-tr-bench-{device_cuda_str}-{num_nodes}-node-{py_version}-{commit_info[:7]}-{time_str}\"\n\n # Inserting random sleep because this test starts multiple training jobs around the same time, resulting in\n # a throttling error for SageMaker APIs.\n time.sleep(Random(x=training_job_name).random() * 60)\n\n test_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"resources\")\n venv_dir = os.path.join(test_dir, \"sm_benchmark_venv\")\n\n ctx = Context()\n\n with ctx.cd(test_dir), ctx.prefix(f\"source {venv_dir}/bin/activate\"):\n log_file = f\"results-{commit_info}-{time_str}-{framework_version}-{device_cuda_str}-{py_version}-{num_nodes}-node.txt\"\n run_out = ctx.run(\n f\"timeout 45m python tf_sm_benchmark.py \"\n f\"--framework-version {framework_version} \"\n f\"--image-uri {image_uri} \"\n f\"--instance-type ml.{ec2_instance_type} \"\n f\"--node-count {num_nodes} \"\n f\"--python {py_version} \"\n f\"--region {region} \"\n f\"--job-name {training_job_name}\"\n f\"2>&1 | tee {log_file}\",\n warn=True,\n echo=True,\n )\n\n if not (run_out.ok or run_out.return_code == 124):\n target_upload_location = os.path.join(target_upload_location, \"failure_log\")\n\n ctx.run(\n f\"aws s3 cp {os.path.join(test_dir, log_file)} {os.path.join(target_upload_location, log_file)}\"\n )\n\n LOGGER.info(f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\")\n\n result_statement, throughput = _print_results_of_test(\n os.path.join(test_dir, log_file), processor\n )\n throughput /= num_nodes\n\n assert run_out.ok, (\n f\"Benchmark Test failed with return code {run_out.return_code}. 
\"\n f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\"\n )\n\n threshold_table = (\n (\n TENSORFLOW_SM_TRAINING_CPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_CPU_4NODE_THRESHOLD\n )\n if processor == \"cpu\"\n else TENSORFLOW_SM_TRAINING_GPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_GPU_4NODE_THRESHOLD\n )\n threshold = get_threshold_for_image(framework_version, threshold_table)\n LOGGER.info(\n f\"tensorflow {framework_version} sagemaker training {device_cuda_str} {py_version} \"\n f\"imagenet {num_nodes} nodes Throughput: {throughput} images/sec, threshold: {threshold} images/sec\"\n )\n assert throughput > threshold, (\n f\"tensorflow {framework_version} sagemaker training {processor} {py_version} imagenet {num_nodes} nodes \"\n f\"Benchmark Result {throughput} does not reach the threshold {threshold}\"\n )", "def test_ecs_tensorflow_training_mnist_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n num_gpus = ec2_utils.get_instance_num_gpus(instance_id)\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id,\n num_gpus=num_gpus)", "def meta_train(tasks, model, args, device, method='random', meta_iters=10000, num_updates=5, meta_batch_size=5):\n # Define logging\n os.makedirs(args.save_path, exist_ok=True)\n writer = SummaryWriter(\n os.path.join(args.save_path, 'runs', '{}'.format(datetime.now()).replace(\":\", \"_\")))\n\n header = ' Time Task Iteration Loss Accuracy'\n log_template = '{:>10} {:>25} {:10.0f} {:10.6f} {:10.6f}'\n test_template = 'Test mean: {}, Test std: {}'\n\n print(header)\n start = time.time()\n\n # Define optimizers, lr schedulers and loss function\n optimizer_bert = AdamW(params=model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n optimizer = optim.Adam(params=chain(model.proto_net.encoder.mlp.parameters(),\n model.output_layer.parameters()),\n lr=args.lr)\n scheduler_bert = get_cosine_schedule_with_warmup(optimizer_bert, 200, meta_iters)\n scheduler = get_cosine_schedule_with_warmup(optimizer, 0, meta_iters)\n # ProtoNets always have CrossEntropy loss due to softmax output\n cross_entropy = nn.CrossEntropyLoss()\n\n print('Loading Tokenizer..')\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n special_tokens_dict = {'additional_special_tokens': [\"[MNT]\", \"[URL]\"]}\n\n num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)\n print('We have added', num_added_toks, 'tokens')\n model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. 
the length of the tokenizer.\n\n # setup task sampler and task model\n sampler = TaskSampler(tasks, method=method, custom_task_ratio=args.custom_task_ratio, supp_query_split=True)\n task_model = type(model)(args)\n task_model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n\n iterations = 0\n # Iterate over the data\n train_iter = sampler.get_iter('train', tokenizer, batch_size=args.batch_size, shuffle=True)\n model.train()\n\n # setup validation task and episodes for evaluation\n val_task = get_validation_task(args)\n episodes = torch.load(args.episodes)\n\n # dummy data to overwrite old values of task model output layer\n dummy_w = torch.randn((args.mlp_dims[-1], 2))\n dummy_b = torch.randn(2)\n\n average_query_loss = 0\n best_query_loss = 1e+9\n best_test_mean = -1\n best_test_last = -1\n convergence_tolerance_cnt = 0\n # outer loop (meta-iterations)\n for i in range(meta_iters):\n grads = []\n task_losses_inner = {}\n task_accuracies_inner = {}\n task_losses_outer = {}\n task_accuracies_outer = {}\n # inner loop (sample different tasks)\n for task_sample in range(meta_batch_size):\n # clone original model\n task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n task_model.to(device)\n task_model.train()\n\n # new optimizer for every new task model\n task_optimizer_bert = optim.SGD(params=task_model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n task_optimizer = optim.SGD(params=chain(task_model.proto_net.encoder.mlp.parameters(),\n task_model.output_layer.parameters()),\n lr=args.inner_lr)\n\n # prepare support and query set\n batch = next(train_iter)\n support = batch[:3]\n query = batch[3:]\n\n # setup output layer (via meta-model's prototype network)\n proto_embeddings = model.proto_net(support[0].to(device), attention_mask=support[2].to(device))\n prototypes = model.proto_net.calculate_centroids((proto_embeddings, support[1]), sampler.get_num_classes())\n W, b = task_model.calculate_output_params(prototypes.detach())\n task_model.initialize_classifier(W, b)\n\n # train some iterations on support set\n for update in range(num_updates):\n task_optimizer_bert.zero_grad()\n task_optimizer.zero_grad()\n predictions = task_model(support[0].to(device), attention_mask=support[2].to(device))\n task_loss = cross_entropy(predictions, support[1].long().squeeze().to(device))\n task_loss.backward()\n task_optimizer.step()\n task_optimizer_bert.step()\n\n # record task losses and accuracies for logging\n task_losses_inner[sampler.get_name()] = task_loss.item()\n task_accuracies_inner[sampler.get_name()] = sampler.calculate_accuracy(predictions, support[1].to(device))\n\n # trick to add prototypes back to computation graph\n W = 2 * prototypes + (W - 2 * prototypes).detach()\n b = -prototypes.norm(dim=1)**2 + (b + prototypes.norm(dim=1)**2).detach()\n task_model.initialize_classifier(W, b, hard_replace=True)\n\n # calculate gradients for meta update on the query set\n predictions = task_model(query[0].to(device), attention_mask=query[2].to(device))\n query_loss = cross_entropy(predictions, query[1].long().squeeze().to(device))\n query_loss.backward()\n\n # record task losses and accuracies for logging\n task_losses_outer[sampler.get_name()] = query_loss.item()\n task_accuracies_outer[sampler.get_name()] = sampler.calculate_accuracy(predictions, query[1].to(device))\n average_query_loss += query_loss.item()\n\n # register W and b parameters again to avoid error 
in weight update\n W = nn.Parameter(W)\n b = nn.Parameter(b)\n task_model.initialize_classifier(W, b, hard_replace=True)\n\n # save gradients of first task model\n if task_sample == 0:\n for param in task_model.parameters():\n if param.requires_grad and param.grad is not None:\n grads.append(param.grad.clone())\n # add the gradients of all task samples\n else:\n p = 0\n for param in task_model.parameters():\n if param.requires_grad and param.grad is not None:\n grads[p] += param.grad.clone()\n p += 1\n\n # perform meta update\n # first load/add the calculated gradients in the meta-model\n # (already contains gradients from prototype calculation)\n p = 0\n for param in model.parameters():\n if param.requires_grad and param.grad is not None:\n param.grad += grads[p]\n p += 1\n # update model parameters according to the gradients from inner loop (clear gradients afterwards)\n optimizer.step()\n optimizer_bert.step()\n scheduler.step()\n scheduler_bert.step()\n optimizer.zero_grad()\n optimizer_bert.zero_grad()\n\n iterations += 1\n if iterations % args.log_every == 0:\n average_query_loss /= (args.log_every*meta_batch_size)\n iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n iter_acc = sum(task_accuracies_outer.values()) / len(task_accuracies_outer.values())\n writer.add_scalar('Meta_Average/Loss/outer'.format(sampler.get_name()), iter_loss, iterations)\n writer.add_scalar('Meta_Average/Accuracy/outer'.format(sampler.get_name()), iter_acc, iterations)\n for t in tasks:\n task_name = t.get_name()\n if task_name in task_losses_inner.keys():\n writer.add_scalar('{}/Loss/inner'.format(task_name), task_losses_inner[task_name], iterations)\n writer.add_scalar('{}/Accuracy/inner'.format(task_name), task_accuracies_inner[task_name], iterations)\n writer.add_scalar('{}/Loss/outer'.format(task_name), task_losses_outer[task_name], iterations)\n writer.add_scalar('{}/Accuracy/outer'.format(task_name), task_accuracies_outer[task_name], iterations)\n print(log_template.format(\n str(timedelta(seconds=int(time.time() - start))),\n sampler.get_name(),\n iterations,\n iter_loss,\n iter_acc))\n\n # save best snapshot\n if average_query_loss < best_query_loss:\n best_query_loss = average_query_loss\n average_query_loss = 0\n snapshot_prefix = os.path.join(args.save_path, 'best_query')\n snapshot_path = (\n snapshot_prefix +\n '_loss_{:.5f}_iter_{}_model.pt'\n ).format(best_query_loss, iterations)\n model.save_model(snapshot_path)\n # Keep only the best snapshot\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n # evaluate in k shot fashion\n if iterations % args.eval_every == 0:\n task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n test_mean, test_std = k_shot_testing(task_model, episodes, val_task, device, num_updates=args.inner_updates,\n num_test_batches=args.num_test_batches)\n writer.add_scalar('{}/Acc'.format(val_task.get_name()), test_mean, iterations)\n writer.add_scalar('{}/STD'.format(val_task.get_name()), test_std, iterations)\n print(test_template.format(test_mean, test_std), flush=True)\n if test_mean > best_test_mean:\n best_test_mean = test_mean\n snapshot_prefix = os.path.join(args.save_path, 'best_test_{}'.format(val_task.get_name()))\n snapshot_path = (\n snapshot_prefix +\n '_acc_{:.5f}_iter_{}_model.pt'\n ).format(best_test_mean, iterations)\n model.save_model(snapshot_path)\n # Keep only the best snapshot\n 
for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n \n if test_mean > best_test_last:\n best_test_last = best_test_mean\n convergence_tolerance_cnt = 0\n else:\n convergence_tolerance_cnt += 1\n\n if convergence_tolerance_cnt == args.convergence_tolerance:\n break\n\n\n # saving redundant parameters\n # Save model checkpoints.\n if iterations % args.save_every == 0:\n iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n snapshot_prefix = os.path.join(args.save_path, 'snapshot')\n snapshot_path = (\n snapshot_prefix +\n '_iter_{}_loss_{}_model.pt'\n ).format(iterations, iter_loss)\n logging.debug('Saving model...')\n model.save_model(snapshot_path)\n # Keep only the last snapshot\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n writer.close()", "def run_time_operation(self, learning_option, cluster):\r\n\r\n # whether or not test procedure\r\n is_train = tf.placeholder_with_default(True, shape=())\r\n learning_option['is_train'] = is_train\r\n\r\n # get worker info: worker num, device type, device num\r\n device = self.get_attr('device')\r\n num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n # construct API\r\n def apiConstructor():\r\n # CIFAR-10 images: [224, 224, 3]\r\n # label: [1000]\r\n def train_in():\r\n x, y = learning_option.get('train_imagenet')\r\n return x, y\r\n def test_in():\r\n x, y = learning_option.get('test_imagenet')\r\n return x, y\r\n\r\n images, labels = tf.cond(is_train, train_in, test_in)\r\n # get output dimension\r\n outdim = list(images.get_shape()[i].value for i in xrange(len(images.get_shape())))\r\n\r\n # set output\r\n self.set_output('image', images)\r\n self.set_output('label', labels)\r\n self.set_dimension('image', outdim)\r\n\r\n # set tf summary\r\n tf.summary.image(self.name, images, max_outputs=10)\r\n\r\n with tf.variable_scope(self.name):\r\n # single node, model parallelism: explicit worker mapping\r\n # data parallelism: equally duplicate model\r\n if learning_option.get(\"parallel\", None) != \"DP\":\r\n with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n apiConstructor()\r\n else:\r\n apiConstructor()", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if 
self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)", "def generate_mnist_datasets(\n datapoints_per_task,\n K_list,\n cir_inner_loop_list, \n test_task_idx, \n val_task_idx,\n n_finetune_sets):\n\n # arbitrarily chosen, class-imbalance rate in outer and inner training loops\n cir_outer_loop = 0.5\n cir_inner_loop = 0.5\n # class-imbalance rate in the test sets of the test and validation tasks\n cir_test = 0.5\n # arbitrarily chosen, percentage of data that will be used in the inner training loop\n percent_data_inner_loop = 0.5\n\n percent_data_finetune_val = 0.8\n\n n_test_set = 4000\n\n test_task_idx, val_task_idx = test_task_idx, val_task_idx\n\n finetune_sets_per_K_cir = {}\n test_task_test_set, val_task = {}, {}\n \n\n train_task_list_inner, train_task_list_outer = [], []\n\n train_tasks_idxs = [i for i in range(0,10) if i not in [val_task_idx, test_task_idx]]\n\n base_path = '/home/USER/Documents'\n if (not (os.path.exists(base_path))):\n base_path = '/home/ubuntu/Projects'\n train_images, train_labels = loadlocal_mnist(\n images_path= base_path + '/MAML/raw_data/MNIST_data/train-images-idx3-ubyte', \n labels_path= base_path + '/MAML/raw_data/MNIST_data/train-labels-idx1-ubyte')\n\n test_images, test_labels = loadlocal_mnist(\n images_path= base_path + '/MAML/raw_data/MNIST_data/t10k-images-idx3-ubyte', \n labels_path= base_path + '/MAML/raw_data/MNIST_data/t10k-labels-idx1-ubyte')\n\n\n train_images, test_images = train_images.reshape((-1,28,28))/255.0, test_images.reshape((-1,28,28))/255.0\n images = np.concatenate((train_images, test_images))\n labels = np.concatenate((train_labels, test_labels))\n\n test_task_normal_indexes, val_task_normal_indexes = list(np.nonzero(labels == test_task_idx)[0]), list(np.nonzero(train_labels == val_task_idx)[0])\n test_task_X_normal, val_task_X_normal = images[test_task_normal_indexes],train_images[val_task_normal_indexes]\n test_task_Y_normal, val_task_Y_normal = np.zeros_like(labels[test_task_normal_indexes]), np.zeros_like(train_labels[val_task_normal_indexes])\n\n\n # val and test task have anomalies (samples of other numbers) that are not used for training\n # besides the two sets of anomalies (one for val task and one for test task are disjoint)\n test_task_anomalous_indexes = list(np.nonzero(test_labels[:5000] != test_task_idx)[0])\n val_task_anomalous_indexes= [index for index, element in enumerate(list(test_labels[5000:])) if element not in [val_task_idx, test_task_idx]]\n\n\n test_task_X_anomalous, val_task_X_anomalous = 
test_images[:5000][test_task_anomalous_indexes],test_images[5000:][val_task_anomalous_indexes]\n test_task_Y_anomalous, val_task_Y_anomalous = np.ones_like(test_labels[:5000][test_task_anomalous_indexes]), np.ones_like(test_labels[5000:][val_task_anomalous_indexes])\n\n test_task_X, val_task_X = np.concatenate((test_task_X_normal, test_task_X_anomalous)), np.concatenate((val_task_X_normal, val_task_X_anomalous))\n test_task_Y, val_task_Y = np.expand_dims(np.concatenate((test_task_Y_normal, test_task_Y_anomalous)),-1), np.expand_dims(np.concatenate((val_task_Y_normal, val_task_Y_anomalous)),-1)\n\n\n train_tasks_X_list, train_tasks_Y_list = [], []\n for task_idx in train_tasks_idxs:\n train_task_normal_indexes = list(np.nonzero(train_labels == task_idx)[0]) \n train_task_anomalous_indexes = [index for index, element in enumerate(list(train_labels)) if element not in [task_idx, val_task_idx, test_task_idx]]\n assert(len(np.nonzero(train_labels[train_task_anomalous_indexes] == val_task_idx)[0]) == 0)\n assert(len(np.nonzero(train_labels[train_task_anomalous_indexes] == test_task_idx)[0]) == 0)\n train_task_X_normal, train_task_X_anomalous = train_images[train_task_normal_indexes], train_images[train_task_anomalous_indexes]\n train_task_Y_normal, train_task_Y_anomalous = np.zeros_like(train_labels[train_task_normal_indexes]), np.ones_like(train_labels[train_task_anomalous_indexes])\n train_task_X, train_task_Y = np.concatenate((train_task_X_normal, train_task_X_anomalous)), np.concatenate((train_task_Y_normal, train_task_Y_anomalous))\n train_tasks_X_list.append(train_task_X)\n train_tasks_Y_list.append(np.expand_dims(train_task_Y,-1))\n\n\n\n # building test task sets of data\n normal_indexes, anomaly_indexes = list(np.nonzero(test_task_Y == 0)[0]), list(np.nonzero(test_task_Y == 1)[0])\n n_test_set_normal = int(n_test_set*cir_test)\n test_set_normal_indexes = random.sample(normal_indexes, n_test_set_normal)\n test_set_anomaly_indexes = random.sample(anomaly_indexes, n_test_set - n_test_set_normal)\n test_set_indexes = []\n test_set_indexes += test_set_normal_indexes\n test_set_indexes += test_set_anomaly_indexes\n\n test_task_test_set['test_X'], test_task_test_set['test_Y'] = test_task_X[test_set_indexes], test_task_Y[test_set_indexes]\n\n\n #shuffle\n s_test = np.arange(test_task_test_set['test_X'].shape[0])\n np.random.shuffle(s_test)\n test_task_test_set['test_X'], test_task_test_set['test_Y'] = test_task_test_set['test_X'][s_test], test_task_test_set['test_Y'][s_test]\n\n rest_normal_indexes = [index for index in normal_indexes if index not in test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in test_set_anomaly_indexes]\n\n\n for K in K_list:\n finetune_sets_per_cir = {}\n for cir in cir_inner_loop_list:\n\n rest_normal_indexes = [index for index in normal_indexes if index not in test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in test_set_anomaly_indexes]\n \n finetune_sets_list = []\n\n disjoint = False\n if(cir*K*n_finetune_sets<len(rest_normal_indexes)):\n disjoint = True\n\n n_finetune_normal = int(K*cir)\n n_finetune_anomaly = K - n_finetune_normal\n for i in range(n_finetune_sets):\n # if enough for disjoint do that\n # else sample randomly\n # store in a dict with keys cir_K\n finetune_normal_indexes = random.sample(rest_normal_indexes, n_finetune_normal)\n finetune_anomaly_indexes = random.sample(rest_anomaly_indexes, n_finetune_anomaly)\n finetune_indexes = []\n finetune_indexes 
+= finetune_normal_indexes\n finetune_indexes += finetune_anomaly_indexes\n finetune_set = {}\n finetune_set['finetune_X'], finetune_set['finetune_Y'] = test_task_X[finetune_indexes], test_task_Y[finetune_indexes]\n\n #shuffle\n s_finetune = np.arange(finetune_set['finetune_X'].shape[0])\n np.random.shuffle(s_finetune)\n finetune_set['finetune_X'], finetune_set['finetune_Y'] = finetune_set['finetune_X'][s_finetune], finetune_set['finetune_Y'][s_finetune]\n\n finetune_sets_list.append(finetune_set)\n \n if(disjoint):\n rest_normal_indexes = [index for index in rest_normal_indexes if index not in finetune_normal_indexes]\n rest_anomaly_indexes = [index for index in rest_anomaly_indexes if index not in finetune_anomaly_indexes]\n\n finetune_sets_per_cir[str(cir)] = finetune_sets_list\n\n finetune_sets_per_K_cir[str(K)] = finetune_sets_per_cir\n\n\n #building val task sets of data\n normal_indexes, anomaly_indexes = list(np.nonzero(val_task_Y == 0)[0]), list(np.nonzero(val_task_Y == 1)[0])\n n_val_finetune = int(percent_data_finetune_val*datapoints_per_task)\n n_val_test_set = datapoints_per_task - n_val_finetune\n n_val_test_set_normal = int(n_val_test_set*cir_test)\n val_test_set_normal_indexes = random.sample(normal_indexes, n_val_test_set_normal)\n\n\n val_test_set_anomaly_indexes = random.sample(anomaly_indexes, n_val_test_set - n_val_test_set_normal)\n val_test_set_indexes = []\n val_test_set_indexes += val_test_set_normal_indexes\n val_test_set_indexes += val_test_set_anomaly_indexes\n val_task['test_X'], val_task['test_Y'] = val_task_X[val_test_set_indexes], val_task_Y[val_test_set_indexes]\n\n\n rest_normal_indexes = [index for index in normal_indexes if index not in val_test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in val_test_set_anomaly_indexes]\n\n n_val_finetune_normal = int(n_val_finetune*cir_inner_loop)\n val_finetune_normal_indexes = random.sample(rest_normal_indexes, n_val_finetune_normal)\n val_finetune_anomaly_indexes = random.sample(rest_anomaly_indexes, n_val_finetune - n_val_finetune_normal)\n val_finetune_indexes = []\n val_finetune_indexes += val_finetune_normal_indexes\n val_finetune_indexes += val_finetune_anomaly_indexes\n\n val_task['finetune_X'], val_task['finetune_Y'] = val_task_X[val_finetune_indexes], val_task_Y[val_finetune_indexes]\n\n #shuffle\n s_val_finetune = np.arange(val_task['finetune_X'].shape[0])\n s_val_test = np.arange(val_task['test_X'].shape[0])\n np.random.shuffle(s_val_finetune)\n np.random.shuffle(s_val_test)\n\n val_task['finetune_X'], val_task['finetune_Y'] = val_task['finetune_X'][s_val_finetune], val_task['finetune_Y'][s_val_finetune]\n val_task['test_X'], val_task['test_Y'] = val_task['test_X'][s_val_test], val_task['test_Y'][s_val_test]\n\n\n\n # building sets of data of the training tasks\n for task_X, task_Y in zip(train_tasks_X_list, train_tasks_Y_list):\n normal_indexes, anomaly_indexes = list(np.nonzero(task_Y == 0)[0]), list(np.nonzero(task_Y == 1)[0])\n\n n_inner_loop = int(percent_data_inner_loop*datapoints_per_task)\n n_inner_loop_normal = int(n_inner_loop*cir_inner_loop)\n n_outer_loop = datapoints_per_task - n_inner_loop\n n_outer_loop_normal = int(n_outer_loop*cir_outer_loop)\n \n inner_loop_normal_indexes = random.sample(normal_indexes, n_inner_loop_normal)\n inner_loop_anomaly_indexes = random.sample(anomaly_indexes, n_inner_loop - n_inner_loop_normal)\n inner_loop_indexes = []\n inner_loop_indexes += inner_loop_normal_indexes\n inner_loop_indexes += 
inner_loop_anomaly_indexes\n\n train_task_inner_X, train_task_inner_Y = task_X[inner_loop_indexes], task_Y[inner_loop_indexes]\n\n rest_normal_indexes = [index for index in normal_indexes if index not in inner_loop_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in inner_loop_anomaly_indexes]\n\n \n outer_loop_normal_indexes = random.sample(rest_normal_indexes, n_outer_loop_normal)\n outer_loop_anomaly_indexes = random.sample(rest_anomaly_indexes, n_outer_loop - n_outer_loop_normal)\n outer_loop_indexes = []\n outer_loop_indexes += outer_loop_normal_indexes\n outer_loop_indexes += outer_loop_anomaly_indexes\n\n train_task_outer_X, train_task_outer_Y = task_X[outer_loop_indexes], task_Y[outer_loop_indexes]\n\n\n s_inner = np.arange(train_task_inner_X.shape[0])\n s_outer = np.arange(train_task_outer_X.shape[0])\n np.random.shuffle(s_inner)\n np.random.shuffle(s_outer)\n train_task_list_inner.append([train_task_inner_X[s_inner],train_task_inner_Y[s_inner]])\n train_task_list_outer.append([train_task_outer_X[s_outer],train_task_outer_Y[s_outer]])\n\n\n\n train_tasks_inner_X = np.stack([train_task_list_inner[i][0]\n for i in range(len(train_task_list_inner))], 0)\n train_tasks_inner_Y = np.stack([train_task_list_inner[i][1]\n for i in range(len(train_task_list_inner))], 0)\n train_tasks_outer_X = np.stack([train_task_list_outer[i][0]\n for i in range(len(train_task_list_outer))], 0)\n train_tasks_outer_Y = np.stack([train_task_list_outer[i][1]\n for i in range(len(train_task_list_outer))], 0)\n\n \n train_tasks = {'X_train_inner': train_tasks_inner_X,\n 'Y_train_inner': train_tasks_inner_Y,\n 'X_train_outer': train_tasks_outer_X,\n 'Y_train_outer': train_tasks_outer_Y\n }\n\n\n return train_tasks, val_task, test_task_test_set, finetune_sets_per_K_cir", "def main(params):\n params = run_train.prepare_experiment_folder(params, FOLDER_EXPERIMENT)\n\n # run_train.check_pathes_patterns(paths)\n tl_expt.set_experiment_logger(params['path_expt'])\n logging.info('COMPUTER: \\n%r', platform.uname())\n logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))\n\n tl_expt.create_subfolders(params['path_expt'], LIST_SUBFOLDER)\n\n path_csv = os.path.join(params['path_expt'], NAME_CSV_TRIPLES)\n df_paths = get_csv_triplets(\n params['path_list'], path_csv, params['path_images'], params['path_segms'], force_reload=FORCE_RERUN\n )\n\n dict_classif = seg_clf.load_classifier(params['path_classif'])\n params_clf = dict_classif['params']\n params_clf.update(params)\n logging.info(tl_expt.string_dict(params, desc='UPDATED PARAMETERS'))\n\n # perform on new images\n df_stat = pd.DataFrame()\n _wrapper_detection = partial(\n load_compute_detect_centers,\n params=params_clf,\n path_classif=params['path_classif'],\n path_output=params['path_expt'],\n )\n iterate = tl_expt.WrapExecuteSequence(_wrapper_detection, df_paths.iterrows(), nb_workers=params['nb_workers'])\n for dict_center in iterate:\n df_stat = df_stat.append(dict_center, ignore_index=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES_TEMP))\n\n df_stat.set_index(['image'], inplace=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES))\n logging.info('STATISTIC: \\n %r', df_stat.describe())", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n\n if benchmark_spec.tpus:\n mnist_benchmark_script = 'mnist_tpu.py'\n mnist_benchmark_cmd = ('cd tpu/models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 
'python {script} '\n '--data_dir={data_dir} '\n '--iterations={iterations} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size}'.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n iterations=benchmark_spec.iterations,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n else:\n mnist_benchmark_script = 'mnist.py'\n mnist_benchmark_cmd = ('cd models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size} '.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n mnist_benchmark_cmd = '{env} {cmd}'.format(\n env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)\n samples = []\n metadata = CreateMetadataDict(benchmark_spec)\n\n if benchmark_spec.train_steps > 0:\n if benchmark_spec.tpus:\n tpu = benchmark_spec.tpu_groups['train'].GetName()\n num_shards = '--num_shards={}'.format(\n benchmark_spec.tpu_groups['train'].GetNumShards())\n else:\n tpu = num_shards = ''\n\n if benchmark_spec.tpus:\n mnist_benchmark_train_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '\n '{num_shards} --noenable_predict'.format(\n cmd=mnist_benchmark_cmd,\n tpu=tpu,\n use_tpu=bool(benchmark_spec.tpus),\n train_steps=benchmark_spec.train_steps,\n num_shards=num_shards))\n else:\n mnist_benchmark_train_cmd = (\n '{cmd} --train_epochs={train_epochs} '.format(\n cmd=mnist_benchmark_cmd,\n train_epochs=benchmark_spec.train_epochs))\n\n start = time.time()\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)\n elapsed_seconds = (time.time() - start)\n samples.extend(MakeSamplesFromTrainOutput(\n metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))\n\n if benchmark_spec.eval_steps > 0:\n if benchmark_spec.tpus:\n mnist_benchmark_eval_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'\n .format(\n cmd=mnist_benchmark_cmd,\n use_tpu=bool(benchmark_spec.tpus),\n tpu=benchmark_spec.tpu_groups['eval'].GetName(),\n eval_steps=benchmark_spec.eval_steps))\n else:\n mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(\n cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))\n\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)\n samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,\n elapsed_seconds))\n return samples", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def train_distributed():\n # Distributed stuff learnt from this repo: https://github.com/GoogleCloudPlatform/cloudml-dist-\n # mnist-example/blob/master/trainer/task.py\n\n # For Distributed TensorFlow\n env = json.loads(os.environ.get('TF_CONFIG', '{}'))\n cluster_info = env.get('cluster')\n cluster_spec = tf.train.ClusterSpec(cluster_info)\n task_info = env.get('task')\n job_name, task_index = task_info['type'], task_info['index']\n\n device_fn = tf.train.replica_device_setter(\n cluster=cluster_spec,\n worker_device='/job:%s/task:%d' % (job_name, task_index))\n\n print(\"Start job:%s, index:%d\" % (job_name, task_index))\n\n server = tf.train.Server(cluster_spec,\n job_name=job_name, task_index=task_index)\n\n # Start a parameter server node\n if job_name == 'ps':\n 
server.join()\n\n # Start a master/worker node\n if job_name == 'master' or job_name == 'worker':\n is_chief = (job_name == 'master')\n\n with tf.Graph().as_default() as graph: # TODO necessary?\n with tf.device(device_fn):\n # Prepare the data\n train_data, test_data, embeddings_file = prepare_data()\n\n # Create the model\n print(\"(%s,%d) Creating %d layers of %d units.\" %\n (job_name, task_index, FLAGS.num_layers, FLAGS.size))\n model = create_model(False)\n\n # Create train_dir\n if is_chief:\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n\n # TensorBoard summaries\n (test_loss, test_perplexity, bucket_loss_placeholders,\n bucket_perplexity_placeholders, summary, summary_writer) = create_summary_objects(graph)\n\n # Create supervisor\n init_op = tf.global_variables_initializer()\n\n # Create Supervisor. Disabling checkpoints and summaries, because we do that manually\n sv = tf.train.Supervisor(is_chief=is_chief, logdir=FLAGS.train_dir, init_op=init_op,\n init_fn=lambda session: after_init(session, model, embeddings_file),\n saver=model.saver, global_step=model.global_step,\n save_model_secs=0, save_summaries_secs=0, summary_op=None,\n summary_writer=None)\n\n with sv.managed_session(server.target) as sess:\n train(sess, model, train_data, test_data, summary, summary_writer, test_loss,\n test_perplexity, bucket_loss_placeholders, bucket_perplexity_placeholders,\n is_chief, job_name, task_index, sv.should_stop)\n sv.stop()", "def run_test(filepath):\n num_class = 120 # dogbreeds class\n model = Resnet50MO(num_class, checkpoint_path=None)\n\n # image settings\n crop_size = model.input_size\n scale_size = model.input_size\n input_size = model.input_size\n input_mean = model.input_mean\n input_std = model.input_std\n\n # hyperparams settings\n epochs = 1\n batch_size = 32 # mini-batch-size\n learning_rate = 0.01\n momentum = 0.5\n decay_factor = 10\n eval_freq = 5 # in epochs\n\n # data generator settings: dataset and dataloader\n train_dataset = DogImageset(filepath, input_size,\n input_mean=input_mean, input_std=input_std)\n val_dataset = DogImageset(filepath, input_size,\n input_mean=input_mean, input_std=input_std)\n \n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)\n\n # Loss and backprop settings\n model.cuda()\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(\n model.parameters(),\n lr=learning_rate,\n momentum=momentum\n )\n\n run_model_train_test(model, train_loader, criterion, optimizer)", "def run_universal_demo(args, use_gpu: bool = True) -> None:\n if \"scannet\" in args.dataset:\n args.img_name_unique = False\n else:\n args.img_name_unique = True\n\n args.u_classes = names_utils.get_universal_class_names()\n args.print_freq = 10\n\n args.split = \"test\"\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in args.test_gpu)\n logger.info(args)\n logger.info(\"=> creating model ...\")\n args.num_model_classes = len(args.u_classes)\n\n itask = InferenceTask(\n args,\n base_size=args.base_size,\n crop_h=args.test_h,\n crop_w=args.test_w,\n input_file=args.input_file,\n model_taxonomy=\"universal\",\n eval_taxonomy=\"universal\",\n scales=args.scales,\n )\n itask.execute()", "def backend_train_test_loop(e=None, timeout=30,\n is_compute_contributivity='True',\n is_parallelize=''):\n if is_parallelize == '':\n is_parallelize = None\n else:\n is_parallelize = strtobool(is_parallelize)\n\n from 
databoard.db_tools import backend_train_test_loop\n is_compute_contributivity = strtobool(is_compute_contributivity)\n backend_train_test_loop(\n e, timeout, is_compute_contributivity, is_parallelize)", "def test_training():\n config = SmartDict()\n\n config.NETWORK_CLASS = LMBiSeNet\n config.DATASET_CLASS = DummyCamvid\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [128, 160]\n config.BATCH_SIZE = 2\n config.TEST_STEPS = 1\n config.MAX_STEPS = 2\n config.SAVE_CHECKPOINT_STEPS = 1\n config.KEEP_CHECKPOINT_MAX = 5\n config.SUMMARISE_STEPS = 1\n config.IS_PRETRAIN = False\n config.TASK = Tasks.SEMANTIC_SEGMENTATION\n\n # network model config\n config.NETWORK = SmartDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {\"learning_rate\": 0.001}\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = \"NHWC\"\n\n # daasegt config\n config.DATASET = SmartDict()\n config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = \"NHWC\"\n\n environment.init(\"test_lm_bisenet\")\n prepare_dirs(recreate=True)\n start_training(config, profile_step=1)", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def run_task(data_dir, task_id):\n print(\"Train and test for task %d ...\" % task_id)\n\n print(\"We are going to use this\")\n \n\n # Parse data\n train_files = glob.glob('%s/qa3_*_train.txt' % (data_dir, task_id))\n test_files = glob.glob('%s/qa3_*_test.txt' % (data_dir, task_id))\n\n dictionary = {\"nil\": 0}\n train_story, train_questions, train_qstory = parse_babi_task(train_files, dictionary, False)\n test_story, test_questions, test_qstory = parse_babi_task(test_files, dictionary, False)\n\n general_config = BabiConfig(train_story, train_questions, dictionary)\n\n\n # #### R: this line build a empty model to train\n # memory, model, loss = build_model(general_config)\n\n # if general_config.linear_start:\n # train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n # else:\n # train(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n\n\n\n # 
memory, model, loss = build_model(general_config)\n\n # this line\n test(test_story, test_questions, test_qstory, memory, model, loss, general_config)", "def main():\n grid_tester_cpu = GridTesterCPU()\n\n # parse args, load configuration and create all required objects.\n grid_tester_cpu.setup_grid_experiment()\n\n # GO!\n grid_tester_cpu.run_grid_experiment()", "def test_multitask_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )", "def test_ecs_tensorflow_training_fasterrcnn_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n num_gpus = ec2_utils.get_instance_num_gpus(instance_id)\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id,\n num_gpus=num_gpus)", "def test_multitask(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )", "def evaluate(sess, images_ph, labels_ph, softmax, mnist, config, task):\n\n print 'Evaluating on {} task ({}x{}, {} distractors) using {} glimpses (at {} scales)'.format(\n task, config.new_size, config.new_size, config.n_distractors,\n config.num_glimpses, config.n_patches)\n\n # Evaluation\n test_acc = []\n val_acc = []\n\n for k, dataset in enumerate([mnist.validation, mnist.test]):\n\n steps_per_epoch = dataset.num_examples // config.eval_batch_size\n correct_cnt = 0\n num_samples = steps_per_epoch * config.batch_size\n # loc_net.sampling = True\n\n for test_step in tqdm(xrange(steps_per_epoch)):\n\n images, labels = dataset.next_batch(config.batch_size)\n images = images.reshape((-1, config.original_size, config.original_size, 1))\n labels_bak = labels\n\n if task == 'translated':\n images = translate(images, width=config.new_size, height=config.new_size)\n elif task == 'cluttered':\n images = clutter(images,\n dataset.images.reshape((-1, config.original_size, config.original_size, 1)),\n width=config.new_size, height=config.new_size, n_patches=config.n_distractors\n )\n elif task == 'cluttered_var':\n images, _, _, _ = clutter_rnd(images,\n train_data=dataset.images.reshape(\n (-1, config.original_size, config.original_size, 1)),\n lim=config.distractor_range,\n color_digits=config.color_digits,\n color_noise=config.color_noise,\n width=config.new_size, height=config.new_size, norm=True)\n\n # else:\n # print 'original mnist data ({}x{}).'.format(config.original_size,config.original_size)\n\n # Duplicate M times (average prediction over M repeats)\n images = np.tile(images, [config.M, 1, 1, 1])\n labels = np.tile(labels, [config.M])\n\n softmax_val = sess.run(softmax,\n feed_dict={\n images_ph: images,\n labels_ph: labels\n })\n softmax_val = np.reshape(softmax_val,\n [config.M, -1, config.num_classes])\n softmax_val = np.mean(softmax_val, 0)\n\n pred_labels_val = np.argmax(softmax_val, 1)\n correct_cnt += np.sum(pred_labels_val == labels_bak)\n acc = correct_cnt / float(num_samples)\n\n if k == 0:\n print '\\nVal accuracy\\t{:4.4f} ({:4.4f} error)'.format(100 * acc, 100 - 100 * acc)\n val_acc = acc\n else:\n print 'Test accuracy\\t{:4.4f} ({:4.4f} error)\\n'.format(100 * acc, 100 - 100 * acc)\n test_acc = acc\n\n return test_acc, 
val_acc", "def do_testing(gpu=0):\n # expected environment variables\n os.environ[\"BERT_BASE_DIR\"] = \"pretrained/cased_L-12_H-768_A-12\"\n os.environ[\"DATA_DIR\"] = \"dataset\"\n os.environ[\"OUTPUT_DIR\"] = \"output\"\n assert os.environ.get(\"BERT_BASE_DIR\") is not None\n assert os.environ.get(\"DATA_DIR\") is not None\n assert os.environ.get(\"OUTPUT_DIR\") is not None\n\n # set the gpu index\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu)\n # set the required flags\n FLAGS.task_name = \"topic\"\n FLAGS.do_predict = True\n FLAGS.data_dir = os.environ.get(\"DATA_DIR\")\n FLAGS.vocab_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"vocab.txt\")\n FLAGS.bert_config_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_config.json\")\n FLAGS.init_checkpoint = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_model.ckpt\")\n FLAGS.do_lower_case = False\n FLAGS.max_seq_length = 128\n FLAGS.output_dir = os.environ.get(\"OUTPUT_DIR\")\n\n run_classifier.main(0)", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def run_mnist(flags_obj):\n model_function = model_fn\n config = tf.estimator.RunConfig(protocol='grpc+verbs',\n save_checkpoints_secs=300,\n save_summary_steps=200,\n log_step_count_steps=200)\n data_format = flags_obj.data_format\n if data_format is None:\n data_format = ('channels_first'\n if tf.test.is_built_with_cuda() else 'channels_last')\n mnist_classifier = tf.estimator.Estimator(\n model_fn=model_function,\n model_dir=flags_obj.model_dir,\n config=config,\n params={\n 'data_format': data_format,\n })\n\n # Set up training and evaluation input functions.\n def train_input_fn():\n \"\"\"Prepare data for training.\"\"\"\n # When choosing shuffle buffer 
sizes, larger sizes result in better\n # randomness, while smaller sizes use less memory. MNIST is a small\n # enough dataset that we can easily shuffle the full epoch.\n ds = dtrain(flags_obj.data_dir)\n ds = ds.cache().shuffle(buffer_size=50000).batch(flags_obj.batch_size)\n\n # Iterate through the dataset a set number (`epochs_between_evals`) of times\n # during each training session.\n ds = ds.repeat()\n return ds\n\n def eval_input_fn():\n return dtest(flags_obj.data_dir).batch(\n 100).make_one_shot_iterator().get_next()\n\n \n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=flags_obj.train_steps)\n eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,throttle_secs=300)\n tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)\n \n '''# Train and evaluate model.\n for _ in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):\n mnist_classifier.train(input_fn=train_input_fn, hooks=train_hooks)\n eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)\n print('\\nEvaluation results:\\n\\t%s\\n' % eval_results)\n\n if model_helpers.past_stop_threshold(flags_obj.stop_threshold,\n eval_results['accuracy']):\n break\n '''\n # Export the model\n if flags_obj.export_dir is not None:\n image = tf.placeholder(tf.float32, [None, 28, 28])\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'image': image,\n })\n mnist_classifier.export_savedmodel(flags_obj.export_dir, input_fn)", "def test_image_task_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def main(_):\n\n params = create_params()\n\n assert params[\"train_dataset_path\"]\n assert params[\"eval_dataset_path\"]\n\n input_fn = input_fn_from_files(\n params[\"train_dataset_path\"])\n eval_input_fn = input_fn_from_files(\n params[\"eval_dataset_path\"])\n\n feature_columns = create_feature_columns(params)\n\n model_fn = create_model_fn(feature_columns)\n estimator = create_tpu_estimator(model_fn, feature_columns, params)\n\n for cycle_index in range(params[\"train_epochs\"]):\n tf.logging.info(\"Starting a training cycle: {}/{}\".format(\n cycle_index + 1, params[\"train_epochs\"]))\n estimator.train(input_fn=input_fn, steps=params[\"steps_per_epoch\"])\n tf.logging.info(\"Beginning evaluation.\")\n eval_results = estimator.evaluate(eval_input_fn,\n steps=params[\"num_eval_steps\"])\n tf.logging.info(\"Evaluation complete.\")\n\n recall_1 = float(eval_results[\"recall@1\"])\n recall_5 = float(eval_results[\"recall@5\"])\n loss = float(eval_results[\"loss\"])\n tf.logging.info(\n \"Iteration {}: recall@1 = {:.4f}, recall@5 = {:.4f}, Loss = {:.4f}\"\n .format(cycle_index + 1, recall_1, recall_5, loss))", "def task():\n\n\tprint('Example task executed.')", "def run_all_tasks(data_dir):\n print(\"Training and testing for all tasks ...\")\n for t in range(20):\n run_task(data_dir, task_id=t + 1)" ]
[ "0.67484915", "0.63618076", "0.60620654", "0.5980725", "0.5914939", "0.59113973", "0.59051156", "0.58966136", "0.5866718", "0.58654135", "0.5864061", "0.58606243", "0.5860597", "0.58368456", "0.58136004", "0.5806316", "0.57960474", "0.5792729", "0.57621115", "0.5742489", "0.57239044", "0.569961", "0.5672451", "0.5668706", "0.5661857", "0.56564087", "0.5644287", "0.5631706", "0.5630299", "0.5612329" ]
0.6672013
1
GPU mnist test for TF Training Instance Type p3.2xlarge Given above parameters, registers a task with family named after this test, runs the task, and waits for the task to be stopped before doing teardown operations of instance and cluster.
def test_ecs_tensorflow_training_mnist_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd, ecs_cluster_name): instance_id, cluster_arn = ecs_container_instance num_gpus = ec2_utils.get_instance_num_gpus(instance_id) ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id, num_gpus=num_gpus)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def test_ecs_tensorflow_training_mnist_cpu(cpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id)", "def test_ecs_tensorflow_training_fasterrcnn_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n num_gpus = ec2_utils.get_instance_num_gpus(instance_id)\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id,\n num_gpus=num_gpus)", "def run_sm_perf_test(image_uri, num_nodes, region):\n _, framework_version = get_framework_and_version_from_tag(image_uri)\n if framework_version.startswith(\"1.\"):\n pytest.skip(\"Skipping benchmark test on TF 1.x images.\")\n\n processor = \"gpu\" if \"gpu\" in image_uri else \"cpu\"\n device_cuda_str = (\n f\"{processor}-{get_cuda_version_from_tag(image_uri)}\" if processor == \"gpu\" else processor\n )\n\n ec2_instance_type = \"p3.16xlarge\" if processor == \"gpu\" else \"c5.18xlarge\"\n\n py_version = \"py2\" if \"py2\" in image_uri else \"py37\" if \"py37\" in image_uri else \"py3\"\n\n time_str = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n commit_info = os.getenv(\"CODEBUILD_RESOLVED_SOURCE_VERSION\")\n target_upload_location = os.path.join(\n BENCHMARK_RESULTS_S3_BUCKET,\n \"tensorflow\",\n framework_version,\n \"sagemaker\",\n \"training\",\n device_cuda_str,\n py_version,\n )\n training_job_name = f\"tf{framework_version[0]}-tr-bench-{device_cuda_str}-{num_nodes}-node-{py_version}-{commit_info[:7]}-{time_str}\"\n\n # Inserting random sleep because this test starts multiple training jobs around the same time, resulting in\n # a throttling error for SageMaker APIs.\n time.sleep(Random(x=training_job_name).random() * 60)\n\n test_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"resources\")\n venv_dir = os.path.join(test_dir, \"sm_benchmark_venv\")\n\n ctx = Context()\n\n with ctx.cd(test_dir), ctx.prefix(f\"source {venv_dir}/bin/activate\"):\n log_file = f\"results-{commit_info}-{time_str}-{framework_version}-{device_cuda_str}-{py_version}-{num_nodes}-node.txt\"\n run_out = ctx.run(\n f\"timeout 45m python tf_sm_benchmark.py \"\n f\"--framework-version {framework_version} \"\n f\"--image-uri {image_uri} \"\n f\"--instance-type ml.{ec2_instance_type} \"\n f\"--node-count {num_nodes} \"\n f\"--python {py_version} \"\n f\"--region {region} \"\n f\"--job-name {training_job_name}\"\n f\"2>&1 | tee {log_file}\",\n warn=True,\n echo=True,\n )\n\n if not (run_out.ok or run_out.return_code == 124):\n target_upload_location = os.path.join(target_upload_location, \"failure_log\")\n\n ctx.run(\n f\"aws s3 cp {os.path.join(test_dir, log_file)} {os.path.join(target_upload_location, log_file)}\"\n )\n\n LOGGER.info(f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\")\n\n result_statement, throughput = _print_results_of_test(\n os.path.join(test_dir, log_file), processor\n )\n throughput /= num_nodes\n\n assert run_out.ok, (\n f\"Benchmark Test failed with return code {run_out.return_code}. 
\"\n f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\"\n )\n\n threshold_table = (\n (\n TENSORFLOW_SM_TRAINING_CPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_CPU_4NODE_THRESHOLD\n )\n if processor == \"cpu\"\n else TENSORFLOW_SM_TRAINING_GPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_GPU_4NODE_THRESHOLD\n )\n threshold = get_threshold_for_image(framework_version, threshold_table)\n LOGGER.info(\n f\"tensorflow {framework_version} sagemaker training {device_cuda_str} {py_version} \"\n f\"imagenet {num_nodes} nodes Throughput: {throughput} images/sec, threshold: {threshold} images/sec\"\n )\n assert throughput > threshold, (\n f\"tensorflow {framework_version} sagemaker training {processor} {py_version} imagenet {num_nodes} nodes \"\n f\"Benchmark Result {throughput} does not reach the threshold {threshold}\"\n )", "def do_testing(gpu=0):\n # expected environment variables\n os.environ[\"BERT_BASE_DIR\"] = \"pretrained/cased_L-12_H-768_A-12\"\n os.environ[\"DATA_DIR\"] = \"dataset\"\n os.environ[\"OUTPUT_DIR\"] = \"output\"\n assert os.environ.get(\"BERT_BASE_DIR\") is not None\n assert os.environ.get(\"DATA_DIR\") is not None\n assert os.environ.get(\"OUTPUT_DIR\") is not None\n\n # set the gpu index\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu)\n # set the required flags\n FLAGS.task_name = \"topic\"\n FLAGS.do_predict = True\n FLAGS.data_dir = os.environ.get(\"DATA_DIR\")\n FLAGS.vocab_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"vocab.txt\")\n FLAGS.bert_config_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_config.json\")\n FLAGS.init_checkpoint = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_model.ckpt\")\n FLAGS.do_lower_case = False\n FLAGS.max_seq_length = 128\n FLAGS.output_dir = os.environ.get(\"OUTPUT_DIR\")\n\n run_classifier.main(0)", "def run_time_operation(self, learning_option, cluster):\r\n\r\n # whether or not test procedure\r\n is_train = tf.placeholder_with_default(True, shape=())\r\n learning_option['is_train'] = is_train\r\n\r\n # get worker info: worker num, device type, device num\r\n device = self.get_attr('device')\r\n num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n # construct API\r\n def apiConstructor():\r\n # CIFAR-10 images: [224, 224, 3]\r\n # label: [1000]\r\n def train_in():\r\n x, y = learning_option.get('train_imagenet')\r\n return x, y\r\n def test_in():\r\n x, y = learning_option.get('test_imagenet')\r\n return x, y\r\n\r\n images, labels = tf.cond(is_train, train_in, test_in)\r\n # get output dimension\r\n outdim = list(images.get_shape()[i].value for i in xrange(len(images.get_shape())))\r\n\r\n # set output\r\n self.set_output('image', images)\r\n self.set_output('label', labels)\r\n self.set_dimension('image', outdim)\r\n\r\n # set tf summary\r\n tf.summary.image(self.name, images, max_outputs=10)\r\n\r\n with tf.variable_scope(self.name):\r\n # single node, model parallelism: explicit worker mapping\r\n # data parallelism: equally duplicate model\r\n if learning_option.get(\"parallel\", None) != \"DP\":\r\n with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n apiConstructor()\r\n else:\r\n apiConstructor()", "def run_universal_demo(args, use_gpu: bool = True) -> None:\n if \"scannet\" in args.dataset:\n args.img_name_unique = False\n else:\n args.img_name_unique = True\n\n 
args.u_classes = names_utils.get_universal_class_names()\n args.print_freq = 10\n\n args.split = \"test\"\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in args.test_gpu)\n logger.info(args)\n logger.info(\"=> creating model ...\")\n args.num_model_classes = len(args.u_classes)\n\n itask = InferenceTask(\n args,\n base_size=args.base_size,\n crop_h=args.test_h,\n crop_w=args.test_w,\n input_file=args.input_file,\n model_taxonomy=\"universal\",\n eval_taxonomy=\"universal\",\n scales=args.scales,\n )\n itask.execute()", "def train_distributed():\n # Distributed stuff learnt from this repo: https://github.com/GoogleCloudPlatform/cloudml-dist-\n # mnist-example/blob/master/trainer/task.py\n\n # For Distributed TensorFlow\n env = json.loads(os.environ.get('TF_CONFIG', '{}'))\n cluster_info = env.get('cluster')\n cluster_spec = tf.train.ClusterSpec(cluster_info)\n task_info = env.get('task')\n job_name, task_index = task_info['type'], task_info['index']\n\n device_fn = tf.train.replica_device_setter(\n cluster=cluster_spec,\n worker_device='/job:%s/task:%d' % (job_name, task_index))\n\n print(\"Start job:%s, index:%d\" % (job_name, task_index))\n\n server = tf.train.Server(cluster_spec,\n job_name=job_name, task_index=task_index)\n\n # Start a parameter server node\n if job_name == 'ps':\n server.join()\n\n # Start a master/worker node\n if job_name == 'master' or job_name == 'worker':\n is_chief = (job_name == 'master')\n\n with tf.Graph().as_default() as graph: # TODO necessary?\n with tf.device(device_fn):\n # Prepare the data\n train_data, test_data, embeddings_file = prepare_data()\n\n # Create the model\n print(\"(%s,%d) Creating %d layers of %d units.\" %\n (job_name, task_index, FLAGS.num_layers, FLAGS.size))\n model = create_model(False)\n\n # Create train_dir\n if is_chief:\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n\n # TensorBoard summaries\n (test_loss, test_perplexity, bucket_loss_placeholders,\n bucket_perplexity_placeholders, summary, summary_writer) = create_summary_objects(graph)\n\n # Create supervisor\n init_op = tf.global_variables_initializer()\n\n # Create Supervisor. 
Disabling checkpoints and summaries, because we do that manually\n sv = tf.train.Supervisor(is_chief=is_chief, logdir=FLAGS.train_dir, init_op=init_op,\n init_fn=lambda session: after_init(session, model, embeddings_file),\n saver=model.saver, global_step=model.global_step,\n save_model_secs=0, save_summaries_secs=0, summary_op=None,\n summary_writer=None)\n\n with sv.managed_session(server.target) as sess:\n train(sess, model, train_data, test_data, summary, summary_writer, test_loss,\n test_perplexity, bucket_loss_placeholders, bucket_perplexity_placeholders,\n is_chief, job_name, task_index, sv.should_stop)\n sv.stop()", "def run_mnist(flags_obj):\n model_function = model_fn\n config = tf.estimator.RunConfig(protocol='grpc+verbs',\n save_checkpoints_secs=300,\n save_summary_steps=200,\n log_step_count_steps=200)\n data_format = flags_obj.data_format\n if data_format is None:\n data_format = ('channels_first'\n if tf.test.is_built_with_cuda() else 'channels_last')\n mnist_classifier = tf.estimator.Estimator(\n model_fn=model_function,\n model_dir=flags_obj.model_dir,\n config=config,\n params={\n 'data_format': data_format,\n })\n\n # Set up training and evaluation input functions.\n def train_input_fn():\n \"\"\"Prepare data for training.\"\"\"\n # When choosing shuffle buffer sizes, larger sizes result in better\n # randomness, while smaller sizes use less memory. MNIST is a small\n # enough dataset that we can easily shuffle the full epoch.\n ds = dtrain(flags_obj.data_dir)\n ds = ds.cache().shuffle(buffer_size=50000).batch(flags_obj.batch_size)\n\n # Iterate through the dataset a set number (`epochs_between_evals`) of times\n # during each training session.\n ds = ds.repeat()\n return ds\n\n def eval_input_fn():\n return dtest(flags_obj.data_dir).batch(\n 100).make_one_shot_iterator().get_next()\n\n \n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=flags_obj.train_steps)\n eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,throttle_secs=300)\n tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)\n \n '''# Train and evaluate model.\n for _ in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):\n mnist_classifier.train(input_fn=train_input_fn, hooks=train_hooks)\n eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)\n print('\\nEvaluation results:\\n\\t%s\\n' % eval_results)\n\n if model_helpers.past_stop_threshold(flags_obj.stop_threshold,\n eval_results['accuracy']):\n break\n '''\n # Export the model\n if flags_obj.export_dir is not None:\n image = tf.placeholder(tf.float32, [None, 28, 28])\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'image': image,\n })\n mnist_classifier.export_savedmodel(flags_obj.export_dir, input_fn)", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in 
enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n\n if benchmark_spec.tpus:\n mnist_benchmark_script = 'mnist_tpu.py'\n mnist_benchmark_cmd = ('cd tpu/models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--iterations={iterations} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size}'.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n iterations=benchmark_spec.iterations,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n else:\n mnist_benchmark_script = 'mnist.py'\n mnist_benchmark_cmd = ('cd models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--model_dir={model_dir} '\n 
'--batch_size={batch_size} '.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n mnist_benchmark_cmd = '{env} {cmd}'.format(\n env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)\n samples = []\n metadata = CreateMetadataDict(benchmark_spec)\n\n if benchmark_spec.train_steps > 0:\n if benchmark_spec.tpus:\n tpu = benchmark_spec.tpu_groups['train'].GetName()\n num_shards = '--num_shards={}'.format(\n benchmark_spec.tpu_groups['train'].GetNumShards())\n else:\n tpu = num_shards = ''\n\n if benchmark_spec.tpus:\n mnist_benchmark_train_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '\n '{num_shards} --noenable_predict'.format(\n cmd=mnist_benchmark_cmd,\n tpu=tpu,\n use_tpu=bool(benchmark_spec.tpus),\n train_steps=benchmark_spec.train_steps,\n num_shards=num_shards))\n else:\n mnist_benchmark_train_cmd = (\n '{cmd} --train_epochs={train_epochs} '.format(\n cmd=mnist_benchmark_cmd,\n train_epochs=benchmark_spec.train_epochs))\n\n start = time.time()\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)\n elapsed_seconds = (time.time() - start)\n samples.extend(MakeSamplesFromTrainOutput(\n metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))\n\n if benchmark_spec.eval_steps > 0:\n if benchmark_spec.tpus:\n mnist_benchmark_eval_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'\n .format(\n cmd=mnist_benchmark_cmd,\n use_tpu=bool(benchmark_spec.tpus),\n tpu=benchmark_spec.tpu_groups['eval'].GetName(),\n eval_steps=benchmark_spec.eval_steps))\n else:\n mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(\n cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))\n\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)\n samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,\n elapsed_seconds))\n return samples", "def run(config_file):\n config = load_config(config_file)\n config_global = config['global']\n\n # setup a logger\n logger = logging.getLogger('experiment')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler_stdout = logging.StreamHandler(sys.stdout)\n handler_stdout.setLevel(config['logger']['level'])\n handler_stdout.setFormatter(formatter)\n logger.addHandler(handler_stdout)\n\n if 'path' in config['logger']:\n handler_file = logging.FileHandler(config['logger']['path'])\n handler_file.setLevel(config['logger']['level'])\n handler_file.setFormatter(formatter)\n logger.addHandler(handler_file)\n\n logger.setLevel(config['logger']['level'])\n\n # Allow the gpu to be used in parallel\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n if 'max_threads' in config_global:\n sess_config.intra_op_parallelism_threads = config_global['max_threads']\n\n # we allow to set the random seed in the config file for reproducibility. However, when running on GPU, results\n # will still be nondeterministic (due to nondeterministic behavior of tensorflow)\n if 'random_seed' in config_global:\n seed = config_global['random_seed']\n logger.info('Using fixed random seed'.format(seed))\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\n with tf.Session(config=sess_config) as sess:\n # We are now fetching all relevant modules. 
It is strictly required that these module contain a variable named\n # 'component' that points to a class which inherits from experiment.Data, experiment.Experiment,\n # experiment.Trainer or experiment.Evaluator\n data_module = config['data-module']\n model_module = config['model-module']\n training_module = config['training-module']\n evaluation_module = config.get('evaluation-module', None)\n\n # The modules are now dynamically loaded\n DataClass = importlib.import_module(data_module).component\n ModelClass = importlib.import_module(model_module).component\n TrainingClass = importlib.import_module(training_module).component\n EvaluationClass = importlib.import_module(evaluation_module).component if evaluation_module else None\n\n # We then wire together all the modules and start training\n data = DataClass(config['data'], config_global, logger)\n model = ModelClass(config['model'], config_global, logger)\n training = TrainingClass(config['training'], config_global, logger)\n\n # setup the data (validate, create generators, load data, or else)\n logger.info('Setting up the data')\n data.setup()\n # build the model (e.g. compile it)\n logger.info('Building the model')\n model.build(data, sess)\n # start the training process\n logger.info('Starting the training process')\n training.start(model, data, sess)\n\n # perform evaluation, if required\n if EvaluationClass:\n logger.info('Evaluating')\n evaluation = EvaluationClass(config['evaluation'], config_global, logger)\n evaluation.start(model, data, sess)\n else:\n logger.info('No evaluation')\n\n logger.info('DONE')", "def main(params):\n params = run_train.prepare_experiment_folder(params, FOLDER_EXPERIMENT)\n\n # run_train.check_pathes_patterns(paths)\n tl_expt.set_experiment_logger(params['path_expt'])\n logging.info('COMPUTER: \\n%r', platform.uname())\n logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))\n\n tl_expt.create_subfolders(params['path_expt'], LIST_SUBFOLDER)\n\n path_csv = os.path.join(params['path_expt'], NAME_CSV_TRIPLES)\n df_paths = get_csv_triplets(\n params['path_list'], path_csv, params['path_images'], params['path_segms'], force_reload=FORCE_RERUN\n )\n\n dict_classif = seg_clf.load_classifier(params['path_classif'])\n params_clf = dict_classif['params']\n params_clf.update(params)\n logging.info(tl_expt.string_dict(params, desc='UPDATED PARAMETERS'))\n\n # perform on new images\n df_stat = pd.DataFrame()\n _wrapper_detection = partial(\n load_compute_detect_centers,\n params=params_clf,\n path_classif=params['path_classif'],\n path_output=params['path_expt'],\n )\n iterate = tl_expt.WrapExecuteSequence(_wrapper_detection, df_paths.iterrows(), nb_workers=params['nb_workers'])\n for dict_center in iterate:\n df_stat = df_stat.append(dict_center, ignore_index=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES_TEMP))\n\n df_stat.set_index(['image'], inplace=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES))\n logging.info('STATISTIC: \\n %r', df_stat.describe())", "def test_multitask(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def main(_):\n\n params = create_params()\n\n assert params[\"train_dataset_path\"]\n 
assert params[\"eval_dataset_path\"]\n\n input_fn = input_fn_from_files(\n params[\"train_dataset_path\"])\n eval_input_fn = input_fn_from_files(\n params[\"eval_dataset_path\"])\n\n feature_columns = create_feature_columns(params)\n\n model_fn = create_model_fn(feature_columns)\n estimator = create_tpu_estimator(model_fn, feature_columns, params)\n\n for cycle_index in range(params[\"train_epochs\"]):\n tf.logging.info(\"Starting a training cycle: {}/{}\".format(\n cycle_index + 1, params[\"train_epochs\"]))\n estimator.train(input_fn=input_fn, steps=params[\"steps_per_epoch\"])\n tf.logging.info(\"Beginning evaluation.\")\n eval_results = estimator.evaluate(eval_input_fn,\n steps=params[\"num_eval_steps\"])\n tf.logging.info(\"Evaluation complete.\")\n\n recall_1 = float(eval_results[\"recall@1\"])\n recall_5 = float(eval_results[\"recall@5\"])\n loss = float(eval_results[\"loss\"])\n tf.logging.info(\n \"Iteration {}: recall@1 = {:.4f}, recall@5 = {:.4f}, Loss = {:.4f}\"\n .format(cycle_index + 1, recall_1, recall_5, loss))", "def test_training():\n config = SmartDict()\n\n config.NETWORK_CLASS = LMBiSeNet\n config.DATASET_CLASS = DummyCamvid\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [128, 160]\n config.BATCH_SIZE = 2\n config.TEST_STEPS = 1\n config.MAX_STEPS = 2\n config.SAVE_CHECKPOINT_STEPS = 1\n config.KEEP_CHECKPOINT_MAX = 5\n config.SUMMARISE_STEPS = 1\n config.IS_PRETRAIN = False\n config.TASK = Tasks.SEMANTIC_SEGMENTATION\n\n # network model config\n config.NETWORK = SmartDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {\"learning_rate\": 0.001}\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = \"NHWC\"\n\n # daasegt config\n config.DATASET = SmartDict()\n config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = \"NHWC\"\n\n environment.init(\"test_lm_bisenet\")\n prepare_dirs(recreate=True)\n start_training(config, profile_step=1)", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n 
\"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def configure_gpu_tf():\n\n try:\n # locate available devices & set required environment variables\n available_device_ids = GPUtil.getFirstAvailable(order='first', maxLoad=0.7, maxMemory=0.7, attempts=1, interval=10)\n available_device_id = available_device_ids[0]\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = str(available_device_id)\n print(f\"\\n GPU Found! running on GPU:{available_device_id}\\n\")\n\n # set GPU configuration (use all GPU memory if device 0, else use <50% of memory)\n tf.debugging.set_log_device_placement(False)\n physical_gpu = tf.config.experimental.list_physical_devices('GPU')[0]\n\n if available_device_id == 0:\n tf.config.experimental.set_memory_growth(physical_gpu, True)\n else:\n tf.config.experimental.set_virtual_device_configuration(\n physical_gpu,\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4500)]\n )\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n assert len(logical_gpus) == 1, \"error creating virtual GPU to fractionally use memory\"\n\n # if we can't find a GPU, or they are all busy, default to using CPU\n except RuntimeError:\n print(\"\\n No GPUs available... running on CPU\\n\")\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'", "def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == \"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = 
imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n 
gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list", "def evaluate(sess, images_ph, labels_ph, softmax, mnist, config, task):\n\n print 'Evaluating on {} task ({}x{}, {} distractors) using {} glimpses (at {} scales)'.format(\n task, config.new_size, config.new_size, config.n_distractors,\n config.num_glimpses, config.n_patches)\n\n # Evaluation\n test_acc = []\n val_acc = []\n\n for k, dataset in enumerate([mnist.validation, mnist.test]):\n\n steps_per_epoch = dataset.num_examples // config.eval_batch_size\n correct_cnt = 0\n num_samples = steps_per_epoch * config.batch_size\n # loc_net.sampling = True\n\n for test_step in tqdm(xrange(steps_per_epoch)):\n\n images, labels = dataset.next_batch(config.batch_size)\n images = images.reshape((-1, config.original_size, config.original_size, 1))\n labels_bak = labels\n\n if task == 'translated':\n images = translate(images, width=config.new_size, height=config.new_size)\n elif task == 'cluttered':\n images = clutter(images,\n dataset.images.reshape((-1, config.original_size, config.original_size, 1)),\n width=config.new_size, height=config.new_size, n_patches=config.n_distractors\n )\n elif task == 'cluttered_var':\n images, _, _, _ = clutter_rnd(images,\n train_data=dataset.images.reshape(\n (-1, config.original_size, config.original_size, 1)),\n lim=config.distractor_range,\n color_digits=config.color_digits,\n color_noise=config.color_noise,\n width=config.new_size, height=config.new_size, norm=True)\n\n # else:\n # print 'original mnist data ({}x{}).'.format(config.original_size,config.original_size)\n\n # Duplicate M times (average prediction over M repeats)\n images = np.tile(images, [config.M, 1, 1, 1])\n labels = np.tile(labels, [config.M])\n\n softmax_val = sess.run(softmax,\n feed_dict={\n images_ph: images,\n labels_ph: labels\n })\n softmax_val = np.reshape(softmax_val,\n [config.M, -1, config.num_classes])\n softmax_val = np.mean(softmax_val, 0)\n\n pred_labels_val = np.argmax(softmax_val, 1)\n correct_cnt += np.sum(pred_labels_val == labels_bak)\n acc = correct_cnt / float(num_samples)\n\n if k == 0:\n print '\\nVal accuracy\\t{:4.4f} ({:4.4f} error)'.format(100 * acc, 100 - 100 * acc)\n val_acc = acc\n else:\n print 'Test accuracy\\t{:4.4f} ({:4.4f} error)\\n'.format(100 * acc, 100 - 100 * acc)\n test_acc = acc\n\n return test_acc, val_acc", "def execute(gpu, exp_batch, exp_alias, suppress_output=True, number_of_workers=12):\n try:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(gpu)\n g_conf.VARIABLE_WEIGHT = {}\n\n # At this point the log file with the correct naming is created.\n # You merge the yaml file with the global configuration structure.\n merge_with_yaml(os.path.join('configs', exp_batch, exp_alias + '.yaml'))\n set_type_of_process('train')\n\n # Set the process into loading status.\n coil_logger.add_message('Loading', {'GPU': gpu})\n\n # Put the output to a separate file if it is the case\n if suppress_output:\n if not os.path.exists('_output_logs'):\n os.mkdir('_output_logs')\n sys.stdout = open(os.path.join('_output_logs', exp_alias + '_' +\n g_conf.PROCESS_NAME + '_' + str(os.getpid()) + \".out\"), \"a\",\n buffering=1)\n sys.stderr = open(os.path.join('_output_logs',\n 
exp_alias + '_err_'+g_conf.PROCESS_NAME + '_'\n + str(os.getpid()) + \".out\"),\n \"a\", buffering=1)\n\n if coil_logger.check_finish('train'):\n coil_logger.add_message('Finished', {})\n return\n\n # Preload option\n if g_conf.PRELOAD_MODEL_ALIAS is not None:\n checkpoint = torch.load(os.path.join('_logs', g_conf.PRELOAD_MODEL_BATCH,\n g_conf.PRELOAD_MODEL_ALIAS,\n 'checkpoints',\n str(g_conf.PRELOAD_MODEL_CHECKPOINT)+'.pth'))\n\n\n # Get the latest checkpoint to be loaded\n # returns none if there are no checkpoints saved for this model\n checkpoint_file = get_latest_saved_checkpoint()\n if checkpoint_file is not None:\n checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,\n 'checkpoints', str(get_latest_saved_checkpoint())))\n iteration = checkpoint['iteration']\n best_loss = checkpoint['best_loss']\n best_loss_iter = checkpoint['best_loss_iter']\n print ('iteration: ', iteration, 'best_loss: ', best_loss)\n else:\n iteration = 0\n best_loss = 10000.0\n best_loss_iter = 0\n\n\n # Define the dataset. This structure is has the __get_item__ redefined in a way\n # that you can access the positions from the root directory as a in a vector.\n full_dataset = os.path.join(os.environ[\"COIL_DATASET_PATH\"], g_conf.TRAIN_DATASET_NAME)\n\n # By instantiating the augmenter we get a callable that augment images and transform them into tensors.\n augmenter = Augmenter(g_conf.AUGMENTATION)\n\n # Instantiate the class used to read the dataset\n dataset = CoILDataset(full_dataset, transform=augmenter, preload_name=str(g_conf.NUMBER_OF_HOURS)+'hours_'+g_conf.TRAIN_DATASET_NAME)\n print (\"Loaded dataset\")\n \n # Creates the sampler, this part is responsible for managing the keys. It divides\n # all keys depending on the measurements and produces a set of keys for each bach.\n # define the sampling strategy for mini-batch, different samplers can be found in 'splitter.py'\n data_loader = select_balancing_strategy(dataset, iteration, number_of_workers)\n\n # Instatiate the network architecture\n model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)\n model.cuda()\n\n optimizer = optim.Adam(model.parameters(), lr=g_conf.LEARNING_RATE) # adabound and adamio can also be used here\n\n if checkpoint_file is not None or g_conf.PRELOAD_MODEL_ALIAS is not None:\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n accumulated_time = checkpoint['total_time']\n loss_window = coil_logger.recover_loss_window('train', iteration)\n else: \n # We accumulate iteration time and keep the average speed\n accumulated_time = 0\n loss_window = []\n\n # freeze the perception module weights if required\n # for m in model.perception.parameters():\n # m.requires_grad = False\n \n # total trainable parameters\n model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n total_params = sum([np.prod(p.size()) for p in model_parameters])\n print ('trainable parameters: ', total_params)\n\n # multi-gpu\n print ('number of gpus: ', torch.cuda.device_count())\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n\n criterion = Loss(g_conf.LOSS_FUNCTION)\n\n print ('Start Training')\n\n st = time.time()\n for data in data_loader:\n\n # use this for early stopping if the validation loss is not coming down\n if g_conf.FINISH_ON_VALIDATION_STALE is not None and \\\n check_loss_validation_stopped(iteration, g_conf.FINISH_ON_VALIDATION_STALE):\n break\n\n \"\"\"\n ####################################\n Main optimization loop\n 
####################################\n \"\"\"\n\n iteration += 1\n\n if iteration % 1000 == 0:\n adjust_learning_rate_auto(optimizer, loss_window)\n \n # additional learning rate scheduler - cyclic cosine annealing (https://arxiv.org/pdf/1704.00109.pdf)\n # adjust_learning_rate_cosine_annealing(optimizer, loss_window, iteration)\n\n capture_time = time.time()\n controls = data['directions']\n model.zero_grad()\n branches = model(torch.squeeze(data['rgb'].cuda()),\n dataset.extract_inputs(data).cuda())\n loss_function_params = {\n 'branches': branches,\n 'targets': dataset.extract_targets(data).cuda(),\n 'controls': controls.cuda(),\n 'inputs': dataset.extract_inputs(data).cuda(),\n 'branch_weights': g_conf.BRANCH_LOSS_WEIGHT,\n 'variable_weights': g_conf.VARIABLE_WEIGHT\n }\n loss, _ = criterion(loss_function_params)\n loss.backward()\n optimizer.step()\n \"\"\"\n ####################################\n Saving the model if necessary\n ####################################\n \"\"\"\n\n if is_ready_to_save(iteration):\n if torch.cuda.device_count() > 1:\n state_dict_save = model.module.state_dict()\n else:\n state_dict_save = model.state_dict()\n\n state = {\n 'iteration': iteration,\n 'state_dict': state_dict_save,\n 'best_loss': best_loss,\n 'total_time': accumulated_time,\n 'optimizer': optimizer.state_dict(),\n 'best_loss_iter': best_loss_iter\n }\n torch.save(state, os.path.join('_logs', exp_batch, exp_alias\n , 'checkpoints', str(iteration) + '.pth'))\n\n \"\"\"\n ################################################\n Adding tensorboard logs.\n Making calculations for logging purposes.\n These logs are monitored by the printer module.\n #################################################\n \"\"\"\n coil_logger.add_scalar('Loss', loss.data, iteration)\n coil_logger.add_image('Image', torch.squeeze(data['rgb']), iteration)\n if loss.data < best_loss:\n best_loss = loss.data.tolist()\n best_loss_iter = iteration\n\n # Log a random position\n position = random.randint(0, len(data) - 1)\n\n if torch.cuda.device_count() > 1:\n output = model.module.extract_branch(torch.stack(branches[0:4]), controls)\n else:\n output = model.extract_branch(torch.stack(branches[0:4]), controls)\n error = torch.abs(output - dataset.extract_targets(data).cuda())\n\n accumulated_time += time.time() - capture_time\n\n coil_logger.add_message('Iterating',\n {'Iteration': iteration,\n 'Loss': loss.data.tolist(),\n 'Images/s': (iteration * g_conf.BATCH_SIZE) / accumulated_time,\n 'BestLoss': best_loss, 'BestLossIteration': best_loss_iter,\n 'Output': output[position].data.tolist(),\n 'GroundTruth': dataset.extract_targets(data)[\n position].data.tolist(),\n 'Error': error[position].data.tolist(),\n 'Inputs': dataset.extract_inputs(data)[\n position].data.tolist()},\n iteration)\n loss_window.append(loss.data.tolist())\n coil_logger.write_on_error_csv('train', loss.data)\n print(\"Iteration: %d Loss: %f\" % (iteration, loss.data))\n st = time.time()\n\n coil_logger.add_message('Finished', {})\n \n except KeyboardInterrupt:\n coil_logger.add_message('Error', {'Message': 'Killed By User'})\n\n except RuntimeError as e:\n\n coil_logger.add_message('Error', {'Message': str(e)})\n\n except:\n traceback.print_exc()\n coil_logger.add_message('Error', {'Message': 'Something Happened'})", "def test(model_params, dataset_test, testing_params, log_directory, device, cuda_available=True,\n metric_fns=None):\n # DATA LOADER\n test_loader = DataLoader(dataset_test, batch_size=testing_params[\"batch_size\"],\n shuffle=False, 
pin_memory=True,\n collate_fn=imed_loader_utils.imed_collate,\n num_workers=0)\n\n # LOAD TRAIN MODEL\n fname_model = os.path.join(log_directory, \"best_model.pt\")\n print('\\nLoading model: {}'.format(fname_model))\n model = torch.load(fname_model, map_location=device)\n if cuda_available:\n model.cuda()\n model.eval()\n\n # CREATE OUTPUT FOLDER\n path_3Dpred = os.path.join(log_directory, 'pred_masks')\n if not os.path.isdir(path_3Dpred):\n os.makedirs(path_3Dpred)\n\n # METRIC MANAGER\n metric_mgr = imed_metrics.MetricManager(metric_fns)\n\n # UNCERTAINTY SETTINGS\n if (testing_params['uncertainty']['epistemic'] or testing_params['uncertainty']['aleatoric']) and \\\n testing_params['uncertainty']['n_it'] > 0:\n n_monteCarlo = testing_params['uncertainty']['n_it']\n testing_params['uncertainty']['applied'] = True\n print('\\nComputing model uncertainty over {} iterations.'.format(n_monteCarlo))\n else:\n testing_params['uncertainty']['applied'] = False\n n_monteCarlo = 1\n\n for i_monteCarlo in range(n_monteCarlo):\n preds_npy, gt_npy = run_inference(test_loader, model, model_params, testing_params, path_3Dpred,\n cuda_available, i_monteCarlo)\n metric_mgr(preds_npy, gt_npy)\n\n # COMPUTE UNCERTAINTY MAPS\n if n_monteCarlo > 1:\n imed_utils.run_uncertainty(ifolder=path_3Dpred)\n\n metrics_dict = metric_mgr.get_results()\n metric_mgr.reset()\n print(metrics_dict)\n return metrics_dict", "def run_experiment(hparams):\n\n data_file_name = build_data_file_name(hparams.pair, hparams.time_interval, hparams.data_period)\n\n df = data_pre_processing(data_file_name, hparams.path_to_archives, hparams.path_to_data_dir)\n\n rows = df.shape[0]\n\n train, test = prepare_data(df[rows - 100:rows], hparams.feature_window, hparams.label_window)\n\n print(\"train:{}\".format(train))\n print(\"test:{}\".format(test))\n # my_feature_columns = [tf.feature_column.numeric_column('f')]\n # estimator = tf.estimator.DNNClassifier(\n # feature_columns=[],\n # hidden_units=[1024, 512, 256])\n\n # estimator = tf.estimator.DNNRegressor()", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def test_net_on_dataset(args, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0, use_matlab = False, early_stop=False):\n\n \n # print(\"test_net_on_dataset\")\n dataset = JsonDataset(dataset_name)\n test_timer = Timer()\n \n test_timer.tic()\n \n all_boxes = test_net(args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id, early_stop=early_stop)\n test_timer.toc()\n\n logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n\n roidb = dataset.get_roidb()\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES + 1\n final_boxes = empty_results(num_classes, num_images)\n test_corloc = 'train' in dataset_name\n \n\n all_cls_scores = {}\n\n for i, entry in enumerate(roidb):\n\n if early_stop and i > 10: break\n\n boxes = all_boxes[entry['image']]\n \n cls_key = entry['image'].replace('.jpg','').split('/')[-1]\n\n # print(cls_key)\n\n if boxes['scores'] is not None:\n if test_corloc:\n # print(\"corlooking\")\n _, _, cls_boxes_i = box_results_for_corloc(boxes['scores'], boxes['boxes'])\n else:\n _, _, cls_boxes_i = box_results_with_nms_and_limit(boxes['scores'], boxes['boxes'])\n\n extend_results(i, final_boxes, cls_boxes_i)\n else:\n final_boxes = None\n \n results = task_evaluation.evaluate_all(dataset, final_boxes, output_dir, test_corloc, use_matlab = use_matlab)\n 
return results", "def main(seed, filter_, num_classes, setup, model_name, images_dir, precision_mode, test):\n f1, f2 = filter_\n model_name = 'flex_random_seed_{}_resnet_manual_highres_center_only_f1_{}_f2_{}'.format(seed, f1, f2)\n frozen_graph_filepath = './Models/Frozen_graphs/{}_{}/'.format(f1,f2) + model_name + '_frozen_graph.pb'\n frozen_graph, x_tensor, y_tensor = trt_frozen_graph_and_tensors(\n model_name=model_name, \n frozen_graph_filepath=frozen_graph_filepath, \n precision_mode=precision_mode\n )\n\n elapsed_time_full_dataset = []\n sum_of_confusion_matrices = np.zeros((6, 6))\n \n with tf.compat.v1.Session(graph=frozen_graph) as sess:\n for image_file in [img for img in os.listdir(images_dir) if img.endswith('.JPG')]:\n\n img = Image.open(images_dir + image_file)\n sx,sy = img.size\n\n print(\"Image size is %i x %i\" % (sx,sy)) # sx = 4912, sy = 3264\n print(\"Loading image %s\" % image_file)\n\n img_np = np.array(img)/255.0\n del img\n\n print(\"Predicting for image %s (%i x %i pixel)\" % (image_file,sx,sy))\n\n start = time.time()\n predictions_flex = sess.run(y_tensor, feed_dict={x_tensor:np.expand_dims(img_np, 0)})\n elapsed = time.time() - start\n elapsed_time_full_dataset.append(elapsed)\n del img_np #deleting afterwards to not take the deleting time into account\n\n print(\"Prediction took %f seconds (inference on full image)\" % elapsed)\n print(\"Merging predictions\")\n # merge the predictions on the quarter images\n predictions_flex_combined = np.zeros(predictions_flex.shape)\n\n elapsed = time.time()-start\n if embedded_version:\n print(\"Prediction took %f seconds (inference on split up image)\" % elapsed)\n\n if embedded_version:\n predictions_flex = predictions_flex_combined\n\n if save_annotations:\n print(\"Computing annotations...\")\n annotations = []\n d = 4\n for x in range(100, sx-101, d):\n for y in range(100, sy-101, d):\n x0 = int(round(float(x-100)/4) + 15)\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n annotations.append((probs_flex, x, y))\n\n if test: # add a prefix for test to not replace real experiments\n model_name = 'TEST_' + model_name\n\n # saving annotations\n annotation_dir = images_dir.replace('Data', 'Results/seeds/annotations_trt') + image_file\n annotate_and_save(annotations, d, annotation_dir, model_name, precision_mode)\n classes_image = annotate_and_save_per_class(\n annotations, \n d, \n annotation_dir, \n model_name, \n precision_mode\n )\n\n labels = load_labels(annotation_dir)\n confusion_matrix = np.zeros((num_classes, num_classes))\n for (c_name, x, y) in labels:\n if 100 <= x < sx-101 and 100 <= y < sy-101:\n x0 = int(round(float(x-100)/4) + 15 )\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n\n predicted_class = np.argmax(probs_flex)\n c = train_model.get_classes().index(c_name)\n confusion_matrix[c, predicted_class] += 1\n print(confusion_matrix)\n sum_of_confusion_matrices += confusion_matrix\n\n print(sum_of_confusion_matrices)\n sum_of_cm_fp = './Results/seeds/preds_trt/{}/{}_{}/sum_of_cm_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n elapsed_time_fp = './Results/seeds/elapsed_trt/{}/{}_{}/time_taken_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n\n\n np.save(sum_of_cm_fp, sum_of_confusion_matrices)\n np.save(elapsed_time_fp, elapsed_time_full_dataset)\n tf.reset_default_graph()", "def generate_mnist_datasets(\n datapoints_per_task,\n K_list,\n cir_inner_loop_list, 
\n test_task_idx, \n val_task_idx,\n n_finetune_sets):\n\n # arbitrarily chosen, class-imbalance rate in outer and inner training loops\n cir_outer_loop = 0.5\n cir_inner_loop = 0.5\n # class-imbalance rate in the test sets of the test and validation tasks\n cir_test = 0.5\n # arbitrarily chosen, percentage of data that will be used in the inner training loop\n percent_data_inner_loop = 0.5\n\n percent_data_finetune_val = 0.8\n\n n_test_set = 4000\n\n test_task_idx, val_task_idx = test_task_idx, val_task_idx\n\n finetune_sets_per_K_cir = {}\n test_task_test_set, val_task = {}, {}\n \n\n train_task_list_inner, train_task_list_outer = [], []\n\n train_tasks_idxs = [i for i in range(0,10) if i not in [val_task_idx, test_task_idx]]\n\n base_path = '/home/USER/Documents'\n if (not (os.path.exists(base_path))):\n base_path = '/home/ubuntu/Projects'\n train_images, train_labels = loadlocal_mnist(\n images_path= base_path + '/MAML/raw_data/MNIST_data/train-images-idx3-ubyte', \n labels_path= base_path + '/MAML/raw_data/MNIST_data/train-labels-idx1-ubyte')\n\n test_images, test_labels = loadlocal_mnist(\n images_path= base_path + '/MAML/raw_data/MNIST_data/t10k-images-idx3-ubyte', \n labels_path= base_path + '/MAML/raw_data/MNIST_data/t10k-labels-idx1-ubyte')\n\n\n train_images, test_images = train_images.reshape((-1,28,28))/255.0, test_images.reshape((-1,28,28))/255.0\n images = np.concatenate((train_images, test_images))\n labels = np.concatenate((train_labels, test_labels))\n\n test_task_normal_indexes, val_task_normal_indexes = list(np.nonzero(labels == test_task_idx)[0]), list(np.nonzero(train_labels == val_task_idx)[0])\n test_task_X_normal, val_task_X_normal = images[test_task_normal_indexes],train_images[val_task_normal_indexes]\n test_task_Y_normal, val_task_Y_normal = np.zeros_like(labels[test_task_normal_indexes]), np.zeros_like(train_labels[val_task_normal_indexes])\n\n\n # val and test task have anomalies (samples of other numbers) that are not used for training\n # besides the two sets of anomalies (one for val task and one for test task are disjoint)\n test_task_anomalous_indexes = list(np.nonzero(test_labels[:5000] != test_task_idx)[0])\n val_task_anomalous_indexes= [index for index, element in enumerate(list(test_labels[5000:])) if element not in [val_task_idx, test_task_idx]]\n\n\n test_task_X_anomalous, val_task_X_anomalous = test_images[:5000][test_task_anomalous_indexes],test_images[5000:][val_task_anomalous_indexes]\n test_task_Y_anomalous, val_task_Y_anomalous = np.ones_like(test_labels[:5000][test_task_anomalous_indexes]), np.ones_like(test_labels[5000:][val_task_anomalous_indexes])\n\n test_task_X, val_task_X = np.concatenate((test_task_X_normal, test_task_X_anomalous)), np.concatenate((val_task_X_normal, val_task_X_anomalous))\n test_task_Y, val_task_Y = np.expand_dims(np.concatenate((test_task_Y_normal, test_task_Y_anomalous)),-1), np.expand_dims(np.concatenate((val_task_Y_normal, val_task_Y_anomalous)),-1)\n\n\n train_tasks_X_list, train_tasks_Y_list = [], []\n for task_idx in train_tasks_idxs:\n train_task_normal_indexes = list(np.nonzero(train_labels == task_idx)[0]) \n train_task_anomalous_indexes = [index for index, element in enumerate(list(train_labels)) if element not in [task_idx, val_task_idx, test_task_idx]]\n assert(len(np.nonzero(train_labels[train_task_anomalous_indexes] == val_task_idx)[0]) == 0)\n assert(len(np.nonzero(train_labels[train_task_anomalous_indexes] == test_task_idx)[0]) == 0)\n train_task_X_normal, train_task_X_anomalous = 
train_images[train_task_normal_indexes], train_images[train_task_anomalous_indexes]\n train_task_Y_normal, train_task_Y_anomalous = np.zeros_like(train_labels[train_task_normal_indexes]), np.ones_like(train_labels[train_task_anomalous_indexes])\n train_task_X, train_task_Y = np.concatenate((train_task_X_normal, train_task_X_anomalous)), np.concatenate((train_task_Y_normal, train_task_Y_anomalous))\n train_tasks_X_list.append(train_task_X)\n train_tasks_Y_list.append(np.expand_dims(train_task_Y,-1))\n\n\n\n # building test task sets of data\n normal_indexes, anomaly_indexes = list(np.nonzero(test_task_Y == 0)[0]), list(np.nonzero(test_task_Y == 1)[0])\n n_test_set_normal = int(n_test_set*cir_test)\n test_set_normal_indexes = random.sample(normal_indexes, n_test_set_normal)\n test_set_anomaly_indexes = random.sample(anomaly_indexes, n_test_set - n_test_set_normal)\n test_set_indexes = []\n test_set_indexes += test_set_normal_indexes\n test_set_indexes += test_set_anomaly_indexes\n\n test_task_test_set['test_X'], test_task_test_set['test_Y'] = test_task_X[test_set_indexes], test_task_Y[test_set_indexes]\n\n\n #shuffle\n s_test = np.arange(test_task_test_set['test_X'].shape[0])\n np.random.shuffle(s_test)\n test_task_test_set['test_X'], test_task_test_set['test_Y'] = test_task_test_set['test_X'][s_test], test_task_test_set['test_Y'][s_test]\n\n rest_normal_indexes = [index for index in normal_indexes if index not in test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in test_set_anomaly_indexes]\n\n\n for K in K_list:\n finetune_sets_per_cir = {}\n for cir in cir_inner_loop_list:\n\n rest_normal_indexes = [index for index in normal_indexes if index not in test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in test_set_anomaly_indexes]\n \n finetune_sets_list = []\n\n disjoint = False\n if(cir*K*n_finetune_sets<len(rest_normal_indexes)):\n disjoint = True\n\n n_finetune_normal = int(K*cir)\n n_finetune_anomaly = K - n_finetune_normal\n for i in range(n_finetune_sets):\n # if enough for disjoint do that\n # else sample randomly\n # store in a dict with keys cir_K\n finetune_normal_indexes = random.sample(rest_normal_indexes, n_finetune_normal)\n finetune_anomaly_indexes = random.sample(rest_anomaly_indexes, n_finetune_anomaly)\n finetune_indexes = []\n finetune_indexes += finetune_normal_indexes\n finetune_indexes += finetune_anomaly_indexes\n finetune_set = {}\n finetune_set['finetune_X'], finetune_set['finetune_Y'] = test_task_X[finetune_indexes], test_task_Y[finetune_indexes]\n\n #shuffle\n s_finetune = np.arange(finetune_set['finetune_X'].shape[0])\n np.random.shuffle(s_finetune)\n finetune_set['finetune_X'], finetune_set['finetune_Y'] = finetune_set['finetune_X'][s_finetune], finetune_set['finetune_Y'][s_finetune]\n\n finetune_sets_list.append(finetune_set)\n \n if(disjoint):\n rest_normal_indexes = [index for index in rest_normal_indexes if index not in finetune_normal_indexes]\n rest_anomaly_indexes = [index for index in rest_anomaly_indexes if index not in finetune_anomaly_indexes]\n\n finetune_sets_per_cir[str(cir)] = finetune_sets_list\n\n finetune_sets_per_K_cir[str(K)] = finetune_sets_per_cir\n\n\n #building val task sets of data\n normal_indexes, anomaly_indexes = list(np.nonzero(val_task_Y == 0)[0]), list(np.nonzero(val_task_Y == 1)[0])\n n_val_finetune = int(percent_data_finetune_val*datapoints_per_task)\n n_val_test_set = datapoints_per_task - n_val_finetune\n n_val_test_set_normal = 
int(n_val_test_set*cir_test)\n val_test_set_normal_indexes = random.sample(normal_indexes, n_val_test_set_normal)\n\n\n val_test_set_anomaly_indexes = random.sample(anomaly_indexes, n_val_test_set - n_val_test_set_normal)\n val_test_set_indexes = []\n val_test_set_indexes += val_test_set_normal_indexes\n val_test_set_indexes += val_test_set_anomaly_indexes\n val_task['test_X'], val_task['test_Y'] = val_task_X[val_test_set_indexes], val_task_Y[val_test_set_indexes]\n\n\n rest_normal_indexes = [index for index in normal_indexes if index not in val_test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in val_test_set_anomaly_indexes]\n\n n_val_finetune_normal = int(n_val_finetune*cir_inner_loop)\n val_finetune_normal_indexes = random.sample(rest_normal_indexes, n_val_finetune_normal)\n val_finetune_anomaly_indexes = random.sample(rest_anomaly_indexes, n_val_finetune - n_val_finetune_normal)\n val_finetune_indexes = []\n val_finetune_indexes += val_finetune_normal_indexes\n val_finetune_indexes += val_finetune_anomaly_indexes\n\n val_task['finetune_X'], val_task['finetune_Y'] = val_task_X[val_finetune_indexes], val_task_Y[val_finetune_indexes]\n\n #shuffle\n s_val_finetune = np.arange(val_task['finetune_X'].shape[0])\n s_val_test = np.arange(val_task['test_X'].shape[0])\n np.random.shuffle(s_val_finetune)\n np.random.shuffle(s_val_test)\n\n val_task['finetune_X'], val_task['finetune_Y'] = val_task['finetune_X'][s_val_finetune], val_task['finetune_Y'][s_val_finetune]\n val_task['test_X'], val_task['test_Y'] = val_task['test_X'][s_val_test], val_task['test_Y'][s_val_test]\n\n\n\n # building sets of data of the training tasks\n for task_X, task_Y in zip(train_tasks_X_list, train_tasks_Y_list):\n normal_indexes, anomaly_indexes = list(np.nonzero(task_Y == 0)[0]), list(np.nonzero(task_Y == 1)[0])\n\n n_inner_loop = int(percent_data_inner_loop*datapoints_per_task)\n n_inner_loop_normal = int(n_inner_loop*cir_inner_loop)\n n_outer_loop = datapoints_per_task - n_inner_loop\n n_outer_loop_normal = int(n_outer_loop*cir_outer_loop)\n \n inner_loop_normal_indexes = random.sample(normal_indexes, n_inner_loop_normal)\n inner_loop_anomaly_indexes = random.sample(anomaly_indexes, n_inner_loop - n_inner_loop_normal)\n inner_loop_indexes = []\n inner_loop_indexes += inner_loop_normal_indexes\n inner_loop_indexes += inner_loop_anomaly_indexes\n\n train_task_inner_X, train_task_inner_Y = task_X[inner_loop_indexes], task_Y[inner_loop_indexes]\n\n rest_normal_indexes = [index for index in normal_indexes if index not in inner_loop_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in inner_loop_anomaly_indexes]\n\n \n outer_loop_normal_indexes = random.sample(rest_normal_indexes, n_outer_loop_normal)\n outer_loop_anomaly_indexes = random.sample(rest_anomaly_indexes, n_outer_loop - n_outer_loop_normal)\n outer_loop_indexes = []\n outer_loop_indexes += outer_loop_normal_indexes\n outer_loop_indexes += outer_loop_anomaly_indexes\n\n train_task_outer_X, train_task_outer_Y = task_X[outer_loop_indexes], task_Y[outer_loop_indexes]\n\n\n s_inner = np.arange(train_task_inner_X.shape[0])\n s_outer = np.arange(train_task_outer_X.shape[0])\n np.random.shuffle(s_inner)\n np.random.shuffle(s_outer)\n train_task_list_inner.append([train_task_inner_X[s_inner],train_task_inner_Y[s_inner]])\n train_task_list_outer.append([train_task_outer_X[s_outer],train_task_outer_Y[s_outer]])\n\n\n\n train_tasks_inner_X = np.stack([train_task_list_inner[i][0]\n for 
i in range(len(train_task_list_inner))], 0)\n train_tasks_inner_Y = np.stack([train_task_list_inner[i][1]\n for i in range(len(train_task_list_inner))], 0)\n train_tasks_outer_X = np.stack([train_task_list_outer[i][0]\n for i in range(len(train_task_list_outer))], 0)\n train_tasks_outer_Y = np.stack([train_task_list_outer[i][1]\n for i in range(len(train_task_list_outer))], 0)\n\n \n train_tasks = {'X_train_inner': train_tasks_inner_X,\n 'Y_train_inner': train_tasks_inner_Y,\n 'X_train_outer': train_tasks_outer_X,\n 'Y_train_outer': train_tasks_outer_Y\n }\n\n\n return train_tasks, val_task, test_task_test_set, finetune_sets_per_K_cir", "def test_multitask_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )" ]
[ "0.6628726", "0.6221861", "0.6108858", "0.6095782", "0.60324526", "0.60173756", "0.6013705", "0.60083187", "0.59039736", "0.58947945", "0.58934516", "0.5819561", "0.5818858", "0.5764458", "0.5733247", "0.5716348", "0.57152057", "0.5713483", "0.5712434", "0.569594", "0.56877816", "0.5667342", "0.56591594", "0.56332433", "0.5602469", "0.5599965", "0.5593883", "0.55924153", "0.55826396", "0.557186" ]
0.6573886
1
GPU Faster RCNN test for TF Training. Instance Type: g3.8xlarge. Given above parameters, registers a task with family named after this test, runs the task, and waits for the task to be stopped before doing teardown operations of instance and cluster.
def test_ecs_tensorflow_training_fasterrcnn_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd,
                                                ecs_cluster_name):
    instance_id, cluster_arn = ecs_container_instance

    num_gpus = ec2_utils.get_instance_num_gpus(instance_id)

    ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id,
                                         num_gpus=num_gpus)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_testing(gpu=0):\n # expected environment variables\n os.environ[\"BERT_BASE_DIR\"] = \"pretrained/cased_L-12_H-768_A-12\"\n os.environ[\"DATA_DIR\"] = \"dataset\"\n os.environ[\"OUTPUT_DIR\"] = \"output\"\n assert os.environ.get(\"BERT_BASE_DIR\") is not None\n assert os.environ.get(\"DATA_DIR\") is not None\n assert os.environ.get(\"OUTPUT_DIR\") is not None\n\n # set the gpu index\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu)\n # set the required flags\n FLAGS.task_name = \"topic\"\n FLAGS.do_predict = True\n FLAGS.data_dir = os.environ.get(\"DATA_DIR\")\n FLAGS.vocab_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"vocab.txt\")\n FLAGS.bert_config_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_config.json\")\n FLAGS.init_checkpoint = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_model.ckpt\")\n FLAGS.do_lower_case = False\n FLAGS.max_seq_length = 128\n FLAGS.output_dir = os.environ.get(\"OUTPUT_DIR\")\n\n run_classifier.main(0)", "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def test_ecs_tensorflow_training_mnist_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n num_gpus = ec2_utils.get_instance_num_gpus(instance_id)\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id,\n num_gpus=num_gpus)", "def run_sm_perf_test(image_uri, num_nodes, region):\n _, framework_version = get_framework_and_version_from_tag(image_uri)\n if framework_version.startswith(\"1.\"):\n pytest.skip(\"Skipping benchmark test on TF 1.x images.\")\n\n processor = \"gpu\" if \"gpu\" in image_uri else \"cpu\"\n device_cuda_str = (\n f\"{processor}-{get_cuda_version_from_tag(image_uri)}\" if processor == \"gpu\" else processor\n )\n\n ec2_instance_type = \"p3.16xlarge\" if processor == \"gpu\" else \"c5.18xlarge\"\n\n py_version = \"py2\" if \"py2\" in image_uri else \"py37\" if \"py37\" in image_uri else \"py3\"\n\n time_str = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n commit_info = os.getenv(\"CODEBUILD_RESOLVED_SOURCE_VERSION\")\n target_upload_location = os.path.join(\n BENCHMARK_RESULTS_S3_BUCKET,\n \"tensorflow\",\n framework_version,\n \"sagemaker\",\n \"training\",\n device_cuda_str,\n py_version,\n )\n training_job_name = f\"tf{framework_version[0]}-tr-bench-{device_cuda_str}-{num_nodes}-node-{py_version}-{commit_info[:7]}-{time_str}\"\n\n # Inserting random sleep because this test starts multiple training jobs around the same time, resulting in\n # a throttling error for SageMaker APIs.\n time.sleep(Random(x=training_job_name).random() * 60)\n\n test_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"resources\")\n venv_dir = os.path.join(test_dir, \"sm_benchmark_venv\")\n\n ctx = Context()\n\n with ctx.cd(test_dir), ctx.prefix(f\"source {venv_dir}/bin/activate\"):\n log_file = f\"results-{commit_info}-{time_str}-{framework_version}-{device_cuda_str}-{py_version}-{num_nodes}-node.txt\"\n run_out = ctx.run(\n f\"timeout 45m python tf_sm_benchmark.py \"\n f\"--framework-version {framework_version} \"\n f\"--image-uri {image_uri} \"\n f\"--instance-type ml.{ec2_instance_type} \"\n f\"--node-count {num_nodes} \"\n 
f\"--python {py_version} \"\n f\"--region {region} \"\n f\"--job-name {training_job_name}\"\n f\"2>&1 | tee {log_file}\",\n warn=True,\n echo=True,\n )\n\n if not (run_out.ok or run_out.return_code == 124):\n target_upload_location = os.path.join(target_upload_location, \"failure_log\")\n\n ctx.run(\n f\"aws s3 cp {os.path.join(test_dir, log_file)} {os.path.join(target_upload_location, log_file)}\"\n )\n\n LOGGER.info(f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\")\n\n result_statement, throughput = _print_results_of_test(\n os.path.join(test_dir, log_file), processor\n )\n throughput /= num_nodes\n\n assert run_out.ok, (\n f\"Benchmark Test failed with return code {run_out.return_code}. \"\n f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\"\n )\n\n threshold_table = (\n (\n TENSORFLOW_SM_TRAINING_CPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_CPU_4NODE_THRESHOLD\n )\n if processor == \"cpu\"\n else TENSORFLOW_SM_TRAINING_GPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_GPU_4NODE_THRESHOLD\n )\n threshold = get_threshold_for_image(framework_version, threshold_table)\n LOGGER.info(\n f\"tensorflow {framework_version} sagemaker training {device_cuda_str} {py_version} \"\n f\"imagenet {num_nodes} nodes Throughput: {throughput} images/sec, threshold: {threshold} images/sec\"\n )\n assert throughput > threshold, (\n f\"tensorflow {framework_version} sagemaker training {processor} {py_version} imagenet {num_nodes} nodes \"\n f\"Benchmark Result {throughput} does not reach the threshold {threshold}\"\n )", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = 
label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)", "def run_time_operation(self, learning_option, cluster):\r\n\r\n # whether or not test procedure\r\n is_train = tf.placeholder_with_default(True, shape=())\r\n learning_option['is_train'] = is_train\r\n\r\n # get worker info: worker num, device type, device num\r\n device = self.get_attr('device')\r\n num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n # construct API\r\n def apiConstructor():\r\n # CIFAR-10 images: [224, 224, 3]\r\n # label: [1000]\r\n def train_in():\r\n x, y = learning_option.get('train_imagenet')\r\n return x, y\r\n def test_in():\r\n x, y = learning_option.get('test_imagenet')\r\n return x, y\r\n\r\n images, labels = tf.cond(is_train, train_in, test_in)\r\n # get output dimension\r\n outdim = list(images.get_shape()[i].value for i in xrange(len(images.get_shape())))\r\n\r\n # set output\r\n self.set_output('image', images)\r\n self.set_output('label', labels)\r\n self.set_dimension('image', outdim)\r\n\r\n # set tf summary\r\n tf.summary.image(self.name, images, max_outputs=10)\r\n\r\n with tf.variable_scope(self.name):\r\n # single node, model parallelism: explicit worker mapping\r\n # data parallelism: equally duplicate model\r\n if learning_option.get(\"parallel\", None) != \"DP\":\r\n with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n apiConstructor()\r\n else:\r\n apiConstructor()", "def run_universal_demo(args, use_gpu: bool = True) -> None:\n if \"scannet\" in args.dataset:\n args.img_name_unique = False\n else:\n args.img_name_unique = True\n\n args.u_classes = names_utils.get_universal_class_names()\n args.print_freq = 10\n\n args.split = \"test\"\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in args.test_gpu)\n logger.info(args)\n logger.info(\"=> creating model ...\")\n args.num_model_classes = len(args.u_classes)\n\n itask = InferenceTask(\n args,\n base_size=args.base_size,\n crop_h=args.test_h,\n crop_w=args.test_w,\n input_file=args.input_file,\n model_taxonomy=\"universal\",\n eval_taxonomy=\"universal\",\n scales=args.scales,\n )\n itask.execute()", "def test_training():\n config = SmartDict()\n\n config.NETWORK_CLASS = LMBiSeNet\n config.DATASET_CLASS = DummyCamvid\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [128, 160]\n config.BATCH_SIZE = 2\n config.TEST_STEPS = 1\n config.MAX_STEPS = 2\n config.SAVE_CHECKPOINT_STEPS = 1\n config.KEEP_CHECKPOINT_MAX = 5\n config.SUMMARISE_STEPS = 1\n config.IS_PRETRAIN = False\n config.TASK = Tasks.SEMANTIC_SEGMENTATION\n\n # network model config\n config.NETWORK = SmartDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {\"learning_rate\": 0.001}\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = \"NHWC\"\n\n # daasegt config\n config.DATASET = SmartDict()\n config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = \"NHWC\"\n\n environment.init(\"test_lm_bisenet\")\n prepare_dirs(recreate=True)\n 
start_training(config, profile_step=1)", "def test_ecs_tensorflow_training_mnist_cpu(cpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id)", "def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! 
Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract Testing Data \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract train set 的特征 \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO 用 pandas 存储\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)", "def configure_gpu_tf():\n\n try:\n # locate available devices & set required environment variables\n available_device_ids = GPUtil.getFirstAvailable(order='first', maxLoad=0.7, maxMemory=0.7, attempts=1, interval=10)\n available_device_id = available_device_ids[0]\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = str(available_device_id)\n print(f\"\\n GPU Found! 
running on GPU:{available_device_id}\\n\")\n\n # set GPU configuration (use all GPU memory if device 0, else use <50% of memory)\n tf.debugging.set_log_device_placement(False)\n physical_gpu = tf.config.experimental.list_physical_devices('GPU')[0]\n\n if available_device_id == 0:\n tf.config.experimental.set_memory_growth(physical_gpu, True)\n else:\n tf.config.experimental.set_virtual_device_configuration(\n physical_gpu,\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4500)]\n )\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n assert len(logical_gpus) == 1, \"error creating virtual GPU to fractionally use memory\"\n\n # if we can't find a GPU, or they are all busy, default to using CPU\n except RuntimeError:\n print(\"\\n No GPUs available... running on CPU\\n\")\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'", "def train_distributed():\n # Distributed stuff learnt from this repo: https://github.com/GoogleCloudPlatform/cloudml-dist-\n # mnist-example/blob/master/trainer/task.py\n\n # For Distributed TensorFlow\n env = json.loads(os.environ.get('TF_CONFIG', '{}'))\n cluster_info = env.get('cluster')\n cluster_spec = tf.train.ClusterSpec(cluster_info)\n task_info = env.get('task')\n job_name, task_index = task_info['type'], task_info['index']\n\n device_fn = tf.train.replica_device_setter(\n cluster=cluster_spec,\n worker_device='/job:%s/task:%d' % (job_name, task_index))\n\n print(\"Start job:%s, index:%d\" % (job_name, task_index))\n\n server = tf.train.Server(cluster_spec,\n job_name=job_name, task_index=task_index)\n\n # Start a parameter server node\n if job_name == 'ps':\n server.join()\n\n # Start a master/worker node\n if job_name == 'master' or job_name == 'worker':\n is_chief = (job_name == 'master')\n\n with tf.Graph().as_default() as graph: # TODO necessary?\n with tf.device(device_fn):\n # Prepare the data\n train_data, test_data, embeddings_file = prepare_data()\n\n # Create the model\n print(\"(%s,%d) Creating %d layers of %d units.\" %\n (job_name, task_index, FLAGS.num_layers, FLAGS.size))\n model = create_model(False)\n\n # Create train_dir\n if is_chief:\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n\n # TensorBoard summaries\n (test_loss, test_perplexity, bucket_loss_placeholders,\n bucket_perplexity_placeholders, summary, summary_writer) = create_summary_objects(graph)\n\n # Create supervisor\n init_op = tf.global_variables_initializer()\n\n # Create Supervisor. 
Disabling checkpoints and summaries, because we do that manually\n sv = tf.train.Supervisor(is_chief=is_chief, logdir=FLAGS.train_dir, init_op=init_op,\n init_fn=lambda session: after_init(session, model, embeddings_file),\n saver=model.saver, global_step=model.global_step,\n save_model_secs=0, save_summaries_secs=0, summary_op=None,\n summary_writer=None)\n\n with sv.managed_session(server.target) as sess:\n train(sess, model, train_data, test_data, summary, summary_writer, test_loss,\n test_perplexity, bucket_loss_placeholders, bucket_perplexity_placeholders,\n is_chief, job_name, task_index, sv.should_stop)\n sv.stop()", "def main(unused_argv):\n # Load data\n (train_adj, full_adj, train_feats, test_feats, y_train, y_val, y_test,\n train_mask, val_mask, test_mask, _, val_data, test_data, num_data,\n visible_data) = load_data(FLAGS.data_prefix, FLAGS.dataset, FLAGS.precalc)\n\n # Partition graph and do preprocessing\n if FLAGS.bsize > 1:\n _, parts = partition_utils.partition_graph(train_adj, visible_data,\n FLAGS.num_clusters)\n parts = [np.array(pt) for pt in parts]\n else:\n (parts, features_batches, support_batches, y_train_batches,\n train_mask_batches) = utils.preprocess(train_adj, train_feats, y_train,\n train_mask, visible_data,\n FLAGS.num_clusters,\n FLAGS.diag_lambda)\n\n (_, val_features_batches, val_support_batches, y_val_batches,\n val_mask_batches) = utils.preprocess(full_adj, test_feats, y_val, val_mask,\n np.arange(num_data),\n FLAGS.num_clusters_val,\n FLAGS.diag_lambda)\n\n (_, test_features_batches, test_support_batches, y_test_batches,\n test_mask_batches) = utils.preprocess(full_adj, test_feats, y_test,\n test_mask, np.arange(num_data),\n FLAGS.num_clusters_test,\n FLAGS.diag_lambda)\n idx_parts = list(range(len(parts)))\n\n # Some preprocessing\n model_func = models.GCN\n\n # Define placeholders\n placeholders = {\n 'support':\n tf.sparse_placeholder(tf.float32),\n 'features':\n tf.placeholder(tf.float32),\n 'labels':\n tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),\n 'labels_mask':\n tf.placeholder(tf.int32),\n 'dropout':\n tf.placeholder_with_default(0., shape=()),\n 'num_features_nonzero':\n tf.placeholder(tf.int32) # helper variable for sparse dropout\n }\n\n # Create model\n model = model_func(\n placeholders,\n input_dim=test_feats.shape[1],\n logging=True,\n multilabel=FLAGS.multilabel,\n norm=FLAGS.layernorm,\n precalc=FLAGS.precalc,\n num_layers=FLAGS.num_layers)\n\n # Initialize session\n sess = tf.Session()\n tf.set_random_seed(seed)\n\n # Init variables\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n cost_val = []\n total_training_time = 0.0\n # Train model\n for epoch in range(FLAGS.epochs):\n t = time.time()\n np.random.shuffle(idx_parts)\n if FLAGS.bsize > 1:\n (features_batches, support_batches, y_train_batches,\n train_mask_batches) = utils.preprocess_multicluster(\n train_adj, parts, train_feats, y_train, train_mask,\n FLAGS.num_clusters, FLAGS.bsize, FLAGS.diag_lambda)\n for pid in range(len(features_batches)):\n # Use preprocessed batch data\n features_b = features_batches[pid]\n support_b = support_batches[pid]\n y_train_b = y_train_batches[pid]\n train_mask_b = train_mask_batches[pid]\n # Construct feed dictionary\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_train_b,\n train_mask_b, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n else:\n 
np.random.shuffle(idx_parts)\n for pid in idx_parts:\n # Use preprocessed batch data\n features_b = features_batches[pid]\n support_b = support_batches[pid]\n y_train_b = y_train_batches[pid]\n train_mask_b = train_mask_batches[pid]\n # Construct feed dictionary\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_train_b,\n train_mask_b, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n\n total_training_time += time.time() - t\n print_str = 'Epoch: %04d ' % (epoch + 1) + 'training time: {:.5f} '.format(\n total_training_time) + 'train_acc= {:.5f} '.format(outs[2])\n\n # Validation\n if FLAGS.validation:\n cost, acc, micro, macro = evaluate(sess, model, val_features_batches,\n val_support_batches, y_val_batches,\n val_mask_batches, val_data,\n placeholders)\n cost_val.append(cost)\n print_str += 'val_acc= {:.5f} '.format(\n acc) + 'mi F1= {:.5f} ma F1= {:.5f} '.format(micro, macro)\n\n tf.logging.info(print_str)\n\n if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(\n cost_val[-(FLAGS.early_stopping + 1):-1]):\n tf.logging.info('Early stopping...')\n break\n\n tf.logging.info('Optimization Finished!')\n\n # Save model\n saver.save(sess, FLAGS.save_name)\n\n # Load model (using CPU for inference)\n with tf.device('/cpu:0'):\n sess_cpu = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))\n sess_cpu.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess_cpu, FLAGS.save_name)\n # Testing\n test_cost, test_acc, micro, macro = evaluate(\n sess_cpu, model, test_features_batches, test_support_batches,\n y_test_batches, test_mask_batches, test_data, placeholders)\n print_str = 'Test set results: ' + 'cost= {:.5f} '.format(\n test_cost) + 'accuracy= {:.5f} '.format(\n test_acc) + 'mi F1= {:.5f} ma F1= {:.5f}'.format(micro, macro)\n tf.logging.info(print_str)", "def finetune(ft_ds, model, task, epochs=10, eval_ds=None):\n\n print('==========FINETUNE==========')\n\n # Filter out undesired examples with excluded_label\n ds = ft_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.finetune_preprocess)\n ds = ds.shuffle(1000)\n ds = ds.batch(FLAGS.finetune_bs)\n\n # loss, metrics, optimizers\n train_loss= tf.keras.metrics.Mean(name='train_loss')\n train_sup_acc = tf.keras.metrics.Accuracy(name='train_supervised_accuracy')\n criterion_sup = tf.nn.softmax_cross_entropy_with_logits \n optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) \n for epoch in range(epochs): \n train_loss.reset_states()\n train_sup_acc.reset_states()\n for x in ds:\n with tf.GradientTape() as tape:\n image = x['image']\n labels = x[task['name']]\n out = model(image, mode='supervised', sup_layers=1, training=True)\n # print(tf.math.argmax(out, axis=-1))\n metrics.update_supervised_accuracy(train_sup_acc, labels, out)\n loss = criterion_sup(tf.one_hot(labels, depth=task['num_classes']), out)\n loss = tf.math.reduce_mean(loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(\n filter(lambda gv: gv[0] is not None, zip(gradients, model.trainable_variables))\n )\n train_loss.update_state(loss)\n print('supervised loss')\n print(train_loss.result())\n print('supervised accuracy')\n print(train_sup_acc.result())\n\n # Evaluate results on eval_ds if possible\n if eval_ds: \n evaluate(eval_ds, model, task)", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = 
tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n constants.TRAIN_BATCH_SIZE,\n is_train=True\n )\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n constants.EVAL_BATCH_SIZE,\n is_train=False\n )\n\n # # check for availabe tpu and gpu units\n # try:\n # tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n # tf.config.experimental_connect_to_cluster(tpu)\n # tf.tpu.experimental.initialize_tpu_system(tpu)\n # strategy = tf.distribute.experimental.TPUStrategy(tpu)\n # except ValueError:\n # strategy = tf.distribute.MirroredStrategy()\n\n # with strategy.scope():\n model = get_model(fn_args)\n\n try:\n log_dir = fn_args.model_run_dir\n except KeyError:\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), \"logs\")\n\n absl.logging.info('Tensorboard logging to {}'.format(log_dir))\n\n callbacks = [\n # tf.keras.callbacks.ModelCheckpoint(\"DeepLabV3plus.ckpt\", verbose=1, save_weights_only=True, save_best_only=True),\n tf.keras.callbacks.ReduceLROnPlateau(monitor=\"iou_score\", factor=0.2, patience=6, verbose=1, mode=\"max\"),\n tf.keras.callbacks.EarlyStopping(monitor=\"iou_score\", patience=16, mode=\"max\", verbose=1, restore_best_weights=True),\n tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=\"batch\")\n ]\n\n absl.logging.info('Start training the top classifier')\n \n model.fit(\n train_dataset,\n epochs=constants.EPOCHS,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=callbacks\n )\n\n signatures = {\n 'serving_default':\n _get_serve_image_fn(model).get_concrete_function(\n tf.TensorSpec(\n shape=[None, constants.HEIGHT, constants.WIDTH, 3],\n dtype=tf.float32,\n name=_transformed_name(constants.IMAGE_KEY)\n )\n )\n }\n\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def run(config_file):\n config = load_config(config_file)\n config_global = config['global']\n\n # setup a logger\n logger = logging.getLogger('experiment')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler_stdout = logging.StreamHandler(sys.stdout)\n handler_stdout.setLevel(config['logger']['level'])\n handler_stdout.setFormatter(formatter)\n logger.addHandler(handler_stdout)\n\n if 'path' in config['logger']:\n handler_file = logging.FileHandler(config['logger']['path'])\n handler_file.setLevel(config['logger']['level'])\n handler_file.setFormatter(formatter)\n logger.addHandler(handler_file)\n\n logger.setLevel(config['logger']['level'])\n\n # Allow the gpu to be used in parallel\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n if 'max_threads' in config_global:\n sess_config.intra_op_parallelism_threads = config_global['max_threads']\n\n # we allow to set the random seed in the config file for reproducibility. However, when running on GPU, results\n # will still be nondeterministic (due to nondeterministic behavior of tensorflow)\n if 'random_seed' in config_global:\n seed = config_global['random_seed']\n logger.info('Using fixed random seed'.format(seed))\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\n with tf.Session(config=sess_config) as sess:\n # We are now fetching all relevant modules. 
It is strictly required that these module contain a variable named\n # 'component' that points to a class which inherits from experiment.Data, experiment.Experiment,\n # experiment.Trainer or experiment.Evaluator\n data_module = config['data-module']\n model_module = config['model-module']\n training_module = config['training-module']\n evaluation_module = config.get('evaluation-module', None)\n\n # The modules are now dynamically loaded\n DataClass = importlib.import_module(data_module).component\n ModelClass = importlib.import_module(model_module).component\n TrainingClass = importlib.import_module(training_module).component\n EvaluationClass = importlib.import_module(evaluation_module).component if evaluation_module else None\n\n # We then wire together all the modules and start training\n data = DataClass(config['data'], config_global, logger)\n model = ModelClass(config['model'], config_global, logger)\n training = TrainingClass(config['training'], config_global, logger)\n\n # setup the data (validate, create generators, load data, or else)\n logger.info('Setting up the data')\n data.setup()\n # build the model (e.g. compile it)\n logger.info('Building the model')\n model.build(data, sess)\n # start the training process\n logger.info('Starting the training process')\n training.start(model, data, sess)\n\n # perform evaluation, if required\n if EvaluationClass:\n logger.info('Evaluating')\n evaluation = EvaluationClass(config['evaluation'], config_global, logger)\n evaluation.start(model, data, sess)\n else:\n logger.info('No evaluation')\n\n logger.info('DONE')", "def train():\n args = arguments_st_train()\n\n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n\n \n if args.use_random_seed:\n tf.set_random_seed(args.random_seed)\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n input_size=input_size,\n random_scale=args.random_scale,\n random_mirror=args.random_mirror,\n random_crop=args.random_crop,\n ignore_label=args.ignore_label,\n img_mean=IMG_MEAN,\n coord=coord,\n task=args.task)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Create network.\n with slim.arg_scope(vgg_arg_scope(weight_decay=args.weight_decay, use_batch_norm=True, is_training=True)):\n if args.network == 'vgg_16_deeplab_st':\n net, end_points = vgg_16_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n elif args.network == 'vgg_16_shortcut_deeplab_st':\n net, end_points = vgg_16_shortcut_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n else:\n raise Exception('network name is not recognized!')\n \n \n # Predictions.\n raw_output = end_points['vgg_16/fc8_voc12']\n\n # gt labels\n raw_gt = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes,\n one_hot=False, task=args.task) # [batch_size, h, w]\n\n # losses\n if args.task == 'normal':\n loss = get_normal_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n elif args.task == 'seg':\n loss = get_seg_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]\n reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\n \n # Image summary for visualisation.\n raw_output_up = 
tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, axis=3)\n pred = tf.expand_dims(raw_output_up, dim=3)\n \n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes, args.task], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes, args.task], tf.uint8)\n \n total_summary = tf.summary.image('images', \n tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n \n # Define loss and optimisation parameters.\n train_op, step_ph = create_train_ops_st(reduced_loss, args)\n \n # Set up tf session and initialize variables. \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n\n # Load variables if the checkpoint is provided.\n if args.restore_from is not None:\n load_st(sess, args)\n \n # Saver for storing checkpoints of the model.\n save_op = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=args.max_to_keep)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n # Iterate over training steps.\n for step in range(args.num_steps):\n start_time = time.time()\n feed_dict = { step_ph : step }\n \n if step % args.save_pred_every == 0:\n loss_value, images, labels, preds, summary, _ = sess.run([reduced_loss, image_batch, label_batch, pred, total_summary, train_op], feed_dict=feed_dict)\n summary_writer.add_summary(summary, step)\n save(save_op, sess, args.snapshot_dir, step)\n else:\n loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)\n duration = time.time() - start_time\n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n coord.request_stop()\n coord.join(threads)", "def configure_training_federated(\n task_spec: training_specs.TaskSpecFederated,\n *, # Caller passes below args by name.\n resnet_layers: int = 18,\n num_classes: int = 100,\n l2_weight_decay: float = 1e-4,\n) -> training_specs.RunnerSpecFederated:\n\n return _Cifar100ImageTask(\n task_spec,\n resnet_layers=resnet_layers,\n num_classes=num_classes,\n l2_weight_decay=l2_weight_decay).build_federated_runner_spec()", "def main(_):\n\n params = create_params()\n\n assert params[\"train_dataset_path\"]\n assert params[\"eval_dataset_path\"]\n\n input_fn = input_fn_from_files(\n params[\"train_dataset_path\"])\n eval_input_fn = input_fn_from_files(\n params[\"eval_dataset_path\"])\n\n feature_columns = create_feature_columns(params)\n\n model_fn = create_model_fn(feature_columns)\n estimator = create_tpu_estimator(model_fn, feature_columns, params)\n\n for cycle_index in range(params[\"train_epochs\"]):\n tf.logging.info(\"Starting a training cycle: {}/{}\".format(\n cycle_index + 1, params[\"train_epochs\"]))\n estimator.train(input_fn=input_fn, steps=params[\"steps_per_epoch\"])\n tf.logging.info(\"Beginning evaluation.\")\n eval_results = estimator.evaluate(eval_input_fn,\n steps=params[\"num_eval_steps\"])\n tf.logging.info(\"Evaluation complete.\")\n\n recall_1 = float(eval_results[\"recall@1\"])\n recall_5 = float(eval_results[\"recall@5\"])\n loss = float(eval_results[\"loss\"])\n 
tf.logging.info(\n \"Iteration {}: recall@1 = {:.4f}, recall@5 = {:.4f}, Loss = {:.4f}\"\n .format(cycle_index + 1, recall_1, recall_5, loss))", "def main(seed, filter_, num_classes, setup, model_name, images_dir, precision_mode, test):\n f1, f2 = filter_\n model_name = 'flex_random_seed_{}_resnet_manual_highres_center_only_f1_{}_f2_{}'.format(seed, f1, f2)\n frozen_graph_filepath = './Models/Frozen_graphs/{}_{}/'.format(f1,f2) + model_name + '_frozen_graph.pb'\n frozen_graph, x_tensor, y_tensor = trt_frozen_graph_and_tensors(\n model_name=model_name, \n frozen_graph_filepath=frozen_graph_filepath, \n precision_mode=precision_mode\n )\n\n elapsed_time_full_dataset = []\n sum_of_confusion_matrices = np.zeros((6, 6))\n \n with tf.compat.v1.Session(graph=frozen_graph) as sess:\n for image_file in [img for img in os.listdir(images_dir) if img.endswith('.JPG')]:\n\n img = Image.open(images_dir + image_file)\n sx,sy = img.size\n\n print(\"Image size is %i x %i\" % (sx,sy)) # sx = 4912, sy = 3264\n print(\"Loading image %s\" % image_file)\n\n img_np = np.array(img)/255.0\n del img\n\n print(\"Predicting for image %s (%i x %i pixel)\" % (image_file,sx,sy))\n\n start = time.time()\n predictions_flex = sess.run(y_tensor, feed_dict={x_tensor:np.expand_dims(img_np, 0)})\n elapsed = time.time() - start\n elapsed_time_full_dataset.append(elapsed)\n del img_np #deleting afterwards to not take the deleting time into account\n\n print(\"Prediction took %f seconds (inference on full image)\" % elapsed)\n print(\"Merging predictions\")\n # merge the predictions on the quarter images\n predictions_flex_combined = np.zeros(predictions_flex.shape)\n\n elapsed = time.time()-start\n if embedded_version:\n print(\"Prediction took %f seconds (inference on split up image)\" % elapsed)\n\n if embedded_version:\n predictions_flex = predictions_flex_combined\n\n if save_annotations:\n print(\"Computing annotations...\")\n annotations = []\n d = 4\n for x in range(100, sx-101, d):\n for y in range(100, sy-101, d):\n x0 = int(round(float(x-100)/4) + 15)\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n annotations.append((probs_flex, x, y))\n\n if test: # add a prefix for test to not replace real experiments\n model_name = 'TEST_' + model_name\n\n # saving annotations\n annotation_dir = images_dir.replace('Data', 'Results/seeds/annotations_trt') + image_file\n annotate_and_save(annotations, d, annotation_dir, model_name, precision_mode)\n classes_image = annotate_and_save_per_class(\n annotations, \n d, \n annotation_dir, \n model_name, \n precision_mode\n )\n\n labels = load_labels(annotation_dir)\n confusion_matrix = np.zeros((num_classes, num_classes))\n for (c_name, x, y) in labels:\n if 100 <= x < sx-101 and 100 <= y < sy-101:\n x0 = int(round(float(x-100)/4) + 15 )\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n\n predicted_class = np.argmax(probs_flex)\n c = train_model.get_classes().index(c_name)\n confusion_matrix[c, predicted_class] += 1\n print(confusion_matrix)\n sum_of_confusion_matrices += confusion_matrix\n\n print(sum_of_confusion_matrices)\n sum_of_cm_fp = './Results/seeds/preds_trt/{}/{}_{}/sum_of_cm_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n elapsed_time_fp = './Results/seeds/elapsed_trt/{}/{}_{}/time_taken_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n\n\n np.save(sum_of_cm_fp, sum_of_confusion_matrices)\n np.save(elapsed_time_fp, 
elapsed_time_full_dataset)\n tf.reset_default_graph()", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def test_all_tf_execution_regimes(test_case):\n if BACKEND == 'backend_tensorflow':\n return test_util.test_all_tf_execution_regimes(test_case)\n else:\n return test_case", "def run_sequence(seq: Sequence, tracker: Tracker, debug=False, num_gpu=8):\n '''2021.1.2 Add multiple gpu support'''\n try:\n worker_name = multiprocessing.current_process().name\n worker_id = int(worker_name[worker_name.find('-') + 1:]) - 1\n gpu_id = worker_id % num_gpu\n torch.cuda.set_device(gpu_id)\n except:\n pass\n\n def _results_exist():\n if seq.object_ids is None:\n if seq.dataset in ['trackingnet', 'got10k']:\n base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)\n bbox_file = '{}.txt'.format(base_results_path)\n else:\n bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name)\n return os.path.isfile(bbox_file)\n else:\n bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids]\n missing = [not os.path.isfile(f) for f in bbox_files]\n return sum(missing) == 0\n\n if _results_exist() and not debug:\n print('FPS: {}'.format(-1))\n return\n\n print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))\n\n if debug:\n output = tracker.run_sequence(seq, debug=debug)\n else:\n try:\n output = tracker.run_sequence(seq, debug=debug)\n except Exception as e:\n print(e)\n return\n\n sys.stdout.flush()\n\n if isinstance(output['time'][0], (dict, OrderedDict)):\n exec_time = sum([sum(times.values()) for times in output['time']])\n num_frames = len(output['time'])\n else:\n exec_time = sum(output['time'])\n num_frames = len(output['time'])\n\n 
print('FPS: {}'.format(num_frames / exec_time))\n\n if not debug:\n _save_tracker_output(seq, tracker, output)", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def warmup_resnet_imagenet_128_gpu_8_real(self):\n test_id = 'warmup_resnet_imagenet.gpu_8.128.real'\n args = {}\n config = self.build_resnet_test_config(test_id, args, batch_size=128, gpus=8,\n repeat=1, total_batches=1300)\n self.run_test_suite(config)", "def benchmark_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, data_dir=self.fake_data_dir, data_name='imagenet')\n self._run_benchmark(params)", "def run(args):\n # CONFIG\n run_name = get_run_name(args)\n logger.info(f'*** Starting run {run_name} ***')\n data_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/finetune_data/{args.finetune_data}'\n output_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/runs/{run_name}'\n\n # Get configs\n pretrained_model_config_path = get_model_config_path(args)\n model_config = get_model_config(pretrained_model_config_path)\n\n # Meta data/label mapping\n input_meta_data = get_input_meta_data(data_dir)\n label_mapping = get_label_mapping(data_dir)\n logger.info(f'Loaded training data meta.json file: {input_meta_data}')\n\n # Calculate steps, warmup steps and eval steps\n train_data_size = input_meta_data['train_data_size']\n num_labels = input_meta_data['num_labels']\n max_seq_length = input_meta_data['max_seq_length']\n if args.limit_train_steps is None:\n steps_per_epoch = int(train_data_size / args.train_batch_size)\n else:\n steps_per_epoch = args.limit_train_steps\n warmup_steps = int(args.num_epochs * train_data_size * args.warmup_proportion/ args.train_batch_size)\n if args.limit_eval_steps is None:\n eval_steps = int(math.ceil(input_meta_data['eval_data_size'] / args.eval_batch_size))\n else:\n eval_steps = args.limit_eval_steps\n\n # some logging\n if args.init_checkpoint is None:\n logger.info(f'Finetuning on datset {args.finetune_data} using default pretrained model {args.model_class}')\n else:\n logger.info(f'Finetuning on datset {args.finetune_data} using pretrained model in {args.init_checkpoint} of type {args.model_class}')\n logger.info(f'Running {args.num_epochs} epochs with {steps_per_epoch:,} steps per epoch')\n logger.info(f'Using warmup proportion of {args.warmup_proportion}, resulting in {warmup_steps:,} warmup steps')\n logger.info(f'Using learning rate: {args.learning_rate}, training batch size: {args.train_batch_size}, num_epochs: {args.num_epochs}')\n\n # Get model\n classifier_model, core_model = get_model(args, model_config, steps_per_epoch, warmup_steps, num_labels, max_seq_length)\n optimizer = classifier_model.optimizer\n loss_fn = get_loss_fn(num_labels)\n try:\n if ',' in args.validation_freq:\n validation_freq = args.validation_freq.split(',')\n validation_freq = [int(v) for v in validation_freq]\n else:\n validation_freq = int(args.validation_freq)\n except:\n raise ValueError(f'Invalid argument for validation_freq!')\n logger.info(f'Using a validation frequency of {validation_freq}')\n\n # Restore checkpoint\n if args.init_checkpoint:\n checkpoint_path = f'gs://{args.bucket_name}/{args.project_name}/pretrain/runs/{args.init_checkpoint}'\n checkpoint = tf.train.Checkpoint(model=core_model)\n checkpoint.restore(checkpoint_path).assert_existing_objects_matched()\n logger.info(f'Successfully restored checkpoint from 
{checkpoint_path}')\n\n # Run keras compile\n logger.info(f'Compiling keras model...')\n classifier_model.compile(\n optimizer=optimizer,\n loss=loss_fn,\n metrics=get_metrics())\n logger.info(f'... done')\n\n # Create all custom callbacks\n summary_dir = os.path.join(output_dir, 'summaries')\n summary_callback = tf.keras.callbacks.TensorBoard(summary_dir, profile_batch=0)\n time_history_callback = keras_utils.TimeHistory(\n batch_size=args.train_batch_size,\n log_steps=args.time_history_log_steps,\n logdir=summary_dir)\n custom_callbacks = [summary_callback, time_history_callback]\n if args.save_model:\n logger.info('Using save_model option...')\n checkpoint_path = os.path.join(output_dir, 'checkpoint')\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)\n custom_callbacks.append(checkpoint_callback)\n if args.early_stopping_epochs > 0:\n logger.info(f'Using early stopping of after {args.early_stopping_epochs} epochs of val_loss not decreasing')\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(patience=args.early_stopping_epochs, monitor='val_loss')\n custom_callbacks.append(early_stopping_callback)\n\n # Generate dataset_fn\n train_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'train.tfrecords'),\n max_seq_length,\n args.train_batch_size,\n is_training=True)\n eval_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'dev.tfrecords'),\n max_seq_length,\n args.eval_batch_size,\n is_training=False)\n\n # Add mertrics callback to calculate performance metrics at the end of epoch\n performance_metrics_callback = Metrics(\n eval_input_fn,\n label_mapping,\n os.path.join(summary_dir, 'metrics'),\n eval_steps,\n args.eval_batch_size,\n validation_freq)\n custom_callbacks.append(performance_metrics_callback)\n\n # Run keras fit\n time_start = time.time()\n logger.info('Run training...')\n history = classifier_model.fit(\n x=train_input_fn(),\n validation_data=eval_input_fn(),\n steps_per_epoch=steps_per_epoch,\n epochs=args.num_epochs,\n validation_steps=eval_steps,\n validation_freq=validation_freq,\n callbacks=custom_callbacks,\n verbose=1)\n time_end = time.time()\n training_time_min = (time_end-time_start)/60\n logger.info(f'Finished training after {training_time_min:.1f} min')\n\n # Write training log\n all_scores = performance_metrics_callback.scores\n all_predictions = performance_metrics_callback.predictions\n if len(all_scores) > 0:\n final_scores = all_scores[-1]\n logger.info(f'Final eval scores: {final_scores}')\n else:\n final_scores = {}\n full_history = history.history\n if len(full_history) > 0:\n final_val_loss = full_history['val_loss'][-1]\n final_loss = full_history['loss'][-1]\n logger.info(f'Final training loss: {final_loss:.2f}, Final validation loss: {final_val_loss:.2f}')\n else:\n final_val_loss = None\n final_loss = None\n data = {\n 'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'run_name': run_name,\n 'final_loss': final_loss,\n 'final_val_loss': final_val_loss,\n 'max_seq_length': max_seq_length,\n 'num_train_steps': steps_per_epoch * args.num_epochs,\n 'eval_steps': eval_steps,\n 'steps_per_epoch': steps_per_epoch,\n 'training_time_min': training_time_min,\n 'data_dir': data_dir,\n 'output_dir': output_dir,\n 'all_scores': all_scores,\n 'all_predictions': all_predictions,\n 'num_labels': num_labels,\n 'label_mapping': label_mapping,\n **full_history,\n **final_scores,\n **vars(args),\n }\n # Write run_log\n f_path_training_log = 
os.path.join(output_dir, 'run_logs.json')\n logger.info(f'Writing training log to {f_path_training_log}...')\n save_to_json(data, f_path_training_log)\n # Write bert config\n model_config.id2label = label_mapping\n model_config.label2id = {v:k for k, v in label_mapping.items()}\n model_config.max_seq_length = max_seq_length\n model_config.num_labels = num_labels\n f_path_bert_config = os.path.join(output_dir, 'bert_config.json')\n logger.info(f'Writing BERT config to {f_path_bert_config}...')\n save_to_json(model_config.to_dict(), f_path_bert_config)", "def test_net_on_dataset(args, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0, use_matlab = False, early_stop=False):\n\n \n # print(\"test_net_on_dataset\")\n dataset = JsonDataset(dataset_name)\n test_timer = Timer()\n \n test_timer.tic()\n \n all_boxes = test_net(args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id, early_stop=early_stop)\n test_timer.toc()\n\n logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n\n roidb = dataset.get_roidb()\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES + 1\n final_boxes = empty_results(num_classes, num_images)\n test_corloc = 'train' in dataset_name\n \n\n all_cls_scores = {}\n\n for i, entry in enumerate(roidb):\n\n if early_stop and i > 10: break\n\n boxes = all_boxes[entry['image']]\n \n cls_key = entry['image'].replace('.jpg','').split('/')[-1]\n\n # print(cls_key)\n\n if boxes['scores'] is not None:\n if test_corloc:\n # print(\"corlooking\")\n _, _, cls_boxes_i = box_results_for_corloc(boxes['scores'], boxes['boxes'])\n else:\n _, _, cls_boxes_i = box_results_with_nms_and_limit(boxes['scores'], boxes['boxes'])\n\n extend_results(i, final_boxes, cls_boxes_i)\n else:\n final_boxes = None\n \n results = task_evaluation.evaluate_all(dataset, final_boxes, output_dir, test_corloc, use_matlab = use_matlab)\n return results", "def run_vgg_experiment(args, device):\n validation_ratio, record_train_acc, record_val_acc, record_test_acc = utils.configure_training_mode(args)\n\n train_loader, validation_loader, test_loader = datasets.build_cifar10_loaders(args.batch_size,\n validation_ratio=validation_ratio,\n train_validation_split_seed=0)\n local_loss_list = utils.get_loss(args)\n nonlinearity = utils.get_nonlinearity(args)\n\n optimizer_local, local_opt_arguments_dict, local_scheduler_arguments_dict, \\\n optimizer_final, final_opt_arguments_dict, final_scheduler_arguments_dict = \\\n utils.choose_optimizers_and_parameters(args)\n\n conv_sizes = [128, 256, 256, 512, 512, 512]\n\n if args.vgg_conv_size_multiplier != 1:\n for i in range(len(conv_sizes)):\n conv_sizes[i] = conv_sizes[i] * args.vgg_conv_size_multiplier\n do_pooling = [False, True, False, True, True, True]\n\n if args.divisive_norm_conv:\n divisive_norm_list_conv = [networks.DivisiveNorm(args.divnorm_power, args.grouping_dim, args.grouped_var_delta)\n for i in range(len(conv_sizes))]\n else:\n divisive_norm_list_conv = None\n\n kernel_sizes = [3 for i in range(len(conv_sizes))]\n fc_layers = [1024]\n\n if args.divisive_norm_fc:\n divisive_norm_list_fc = [networks.DivisiveNorm(args.divnorm_power, args.grouping_dim,\n args.grouped_var_delta)\n for i in range(len(fc_layers))]\n else:\n divisive_norm_list_fc = None\n\n alt_feedback_type = None\n if args.feedback_alignment:\n alt_feedback_type = 'feedback_alignment'\n elif args.sign_symmetry:\n alt_feedback_type = 'sign_symmetry'\n\n net = networks.Network(nonlinearity, local_loss_list, optimizer_local,\n 
torch.optim.lr_scheduler.MultiStepLR, conv_sizes, kernel_sizes,\n do_pooling, fc_layers, 'max', 'CIFAR10', bias=False,\n local_opt_arguments_dict=local_opt_arguments_dict,\n local_scheduler_arguments_dict=local_scheduler_arguments_dict,\n dropout_p=args.dropout_p, batch_norm=args.batch_norm,\n divisive_norm_list_conv=divisive_norm_list_conv, divisive_norm_list_fc=divisive_norm_list_fc,\n spatial_dropout=args.spatial_dropout, alt_feedback_type=alt_feedback_type)\n\n net = net.to(device)\n print(net)\n\n final_loss = nn.CrossEntropyLoss()\n\n if args.backprop:\n final_opt = optimizer_final(net.parameters(), **final_opt_arguments_dict)\n compute_local_loss = False\n update_local_loss = False\n else:\n final_opt = optimizer_final(net.softmax_layer.parameters(), **final_opt_arguments_dict)\n compute_local_loss = True\n update_local_loss = True\n\n final_scheduler = torch.optim.lr_scheduler.MultiStepLR(final_opt, **final_scheduler_arguments_dict)\n\n train_acc, val_acc, test_acc = utils.train_network(\n net, device, final_loss, final_opt, final_scheduler, args.n_epochs, train_loader, validation_loader,\n test_loader, compute_local_loss=compute_local_loss, update_local_loss=update_local_loss,\n record_train_acc=record_train_acc, record_val_acc=record_val_acc, record_test_acc=record_test_acc,\n print_results=True, backprop_batch_manhattan=args.backprop_batch_manhattan)\n\n return train_acc, val_acc, test_acc", "def main(_) -> None:\n params = train_utils.parse_configuration(FLAGS)\n mode = FLAGS.mode\n model_dir = FLAGS.model_dir\n if 'train' in FLAGS.mode:\n # Pure eval modes do not output yaml files. Otherwise continuous eval job\n # may race against the train job for writing the same file.\n train_utils.serialize_config(params, model_dir)\n\n if FLAGS.seed is not None:\n logging.info('Setting tf seed.')\n tf.random.set_seed(FLAGS.seed)\n\n task = RankingTask(\n params=params.task,\n optimizer_config=params.trainer.optimizer_config,\n logging_dir=model_dir,\n steps_per_execution=params.trainer.steps_per_loop,\n name='RankingTask')\n\n enable_tensorboard = params.trainer.callbacks.enable_tensorboard\n\n strategy = distribute_utils.get_distribution_strategy(\n distribution_strategy=params.runtime.distribution_strategy,\n all_reduce_alg=params.runtime.all_reduce_alg,\n num_gpus=params.runtime.num_gpus,\n tpu_address=params.runtime.tpu)\n\n with strategy.scope():\n model = task.build_model()\n\n def get_dataset_fn(params):\n return lambda input_context: task.build_inputs(params, input_context)\n\n train_dataset = None\n if 'train' in mode:\n train_dataset = strategy.distribute_datasets_from_function(\n get_dataset_fn(params.task.train_data),\n options=tf.distribute.InputOptions(experimental_fetch_to_device=False))\n\n validation_dataset = None\n if 'eval' in mode:\n validation_dataset = strategy.distribute_datasets_from_function(\n get_dataset_fn(params.task.validation_data),\n options=tf.distribute.InputOptions(experimental_fetch_to_device=False))\n\n if params.trainer.use_orbit:\n with strategy.scope():\n checkpoint_exporter = train_utils.maybe_create_best_ckpt_exporter(\n params, model_dir)\n trainer = RankingTrainer(\n config=params,\n task=task,\n model=model,\n optimizer=model.optimizer,\n train='train' in mode,\n evaluate='eval' in mode,\n train_dataset=train_dataset,\n validation_dataset=validation_dataset,\n checkpoint_exporter=checkpoint_exporter)\n\n train_lib.run_experiment(\n distribution_strategy=strategy,\n task=task,\n mode=mode,\n params=params,\n model_dir=model_dir,\n 
trainer=trainer)\n\n else: # Compile/fit\n checkpoint = tf.train.Checkpoint(model=model, optimizer=model.optimizer)\n\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n if latest_checkpoint:\n checkpoint.restore(latest_checkpoint)\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint,\n directory=model_dir,\n max_to_keep=params.trainer.max_to_keep,\n step_counter=model.optimizer.iterations,\n checkpoint_interval=params.trainer.checkpoint_interval)\n checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager)\n\n time_callback = keras_utils.TimeHistory(\n params.task.train_data.global_batch_size,\n params.trainer.time_history.log_steps,\n logdir=model_dir if enable_tensorboard else None)\n callbacks = [checkpoint_callback, time_callback]\n\n if enable_tensorboard:\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=model_dir,\n update_freq=min(1000, params.trainer.validation_interval),\n profile_batch=FLAGS.profile_steps)\n callbacks.append(tensorboard_callback)\n\n num_epochs = (params.trainer.train_steps //\n params.trainer.validation_interval)\n current_step = model.optimizer.iterations.numpy()\n initial_epoch = current_step // params.trainer.validation_interval\n\n eval_steps = params.trainer.validation_steps if 'eval' in mode else None\n\n if mode in ['train', 'train_and_eval']:\n logging.info('Training started')\n history = model.fit(\n train_dataset,\n initial_epoch=initial_epoch,\n epochs=num_epochs,\n steps_per_epoch=params.trainer.validation_interval,\n validation_data=validation_dataset,\n validation_steps=eval_steps,\n callbacks=callbacks,\n )\n model.summary()\n logging.info('Train history: %s', history.history)\n elif mode == 'eval':\n logging.info('Evaluation started')\n validation_output = model.evaluate(validation_dataset, steps=eval_steps)\n logging.info('Evaluation output: %s', validation_output)\n else:\n raise NotImplementedError('The mode is not implemented: %s' % mode)" ]
[ "0.63982433", "0.63317746", "0.6286564", "0.62145746", "0.6099212", "0.6017336", "0.59935385", "0.5987405", "0.59732693", "0.5968696", "0.59411806", "0.58942217", "0.58580977", "0.58526134", "0.5831714", "0.58247334", "0.5802177", "0.57978106", "0.5790498", "0.5788643", "0.57848793", "0.5765881", "0.5725443", "0.5709479", "0.5701298", "0.5694823", "0.56922585", "0.5683883", "0.56756234", "0.5645465" ]
0.6722839
0
Takes a results list and puts it in a pandas dataframe together with other relevant variables (runs, generations, and language class)
def language_stats_to_dataframe(results, n_runs, n_gens, possible_form_lengths):

    if len(possible_form_lengths) == 1:
        n_language_classes = 4
    else:
        n_language_classes = 7  # TODO: or should this be 6 (i.e. collapsing the two different reduplication strategies?)

    column_proportion = np.array(results)

    if n_language_classes == 4 and column_proportion.shape[2] > n_language_classes:
        column_proportion_compositional_summed = np.zeros((n_runs, n_gens, n_language_classes))
        for r in range(len(column_proportion_compositional_summed)):
            for g in range(len(column_proportion_compositional_summed[0])):
                column_proportion_compositional_summed[r][g] = np.array([column_proportion[r][g][0], column_proportion[r][g][1], column_proportion[r][g][2]+column_proportion[r][g][3], column_proportion[r][g][4]])
        column_proportion = column_proportion_compositional_summed.flatten()
    else:
        column_proportion = column_proportion.flatten()

    column_runs = []
    for i in range(n_runs):
        for j in range(n_gens):
            for k in range(n_language_classes):
                column_runs.append(i)
    column_runs = np.array(column_runs)

    column_generation = []
    for i in range(n_runs):
        for j in range(n_gens):
            for k in range(n_language_classes):
                column_generation.append(j)
    column_generation = np.array(column_generation)

    column_type = []
    for i in range(n_runs):
        for j in range(n_gens):
            if len(possible_form_lengths) == 1:
                column_type.append('degenerate')
                column_type.append('holistic')
                column_type.append('compositional')
                column_type.append('other')
            else:
                column_type.append('degenerate')
                column_type.append('holistic')
                column_type.append('holistic_diversify_signal')
                column_type.append('compositional')
                column_type.append('compositional_reduplicate_segments')
                column_type.append('compositional_reduplicate_whole_signal')
                column_type.append('other')

    data = {'run': column_runs,
            'generation': column_generation,
            'proportion': column_proportion,
            'class': column_type}

    lang_class_prop_over_gen_df = pd.DataFrame(data)

    return lang_class_prop_over_gen_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dataframe(result):\n # List of elements in the search result\n names = []\n snippet = []\n url = []\n \n # Append search results to list\n for j,item in enumerate(result):\n for i,element in enumerate(result[j]['items']):\n names.append(result[j]['items'][i]['title'])\n snippet.append(result[j]['items'][i]['snippet'])\n url.append(result[j]['items'][i]['link'])\n \n # Create a dataframe\n df = pd.DataFrame(list(zip(names, snippet,url)), \n columns =['name', 'snippet','url']) \n \n return df", "def make_results_df(results):\n max_val = max(x[1] for x in results)\n\n df = []\n for i in range(max_val + 1):\n df.append([])\n for j in range(max_val + 1):\n df[-1].append(results.get((i, j), np.nan))\n return pd.DataFrame(df)", "def prepare_wg_data(results):\n wg_df = pd.DataFrame(results)\n wg_df['search_engine'] = 'wg-gesucht.de'\n return wg_df", "def _make_results_dataframe(self):\n LOG.debug(\"Creating Results Dataframes.\")\n results_df = tfs.TfsDataFrame(index=self.twiss_df.index)\n results_df[\"S\"] = self.twiss_df[\"S\"]\n return results_df", "def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df", "def collect_results( results_dir = \"experiments\" ) :\n #%%\n import pandas as pd\n exps_fn = os.listdir( results_dir )\n dics = []\n for fname in exps_fn :\n with open( results_dir + \"/\" + fname, \"rt\", encoding=\"utf8\" ) as f_out :\n dics.append( json.load( f_out ) )\n\n results_df = pd.DataFrame( dics )\n #%%\n return results_df", "def parseResults(result):\n # Split the results based on newline characters\n results_cut = result.text.split('\\n')[12:-49]\n # Initialize lists of the values to be parsed from results_cut \n visit_id = []\n name = []\n ra_hour = []\n ra_min = []\n ra_sec = []\n dec_deg = []\n dec_min = []\n dec_sec = []\n v_mag = []\n ra_motion = []\n dec_motion = []\n # Iterate through results_cut and append them to the respective lists\n for line in results_cut:\n visit_id.append(int(line[6:12]))\n name.append(line[12:36])\n ra_hour.append(int(line[38:40]))\n ra_min.append(int(line[41:43]))\n ra_sec.append(float(line[44:48]))\n dec_deg.append(int(line[49:52]))\n dec_min.append(int(line[53:55]))\n dec_sec.append(int(line[56:58]))\n try:\n v_mag.append(float(line[60:64]))\n except ValueError:\n # If there is no reported v_mag for the object, return -99\n v_mag.append(-99.0)\n ra_motion.append('%s%i' % (line[84], int(line[82:84])))\n dec_motion.append('%s%i' % (line[91], int(line[89:91])))\n # Initialize the pandas dataframe to be returned\n results_df = pd.DataFrame(np.array([visit_id, name, ra_hour, ra_min, ra_sec, \n dec_deg, dec_min, dec_sec, v_mag, \n ra_motion, dec_motion]).T, \n columns=['visit_id', 'name', 'ra_hour', 'ra_min', 'ra_sec', \n 'dec_deg', 'dec_min', 'dec_sec', 'v_mag', \n 'ra_motion', 'dec_motion'])\n # Add the lists to the dataframe\n results_df['visit_id'] = pd.to_numeric(results_df['visit_id'])\n 
results_df['ra_hour'] = pd.to_numeric(results_df['ra_hour'])\n results_df['ra_min'] = pd.to_numeric(results_df['ra_min'])\n results_df['ra_sec'] = pd.to_numeric(results_df['ra_sec'])\n results_df['dec_deg'] = pd.to_numeric(results_df['dec_deg'])\n results_df['dec_min'] = pd.to_numeric(results_df['dec_min'])\n results_df['dec_sec'] = pd.to_numeric(results_df['dec_sec'])\n results_df['v_mag'] = pd.to_numeric(results_df['v_mag'])\n results_df['ra_motion'] = pd.to_numeric(results_df['ra_motion'])\n results_df['dec_motion'] = pd.to_numeric(results_df['dec_motion'])\n \n return results_df", "def get_results(r):\n myDict = {}\n for name in r[\"results\"]:\n myDict[name[\"name\"]] = {\n \"rank\": name[\"rank\"],\n \"ticker\": name[\"ticker\"],\n \"upvotes\": name[\"upvotes\"],\n \"mentions\": name[\"mentions\"],\n \"mentions_24h_ago\": name[\"mentions_24h_ago\"],\n }\n df = pd.DataFrame.from_dict(myDict, orient=\"index\")\n df[\"rank\"] = df[\"rank\"].astype(int)\n df[\"upvotes\"] = df[\"upvotes\"].astype(int)\n df[\"mentions\"] = df[\"mentions\"].astype(int)\n df[\"mentions_24h_ago\"] = df[\"mentions_24h_ago\"].astype(int)\n\n df[\"delta_mentions_24h\"] = df[\"mentions\"] - df[\"mentions_24h_ago\"]\n df = df[~(df[\"upvotes\"] <= 1000)]\n df = df.sort_values(by=[\"delta_mentions_24h\"], ascending=False)\n return df", "def _build_results(self):\n results = {}\n cols = []\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n cols.append(get_rate_column(pol, adjustment=adj, generated=False))\n cols.append(get_column(pol, adjustment=adj))\n cols.append(\"net_consumed_mwh\")\n for ba in self.regions:\n results[ba] = pd.DataFrame(\n index=self.generation.index, columns=cols, dtype=np.float64\n )\n return results", "def analyzeResults(self):\n results = [self.analyzeClusterPerformance(c) for c in self.clusterLabels]\n rDF = pd.DataFrame(results)\n self.resultList.append(rDF)", "def interpret_results(rules):\n df_res = rules.sort_values(by=['lift'], ascending=False)\n # df_res.head()\n return df_res", "def check_results_as_data_frame(check_to_check_results: Dict[Check, CheckResult]) -> DataFrame:\n check_names = []\n status = []\n descriptions = []\n for check_result in check_to_check_results.values():\n check_names.append(check_result.check)\n status.append(check_result.status)\n descriptions.append(check_result.description)\n return DataFrame(zip(check_names, status, descriptions), columns=[\"check_name\", \"status\", \"description\"])", "def export_results(self):\n problemIDs = list(set([result.problemID for result in self.results]))\n configIDs = list(set([result.configID for result in self.results]))\n\n labels = []\n labels.extend(TestResults._fields)\n labels.extend(SizeMetrics._fields) \n # Remove unused columns\n labels.remove(\"size_metrics\")\n labels.remove(\"problemID\")\n labels.remove(\"configID\")\n\n # output = pd.Panel(items=labels, major_axis=problemIDs, minor_axis=configIDs)\n multiindex = pd.MultiIndex.from_product([problemIDs, configIDs], names=[\"problems\", \"configs\"])\n\n output = pd.DataFrame(index=multiindex, columns=labels)\n output.columns.names = [\"stats\"]\n\n for result in self.results:\n problemID = result.problemID\n configID = result.configID\n for label in [label for label in TestResults._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result, label)\n for label in [label for label in SizeMetrics._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result.size_metrics, label)\n\n # Compute Statistics\n 
output.fillna(value=np.nan, inplace=True)\n output.sort_index(inplace=True)\n try:\n TestFramework.compute_mosek_error(output, \"opt_val\", \"mosek_config\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_mosek_error: 'mosek_config' or 'opt_val' field not found.\")\n try:\n TestFramework.compute_performance(output, \"solve_time\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_performance: 'solve_time' field not found.\")\n return output", "def get_pandas(self):\n return pd.DataFrame(self.results)", "def results_to_df(ary, ks, ns):\n \n # create columns as dictionaries\n results = {}\n results['algorithm'] = ['knn' for i in range(ary.size / 4)] + ['cnn' for j in range(ary.size / 4)]\n results['sample_size'] = ns * (2 * len(ks))\n k = []\n for ii in range(len(ks)):\n k += [ks[ii] for jj in range(len(ns))]\n results['k'] = k + k\n results['run_time'] = ary[0].reshape(60)\n results['accuracy'] = ary[1].reshape(60)\n \n return pd.DataFrame(results)", "def concat_all_evaluation_results(list_of_folders):\n\n\n train_eval_df_list = []\n val_eval_df_list = []\n train_val_eval_df_list = []\n\n\n for item in list_of_folders:\n path_to_eval_folder = os.path.join(EMBEDDING_DEST, item)\n files = os.listdir(path_to_eval_folder)\n\n for f in files:\n\n # for each evaluation result csv file, see whether it is from training set, or validation set, or training+validation\n if f.endswith(\"image_level_evaluation_result_top_tri.csv\"):\n\n if \"random\" in f:\n if \"random_training_validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_val_eval_df_list.append(df)\n\n elif \"random_training\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_eval_df_list.append(df)\n\n\n elif \"random_validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n val_eval_df_list.append(df)\n\n\n else:\n if \"triplet\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_val_eval_df_list.append(df)\n\n elif \"training\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_eval_df_list.append(df)\n\n elif \"validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n val_eval_df_list.append(df)\n\n\n # add 'training_' or 'validation_' to the column names of evaluation results coming from training and validation sets.\n # This is to be able to distinguish them in the final general csv file.\n\n columns = list(train_val_eval_df_list[0])\n train_columns = [\"training_\"+item for item in columns[1:]]\n train_columns = [columns[0]] + train_columns\n train_columns_dict ={}\n \n val_columns = [\"validation_\"+item for item in columns[1:]]\n val_columns = [columns[0]] + val_columns\n val_columns_dict ={}\n\n #train_and_val_columns = [\"train_and_validation_\"+item for item in columns[1:]]\n #train_and_val_columns = [columns[0]] + train_and_val_columns\n #train_and_val_columns_dict ={}\n\n\n for i in range(len(columns)):\n train_columns_dict[columns[i]] = train_columns[i]\n val_columns_dict[columns[i]] = val_columns[i]\n #train_and_val_columns_dict[columns[i]] = train_and_val_columns[i]\n\n\n concatenated_training_df = pd.concat(train_eval_df_list, sort=False)\n concatenated_training_df = concatenated_training_df.rename(columns=train_columns_dict)\n\n concatenated_validation_df = pd.concat(val_eval_df_list, sort=False)\n concatenated_validation_df = concatenated_validation_df.rename(columns=val_columns_dict)\n \n concatenated_train_and_validation_df = 
pd.concat(train_val_eval_df_list, sort=False)\n #concatenated_train_and_validation_df = concatenated_train_and_validation_df.rename(columns=train_and_val_columns_dict)\n\n\n concatenated_training_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\", \"training_all_evaluation_result_top_tri.csv\"),index=None)\n concatenated_validation_df.to_csv(os.path.join(EMBEDDING_DEST, \"compare_with_no_sz\", \"validation_all_evaluation_result_top_tri.csv\"),index=None)\n concatenated_train_and_validation_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\",\"training_and_validation_all_evaluation_result_top_tri.csv\"), index=None)\n\n # ---------\n # If you have columns on arguments, keep them in training but drop them in validation and train_and_val to prevent duplicates\n list_of_cols_in_validation_df = list(concatenated_validation_df)\n list_of_cols_in_train_val_df = list(concatenated_train_and_validation_df)\n args_cols = get_json_argument_list()\n\n args_cols_val = [\"validation_\"+item for item in args_cols]\n \n if len(list_of_cols_in_train_val_df) == len(list_of_cols_in_validation_df) and len(list_of_cols_in_train_val_df) > 7:\n concatenated_validation_df = concatenated_validation_df.drop(args_cols_val, axis=1, errors='ignore')\n concatenated_train_and_validation_df = concatenated_train_and_validation_df.drop(args_cols, axis=1, errors='ignore')\n\n\n # ---------\n\n all_three_df_list = [concatenated_training_df, concatenated_validation_df, concatenated_train_and_validation_df]\n concatenated_all_df = pd.concat(all_three_df_list, axis=1)\n concatenated_all_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\", \"all_evaluation_result_top_tri.csv\"), index=None)", "def result_to_dataframe(data):\n letters, statistics = zip(*data)\n dataframe = pd.DataFrame(data=list(statistics), index=letters, columns=['SUM', 'SUM_OF_SQUARES', 'MAX', 'MIN', 'COUNT']).sort_index()\n dataframe['MEAN'] = dataframe['SUM'] / dataframe['COUNT']\n dataframe['VARIANCE'] = dataframe['SUM_OF_SQUARES'] / dataframe['COUNT'] - dataframe['MEAN']**2\n dataframe['STANDARD_DEVIATION'] = dataframe['VARIANCE']**0.5\n logging.info(\"Total datapoints read: {}.\".format(dataframe['COUNT'].sum()))\n return dataframe", "def get_results(self, methods: list = None):\n df = pd.DataFrame(self._results)\n if (methods is not None) & ('method' in df.columns):\n df = df.loc[[x in methods for x in df.method.values]]\n return df", "def run_tests():\n with open(FILENAME) as file:\n # Loads testing parameters from the yaml file.\n tests = yaml.safe_load(file)\n\n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results['Last Average Score'] = \"\"\n results['No of Q-Learning episodes'] = \"\"\n\n # run experiments:\n for i, test in enumerate(test_dict):\n grid = Rooms(test[\"env_size\"], testing=True)\n learning = QLearning(grid, test[\"gamma\"], test[\"alpha\"], test[\"agent_start_pos\"])\n e_greedy = Policy(\"e-greedy\", test[\"epsilon\"], test[\"decay\"])\n greedy = Policy(policy_type=\"greedy\")\n experiment = Experiments(grid, learning, greedy, test[\"iters\"],\n test[\"agent_start_pos\"], test[\"test_no\"])\n\n for session in range(test[\"iters\"]):\n learning.run_multiple_episodes(test[\"batch_episodes\"], e_greedy)\n mean_reward = experiment.run_experiments(test[\"exp_per_batch\"])\n\n results.loc[i,'Last Average Score'] = mean_reward\n results.loc[i,'No of Q-Learning episodes'] = (session + 1) * test[\"batch_episodes\"]\n\n # save results to csv file\n 
filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n # plot & save graphs\n experiment.generate_results(test[\"test_no\"], test)\n\n return results", "def create_df_saved_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results[\"items\"]:\r\n try:\r\n track_name.append(items[\"track\"]['name'])\r\n track_id.append(items[\"track\"]['id'])\r\n artist.append(items[\"track\"][\"artists\"][0][\"name\"])\r\n duration.append(items[\"track\"][\"duration_ms\"])\r\n album.append(items[\"track\"][\"album\"][\"name\"])\r\n popularity.append(items[\"track\"][\"popularity\"])\r\n except TypeError: \r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n return df", "def save_to_dataframe(self):\n titles, years, months, days, authors = list(), list(), list(), list(), list()\n for doc in self.results[\"documents\"]:\n titles.append(doc['title'])\n years.append(doc['year'])\n months.append(doc['month'])\n days.append(doc['day'])\n authors.append(doc['authors'])\n return pd.DataFrame({\"title\": titles, \"years\": years, \"months\": months, \"days\": days, \"author\": authors})", "def read_results(\n self,\n model_run_names: list,\n model_names: list,\n output_names: list,\n timesteps: list = None,\n decisions: list = None,\n time_decision_tuples: list = None,\n ):\n\n self.validate_names(model_run_names, model_names, output_names)\n\n results_dict = self._store.get_results(\n model_run_names,\n model_names[0],\n output_names,\n timesteps,\n decisions,\n time_decision_tuples,\n )\n\n # Keep tabs on the units for each output\n for model_run_name in model_run_names:\n for output_name in output_names:\n res = results_dict[model_run_name][output_name]\n self._output_units[res.name] = res.unit\n\n # For each output, concatenate all requested model runs into a single data frame\n formatted_frames = []\n for output_name in output_names:\n # Get each DataArray as a pandas data frame and concatenate, resetting the index to\n # give back a flat data array\n list_of_df = [results_dict[x][output_name].as_df() for x in model_run_names]\n names_of_df = [x for x in results_dict.keys()]\n\n formatted_frames.append(\n pd.concat(\n list_of_df, keys=names_of_df, names=[\"model_run\"]\n ).reset_index()\n )\n\n # Append the other output columns to the first data frame\n formatted_frame = formatted_frames.pop(0)\n output_names.pop(0)\n\n for other_frame, output_name in zip(formatted_frames, output_names):\n assert (formatted_frame[\"model_run\"] == other_frame[\"model_run\"]).all()\n assert (\n formatted_frame[\"timestep_decision\"] == other_frame[\"timestep_decision\"]\n ).all()\n formatted_frame[output_name] = other_frame[output_name]\n\n # Unpack the timestep_decision tuples into individual columns and drop the combined\n formatted_frame[[\"timestep\", \"decision\"]] = pd.DataFrame(\n formatted_frame[\"timestep_decision\"].tolist(), index=formatted_frame.index\n )\n\n formatted_frame = formatted_frame.drop(columns=[\"timestep_decision\"])\n\n # Now reorder the columns. 
Want model_run then timestep then decision\n cols = formatted_frame.columns.tolist()\n\n assert cols[0] == \"model_run\"\n cols.insert(1, cols.pop(cols.index(\"timestep\")))\n cols.insert(2, cols.pop(cols.index(\"decision\")))\n assert cols[0:3] == [\"model_run\", \"timestep\", \"decision\"]\n\n return formatted_frame[cols]", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def save_results(self):\n results = pd.concat([\n pd.DataFrame(self.IDs.cpu().numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.cpu().numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.cpu().numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.cpu().numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.cpu().numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.cpu().numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)", "def _load_results(self):\n\n _LOG.debug(\"stats colnames: %s\", \", \".join(self._stats_colnames))\n _LOG.debug(\"additional colnames: %s\", \", \".join(self._more_colnames))\n\n for res in self.rsts:\n _LOG.debug(\"hover colnames: %s\", \", \".join(self._hov_colnames[res.reportid]))\n\n colnames = []\n for colname in self._hov_colnames[res.reportid] + self._more_colnames:\n if colname in res.colnames_set:\n colnames.append(colname)\n\n csel = Trivial.list_dedup(self._stats_colnames + colnames)\n res.clear_filts()\n res.set_csel(csel)\n res.load_df()\n\n # We'll be dropping columns and adding temporary columns, so we'll affect the original\n # dataframe. 
This is more effecient than creating copies.\n self._mangle_loaded_res(res)", "def _process_results(self):\n self.portfolio.create_backtest_result_dataframe()\n stats = self._show_stats()\n return stats", "def create_pandas_dataframes():\n train, test = Email.load_emails_from_data()\n\n train_y = [int(t.is_spam) for t in train]\n test_y = [int(t.is_spam) for t in test]\n\n vocab = get_vocabulary_vector(train)\n print(\"[ INF ] Vocab Size:\", len(vocab))\n\n train = [t.vectorize_tokens(vocab) for t in train]\n test = [t.vectorize_tokens(vocab) for t in test]\n\n train = pd.DataFrame.from_records(train, columns=vocab)\n test = pd.DataFrame.from_records(test, columns=vocab)\n\n train['is_spam'] = train_y\n test['is_spam'] = test_y\n\n return train, test", "def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df", "def write_results(self, results):\n predictions = open('hmm_results.csv', 'w')\n predictions.write(\"Type,Prediction\")\n for type in results:\n if type == 'O':\n continue\n predictions.write(\"\\n\" + str(type) + \",\")\n for interval in results[type]:\n predictions.write(str(interval) + \" \")\n predictions.close()" ]
[ "0.72283614", "0.71309644", "0.6913391", "0.6884854", "0.6688005", "0.65761745", "0.6487272", "0.6475751", "0.6433946", "0.632648", "0.6261752", "0.6226283", "0.6154", "0.6113945", "0.610395", "0.6074205", "0.6056309", "0.60420865", "0.6016165", "0.5957813", "0.59397256", "0.5927495", "0.59171116", "0.59171116", "0.59165233", "0.58962923", "0.58832914", "0.5858185", "0.58419687", "0.58330846" ]
0.73413634
0
Takes a pandas dataframe which contains the proportions of language classes over generations and plots timecourses
def plot_timecourse_language_types(lang_class_prop_over_gen_df, title, file_path, file_name):
    sns.set_style("darkgrid")
    sns.set_context("talk")

    fig, ax = plt.subplots()

    if len(possible_form_lengths) == 1:
        palette = sns.color_palette(["black", "red", "green", "grey"])
    else:
        palette = sns.color_palette(["black",
                                     sns.color_palette("colorblind")[3],
                                     sns.color_palette("colorblind")[1],
                                     sns.color_palette("colorblind")[2],
                                     sns.color_palette("colorblind")[9],
                                     sns.color_palette("colorblind")[0],
                                     sns.color_palette("colorblind")[7]])

    sns.lineplot(x="generation", y="proportion", hue="class", data=lang_class_prop_over_gen_df, palette=palette)
    # sns.lineplot(x="generation", y="proportion", hue="class", data=lang_class_prop_over_gen_df, palette=palette, ci=95, err_style="bars")

    plt.tick_params(axis='both', which='major', labelsize=18)
    plt.tick_params(axis='both', which='minor', labelsize=18)
    plt.ylim(-0.05, 1.05)
    plt.title(title, fontsize=22)
    plt.xlabel('Generation', fontsize=20)
    plt.ylabel('Mean proportion', fontsize=20)
    handles, labels = ax.get_legend_handles_labels()

    labels = ['D', 'H', 'H+Div.', 'C', 'C+Red.-part', 'C+Red.-whole', 'O']

    # ax.legend(handles=handles[1:], labels=labels[1:])
    ax.legend(handles=handles, labels=labels)
    plt.tight_layout()
    plt.savefig(file_path + "Timecourse_plot_lang_types_" + file_name + ".png")
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_word_class_pr_genre(df):\n df['nouns'] = df['nouns'] * 100\n df['verbs'] = df['verbs'] * 100\n df['adverbs'] = df['adverbs'] * 100\n # plotting nouns\n plotting_helper_method('nouns', 'genre', df)\n plt.title('Amount of nouns pr song pr. genre')\n plt.xlabel(\"Amount of nouns in each song\")\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/nouns_pr_genre_plot')\n\n # plotting verbs\n plotting_helper_method('verbs', 'genre', df)\n plt.title('Amount of verbs pr song pr. genre')\n plt.xlabel('Amount of verbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/verbs_pr_genre_plot')\n\n # plotting adverbs\n plotting_helper_method('adverbs', 'genre', df)\n plt.title('Amount of adverbs pr song pr. genre')\n plt.xlabel('Amount of adverbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/adverbs_pr_genre_plot')", "def dataframe_to_language_stats(dataframe, n_runs, n_batches, n_gens, possible_form_lengths):\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. collapsing the two different reduplication strategies?)\n proportion_column = np.array(dataframe['proportion'])\n proportion_column_as_results = proportion_column.reshape((n_runs*n_batches, n_gens, n_language_classes))\n return proportion_column_as_results", "def plot_pie_charts_of_word_class_distribution(df):\n genre_dict = {\n 'g':'Rock',\n 'b':'Hip-Hop',\n 'r':'Pop'\n }\n for _, genre in genre_dict.items():\n filtered_df = df[df['genre'] == genre]\n \n # plotting circle diagram for the specific genre\n avg_percentage_nouns = filtered_df['nouns'].mean()\n avg_percentage_verbs = filtered_df['verbs'].mean()\n avg_percentage_adverbs = filtered_df['adverbs'].mean()\n\n total = avg_percentage_nouns + avg_percentage_nouns + avg_percentage_nouns\n nouns = avg_percentage_nouns / total * 100\n verbs = avg_percentage_verbs / total * 100\n adverbs = avg_percentage_adverbs / total * 100\n\n # Pie chart\n labels = ['Nouns', 'Verbs', 'Adverbs']\n sizes = [nouns, verbs, adverbs]\n\n _, ax1 = plt.subplots()\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n # Equal aspect ratio ensures that pie is drawn as a circle\n ax1.axis('equal') \n plt.tight_layout()\n plt.title(f'Circle diagram of the genre \"{genre}\"s average word classes distribution')\n plt.show()\n # plt.savefig(f'src/visualization/feature_plots/{genre}_word_class_distribution')", "def plot_barplot_language_types(lang_class_prop_over_gen_df, title, file_path, file_name, n_runs, n_batches, n_gens, gen_start, lang_class_baselines_all, lang_class_baselines_fully_expressive, possible_form_lengths):\n\n sns.set_style(\"darkgrid\")\n sns.set_context(\"talk\")\n\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. 
collapsing the two different reduplication strategies?)\n\n proportion_column_as_results = dataframe_to_language_stats(lang_class_prop_over_gen_df, n_runs, n_batches, n_gens, possible_form_lengths)\n\n proportion_column_from_start_gen = proportion_column_as_results[:, gen_start:]\n\n proportion_column_from_start_gen = proportion_column_from_start_gen.flatten()\n\n runs_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n for k in range(n_language_classes):\n runs_column_from_start_gen.append(i)\n runs_column_from_start_gen = np.array(runs_column_from_start_gen)\n\n generation_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n for k in range(n_language_classes):\n generation_column_from_start_gen.append(j)\n generation_column_from_start_gen = np.array(generation_column_from_start_gen)\n\n class_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n if n_language_classes == 4:\n class_column_from_start_gen.append('degenerate')\n class_column_from_start_gen.append('holistic')\n class_column_from_start_gen.append('compositional')\n class_column_from_start_gen.append('other')\n elif n_language_classes == 7:\n class_column_from_start_gen.append('D')\n class_column_from_start_gen.append('H')\n class_column_from_start_gen.append('H+Div.')\n class_column_from_start_gen.append('C')\n class_column_from_start_gen.append('C+Red.-part')\n class_column_from_start_gen.append('C+Red.-whole')\n class_column_from_start_gen.append('O')\n\n new_data_dict = {'run': runs_column_from_start_gen,\n 'generation': generation_column_from_start_gen,\n 'proportion': proportion_column_from_start_gen,\n 'class': class_column_from_start_gen}\n\n lang_class_prop_over_gen_df_from_starting_gen = pd.DataFrame(new_data_dict)\n\n if len(possible_form_lengths) == 1:\n palette = sns.color_palette([\"black\", \"red\", \"green\", \"grey\"])\n else:\n palette = sns.color_palette([\"black\",\n sns.color_palette(\"colorblind\")[3],\n sns.color_palette(\"colorblind\")[1],\n sns.color_palette(\"colorblind\")[2],\n sns.color_palette(\"colorblind\")[9],\n sns.color_palette(\"colorblind\")[0],\n sns.color_palette(\"colorblind\")[7]])\n\n sns.barplot(x=\"class\", y=\"proportion\", data=lang_class_prop_over_gen_df_from_starting_gen, palette=palette)\n\n # plt.axhline(y=lang_class_baselines_all[0], xmin=0.0, xmax=0.25, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[1], xmin=0.25, xmax=0.5, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[2], xmin=0.5, xmax=0.75, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[3], xmin=0.75, xmax=1.0, color='k', linestyle='--', linewidth=2)\n #\n # if title == 'Mutual Understanding Only' or title == 'Minimal Effort & Mutual Understanding':\n # plt.axhline(y=lang_class_baselines_fully_expressive[0], xmin=0.25, xmax=0.5, color='0.6', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_fully_expressive[1], xmin=0.5, xmax=0.75, color='0.6', linestyle='--', linewidth=2)\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n # plt.xlabel('Language class')\n plt.xlabel('', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n plt.tight_layout()\n\n if holistic_without_partial_meaning is True:\n plt.savefig(file_path + 
\"Barplot_lang_types_\" + file_name + \"_burn_in_\" + str(gen_start) + \".png\")\n else:\n plt.savefig(file_path + \"Barplot_lang_types_\" + file_name + \"_burn_in_\" + str(gen_start) + \"_NEW.png\")\n plt.show()", "def leitner_proportions(df):\n denom = df.shape[0]\n prop_dict = {}\n\n for i in range(1,6):\n df_i = df[df['comfort_level'] == i]\n numer = df_i.shape[0]\n prop_dict[i] = numer / denom\n\n prop_df = pd.DataFrame.from_dict([prop_dict], orient='columns') \n\n prop_df = prop_df.T.rename(columns={0:'proportion'}) \n \n return prop_df", "def getFigBysubClass(df, path, nameClass):\n\ttmp = pd.DataFrame()\n\ttmp = tmp.append(df)\n\tdicoNbTrClass = countTranscript.getFig3Percent(path)\n\tdicoNbTrBt = countTranscript.getFig5Percent(path)\n\tdel tmp['nuclA']\n\tdel tmp['nuclT']\n\tdel tmp['nuclN']\n\tdel tmp['Type']\n\tclassDf = pd.DataFrame()\n\tclassDftmp = tmp[ tmp.Class == nameClass]\n\tgroups = classDftmp.groupby('Biotype')\n\tfor name, group in groups:\n\t\tgroupFilter = group[ group.Location == 'intron' ]\n\t\tgroupFilter = groupFilter.append( group[ group.Location == 'exon' ])\n\t\trow = sumSubTable(groupFilter, name)\n\t\trow['Biotype'] = name\n\t\trow['Class'] = nameClass\n\t\tif name not in dicoNbTrBt['Tot']:\n\t\t\tdicoNbTrBt['Tot'][name] = 0\n\t\tif name not in dicoNbTrBt['Wt']:\n\t\t\tdicoNbTrBt['Wt'][name] = 0\n\t\tif name not in dicoNbTrBt['Shuf']:\n\t\t\tdicoNbTrBt['Shuf'][name] = 0\n\t\trow['nbTr'] = dicoNbTrBt['Tot'][name]\n\t\trow['NbTrpG4Wt'] = dicoNbTrBt['Wt'][name]\n\t\trow['NbTrpG4Shuf'] = dicoNbTrBt['Shuf'][name]\n\t\trow.update(computePercent(row))\n\t\trow = pd.DataFrame(row, index=[len(classDftmp)+1])\n\t\tclassDf = classDf.append(row)\n\trow = {'Class' : nameClass,\n\t\t\t'Biotype' : nameClass,\n\t\t\t'nuclG' : sum(classDftmp.nuclG),\n\t\t\t'nuclC' : sum(classDftmp.nuclC),\n\t\t\t'nbTr' : dicoNbTrClass['Tot'][nameClass],\n\t\t\t'NbpG4rWt' : sum(classDftmp.NbpG4rWt),\n\t\t\t'NbpG4rShuf' : sum(classDftmp.NbpG4rShuf),\n\t\t\t'NbTrpG4Wt' : dicoNbTrClass['Wt'][nameClass],\n\t\t\t'NbTrpG4Shuf' : dicoNbTrClass['Shuf'][nameClass],\n\t\t\t'Tot' : sum(classDftmp.Tot)}\n\trow.update(computePercent(row))\n\trow = pd.DataFrame(row, index=[len(classDf)+1])\n\tclassDf = classDf.append(row)\n\tclassDf = computeDensity(classDf, 'Segment')\n\treturn classDf", "def language_stats_to_dataframe(results, n_runs, n_gens, possible_form_lengths):\n\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. 
collapsing the two different reduplication strategies?)\n\n column_proportion = np.array(results)\n\n if n_language_classes == 4 and column_proportion.shape[2] > n_language_classes:\n column_proportion_compositional_summed = np.zeros((n_runs, n_gens, n_language_classes))\n for r in range(len(column_proportion_compositional_summed)):\n for g in range(len(column_proportion_compositional_summed[0])):\n column_proportion_compositional_summed[r][g] = np.array([column_proportion[r][g][0], column_proportion[r][g][1], column_proportion[r][g][2]+column_proportion[r][g][3], column_proportion[r][g][4]])\n column_proportion = column_proportion_compositional_summed.flatten()\n\n else:\n column_proportion = column_proportion.flatten()\n\n column_runs = []\n for i in range(n_runs):\n for j in range(n_gens):\n for k in range(n_language_classes):\n column_runs.append(i)\n column_runs = np.array(column_runs)\n\n column_generation = []\n for i in range(n_runs):\n for j in range(n_gens):\n for k in range(n_language_classes):\n column_generation.append(j)\n column_generation = np.array(column_generation)\n\n column_type = []\n for i in range(n_runs):\n for j in range(n_gens):\n if len(possible_form_lengths) == 1:\n column_type.append('degenerate')\n column_type.append('holistic')\n column_type.append('compositional')\n column_type.append('other')\n else:\n column_type.append('degenerate')\n column_type.append('holistic')\n column_type.append('holistic_diversify_signal')\n column_type.append('compositional')\n column_type.append('compositional_reduplicate_segments')\n column_type.append('compositional_reduplicate_whole_signal')\n column_type.append('other')\n\n data = {'run': column_runs,\n 'generation': column_generation,\n 'proportion': column_proportion,\n 'class': column_type}\n\n lang_class_prop_over_gen_df = pd.DataFrame(data)\n\n return lang_class_prop_over_gen_df", "def grid_plot_google(proverbs_list, data, dim = (4,4), ylog = False): \n\n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0], dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2)\n \n \n res = None\n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize=14)\n fig.text(0.02, 0.5, 'Frequency among all volumes in Google Books', va='center', rotation='vertical', fontsize=14)\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i].lower()),horizontalalignment='left', transform=ax.transAxes)\n\n ts = data[data.proverb ==proverbs_list[i]]\n ts = ts[data.year >= 1800]\n ts.year = pd.to_datetime(ts.year, format = '%Y', errors='coerce')\n ts.index = ts.year\n ts = ts.sort_index()\n ts = ts.reindex(pd.date_range('01/01/1800', '01/01/2019', freq = 'AS'), fill_value=0)\n #get 5-year rolling average\n ts2 = ts.copy()\n ts2 = ts2.rolling(window = 5).mean()\n print(ts)\n\n if res != None:\n ts = ts.resample(res).sum()\n \n if ylog == False:\n pass\n\n elif ylog == True:\n ax.set_yscale('log') \n \n ax.plot(ts.index, ts['vol_norm'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['vol_norm'], alpha = 0.9, color='darkorange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def plot_timecourse_repair_counts(repair_counts_over_gen_df, title, file_path, file_name):\n sns.set_style(\"darkgrid\")\n 
sns.set_context(\"talk\")\n\n fig, ax = plt.subplots()\n\n palette = sns.color_palette(\"colorblind\")\n\n sns.lineplot(x=\"generation\", y=\"independent_repair_proportion\", data=repair_counts_over_gen_df, palette=palette)\n # sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette, ci=95, err_style=\"bars\")\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n plt.xlabel('Generation', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n # handles, labels = ax.get_legend_handles_labels()\n #\n # labels = ['D', 'H', 'H+Div.', 'C', 'C+Red.-part', 'C+Red.-whole', 'O']\n\n # # ax.legend(handles=handles[1:], labels=labels[1:])\n # ax.legend(handles=handles, labels=labels)\n plt.tight_layout()\n plt.savefig(file_path + \"Timecourse_plot_repairs_\" + file_name + \".png\")\n plt.show()", "def proportions_visualiser(\n df: pd.core.frame.DataFrame,\n colum_name: str = \"Sensor Glucose (mg/dL)\",\n limits: Dict[str, int] = {\"low\": 70, \"high\": 180},\n windows: Dict[str, int] = {\"weekly\": 7, \"monthly\": 30},\n kind: str = \"TIR\",\n) -> NoReturn:\n\n valid_kinds = [\"TIR\", \"TBR\", \"TAR\"]\n\n if \"low\" not in limits.keys() or \"high\" not in limits.keys():\n raise Exception(f\"limits.keys() should be ['low', 'high'] not {limits.keys()}\")\n\n titles = {\n \"TIR\": f\"Time In Range [{limits['low']},{limits['high']})\",\n \"TAR\": f\"Time Above Range >= {limits['high']}\",\n \"TBR\": f\"Time Below Range < {limits['low']}\",\n }\n\n kind = kind.upper()\n if kind not in valid_kinds:\n raise Exception(\n f\"Invalid kind `{kind}`, select one from {valid_kinds} or refer to help({self.__name__})\"\n )\n\n TIR = (\n lambda y: 100\n * y[(y >= limits[\"low\"]) & (y < limits[\"high\"])].count()\n / y.count()\n )\n TBR = lambda y: 100 * y[(y < limits[\"low\"])].count() / y.count()\n TAR = lambda y: 100 * y[(y >= limits[\"high\"])].count() / y.count()\n\n _proportions = df[colum_name].groupby(df.index.date).apply(eval(f\"{kind}\"))\n\n _proportions.plot(**{\"label\": \"daily\"})\n\n for key, value in windows.items():\n _ax = _proportions.rolling(value).mean().plot(**{\"label\": key})\n\n _mean_proportion = _proportions.mean()\n plt.ylabel(\"Percentage\")\n plt.axhline(\n _mean_proportion,\n **{\"label\": f\"mean = {round(_mean_proportion,1)}\", \"c\": \"blue\"},\n )\n plt.legend()\n plt.title(titles[kind])", "def visualize(X: pd.DataFrame, y: pd.DataFrame) -> None:\r\n y[\"Action\"].value_counts().plot.pie(explode=(0.02, 0.04, 0.05, 0.09), title=\"Proportion of classes in dataset\")\r\n plt.savefig(\"Figures/proportions\")\r\n\r\n for i, column in enumerate(X.columns):\r\n fig, ax = plt.subplots(1, 2)\r\n\r\n ax[0].hist(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[0].set_xlabel(column)\r\n ax[0].set_ylabel(\"Frequency\")\r\n\r\n ax[1].boxplot(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[1].set_xlabel(\"Action\")\r\n ax[1].set_ylabel(column)\r\n\r\n X[column].hist(by=y[\"Action\"])\r\n\r\n ax[0].legend([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n ax[1].set_xticklabels([\"allow\", \"deny\", \"drop\", 
\"reset-both\"])\r\n fig.suptitle(\"Distribution of classes among attributes\")\r\n plt.savefig(\"Figures/boxplots\")", "def makeComparsionChart(columns, data):\n fig = plt.figure(figsize=(16, 10))\n gs = gridspec.GridSpec(2, 3, wspace = 0.2, hspace=0.2, right=0.96, left=0.04)\n ax1 = plt.subplot(gs[0, 0:1], label=\"\")\n ax2 = plt.subplot(gs[0, 1:2], label=\"\" )\n ax3 = plt.subplot(gs[0, 2:3], label=\"\" )\n ax4 = plt.subplot(gs[1, 0:1], label=\"\" )\n ax5 = plt.subplot(gs[1, 1:2], label=\"\" )\n ax1.set_title('Before Scaling')\n ax2.set_title('After Standard Scaler')\n ax3.set_title('After Min-Max Scaler')\n ax4.set_title('After Roboust Scaler')\n ax5.set_title('After Normalization')\n\n for column in columns:\n sns.kdeplot(data[0][column], ax=ax1, legend=False)\n sns.kdeplot(data[1][column], ax=ax2, legend=False)\n sns.kdeplot(data[2][column], ax=ax3, legend=False)\n sns.kdeplot(data[3][column], ax=ax4, legend=False)\n sns.kdeplot(data[4][column], ax=ax5, legend=False)\n\n plt.show()", "def samplecost(app, endclasses, fxnmode, samptype='std', title=\"\"):\n associated_scens=[]\n for phase in app.phases:\n associated_scens = associated_scens + app.scenids.get((fxnmode, phase), [])\n costs = np.array([endclasses[scen]['cost'] for scen in associated_scens])\n times = np.array([time for phase, timemodes in app.sampletimes.items() if timemodes for time in timemodes if fxnmode in timemodes.get(time)] ) \n rates = np.array(list(app.rates_timeless[fxnmode].values()))\n \n tPlot, axes = plt.subplots(2, 1, sharey=False, gridspec_kw={'height_ratios': [3, 1]})\n phasetimes_start =[times[0] for phase, times in app.phases.items()]\n phasetimes_end =[times[1] for phase, times in app.phases.items()]\n ratetimes =[]\n ratesvect =[]\n phaselocs = []\n for (ind, phasetime) in enumerate(phasetimes_start):\n axes[0].axvline(phasetime, color=\"black\") \n phaselocs= phaselocs +[(phasetimes_end[ind]-phasetimes_start[ind])/2 + phasetimes_start[ind]]\n\n axes[1].axvline(phasetime, color=\"black\") \n ratetimes = ratetimes + [phasetimes_start[ind]] + [phasetimes_end[ind]]\n ratesvect = ratesvect + [rates[ind]] + [rates[ind]]\n #axes[1].text(middletime, 0.5*max(rates), list(app.phases.keys())[ind], ha='center', backgroundcolor=\"white\")\n #rate plots\n axes[1].set_xticks(phaselocs)\n axes[1].set_xticklabels(list(app.phases.keys()))\n \n axes[1].plot(ratetimes, ratesvect)\n axes[1].set_xlim(phasetimes_start[0], phasetimes_end[-1])\n axes[1].set_ylim(0, np.max(ratesvect)*1.2 )\n axes[1].set_ylabel(\"Rate\")\n axes[1].set_xlabel(\"Time (\"+str(app.units)+\")\")\n axes[1].grid()\n #cost plots\n axes[0].set_xlim(phasetimes_start[0], phasetimes_end[-1])\n axes[0].set_ylim(0, 1.2*np.max(costs))\n if samptype=='fullint':\n axes[0].plot(times, costs, label=\"cost\")\n else:\n if samptype=='quadrature' or samptype=='pruned piecewise-linear': \n sizes = 1000*np.array([weight if weight !=1/len(timeweights) else 0.0 for phase, timeweights in app.weights[fxnmode].items() for time, weight in timeweights.items() if time in times])\n axes[0].scatter(times, costs,s=sizes, label=\"cost\", alpha=0.5)\n axes[0].stem(times, costs, label=\"cost\", markerfmt=\",\", use_line_collection=True)\n \n axes[0].set_ylabel(\"Cost\")\n axes[0].grid()\n if title: axes[0].set_title(title)\n elif type(fxnmode[0])==tuple: axes[0].set_title(\"Cost function of \"+str(fxnmode)+\" over time\")\n else: axes[0].set_title(\"Cost function of \"+fxnmode[0]+\": \"+fxnmode[1]+\" over time\")\n #plt.subplot_adjust()\n plt.tight_layout()", "def 
costovertime(endclasses, app, costtype='expected cost'):\n costovertime = cost_table(endclasses, app)\n plt.plot(list(costovertime.index), costovertime[costtype])\n plt.title('Total '+costtype+' of all faults over time.')\n plt.ylabel(costtype)\n plt.xlabel(\"Time (\"+str(app.units)+\")\")\n plt.grid()", "def grid_plot_nyt(proverbs_list, data, dim = (4,4), res = '1M'):\n \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0], dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.3, hspace = 0.2)\n \n\n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize=14)\n fig.text(0.02, 0.5, 'Frequency among all articles in NYT', va='center', rotation='vertical', fontsize=14)\n \n #get month resolution\n ts = data.copy()\n resamp = ts.resample(res).sum()\n resamp = resamp.div(resamp['total'], axis =0)\n ts = resamp\n \n #get year resolution\n ts2 = data.copy()\n resamp = ts.resample('1Y').sum()\n resamp = resamp.div(resamp['total'], axis =0)\n ts2 = resamp\n \n #make each plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n\n print(ts[proverbs_list[i]])\n ax.plot(ts.index, ts[proverbs_list[i]], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2[proverbs_list[i]], alpha = 0.9, color = 'orange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def dashboard(df):\n panamax = (df.loc[:, \"Class\"] == \"Panamax\")\n post_panamax = (df.loc[:, \"Class\"] == \"Post-Panamax\")\n nearshore = (df.loc[:, \"Location\"] == \"Nearshore\")\n offshore = (df.loc[:, \"Location\"] == \"Offshore\")\n inbound = (df.loc[:, \"Course Behavior\"] == \"Inbound\")\n outbound = (df.loc[:, \"Course Behavior\"] == \"Outbound\")\n dat = {\"Proportion<br>of Transits\":[\n str(round(sum(panamax) / len(df) * 100, 2)) + \"%\",\n str(round(sum(post_panamax) / len(df) * 100, 2)) + \"%\", \"100%\"\n ],\n \"Compliance<br>Rate\":[\n str(round(sum(panamax & (df.loc[:, \"VSPD kn\"] <= 10)) /\n sum(panamax) * 100, 2)) + \"%\",\n str(round(sum(post_panamax & (df.loc[:, \"VSPD kn\"] <= 10)) /\n sum(post_panamax) * 100, 2)) + \"%\",\n str(round(sum(df.loc[:, \"VSPD kn\"] <= 10) / len(df) * 100, 2)) + \"%\"\n ],\n \"Mean<br>VSPD\":[\n str(round(df[panamax].loc[:, \"VSPD kn\"].mean(), 2)) + \" kn\",\n str(round(df[post_panamax].loc[:, \"VSPD kn\"].mean(), 2)) + \" kn\",\n str(round(df.loc[:, \"VSPD kn\"].mean(), 2)) + \" kn\"\n ],\n \"Nearshore<br>Median VSPD\":[\n str(round(df[nearshore & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[nearshore & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[nearshore].loc[:, \"VSPD kn\"].median(), 2)) + \" kn\"\n ],\n \"Offshore<br>Median VSPD\":[\n str(round(df[offshore & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[offshore & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[offshore].loc[:, \"VSPD kn\"].median(), 2)) + \" kn\"\n ],\n \"Inbound<br>Median VSPD\":[\n str(round(df[inbound & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[inbound & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[inbound].loc[:, \"VSPD kn\"].median(), 2)) + \" 
kn\"\n ],\n \"Outbound<br>Median VSPD\":[\n str(round(df[outbound & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[outbound & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[outbound].loc[:, \"VSPD kn\"].median(), 2)) + \" kn\"\n ],\n \"VSPD-WSPD<br>Correlation\":[\n str(round(df[panamax].dropna().loc[:, (\"VSPD kn\", \"WSPD mph\")].corr()\n .iloc[0][1], 2)),\n str(round(df[post_panamax].dropna().loc[:,\n (\"VSPD kn\", \"WSPD mph\")].corr().iloc[0][1], 2)),\n str(round(df.dropna().loc[:,\n (\"VSPD kn\", \"WSPD mph\")].corr().iloc[0][1], 2))\n ]\n }\n index = [\"Panamax\", \"Post-Panamax\", \"Combined\"]\n return pd.DataFrame(dat, index)", "def trip_duration_stats(df):", "def forebears (WFROM,WTO,efrom, eto, g=25):\n \n c.execute(\"\"\"\n SELECT wyear, eyear, count (eyear), wnationality\n FROM clean \n WHERE (eyear IS NOT Null) AND (wyear IS NOT Null)\n AND WYEAR >= ? and WYEAR <= ? \n AND eyear >= ? AND eyear <= ? \n GROUP BY wyear, eyear\n ORDER BY wyear, eyear\"\"\", (WFROM, WTO, efrom, eto))\n\n years = c.fetchall()\n epigraphtotal = sum (s for (x,y,s,n) in years)\n #plt.xlim(WFROM, WTO)\n #plt.ylim(100, -1500)\n #colors = list(mcolors.TABLEAU_COLORS.keys()) *20\n #print(colors)\n \n \n gen =dd(lambda: dd(int))\n gentotal= dd(int)\n for (x,y,s,n) in years:\n gen[generation(x,g)][generation(y-x,g)] += 1\n gentotal[generation(x,g)] +=1\n \n for x in gen:\n for y in gen[x]:\n print(x, y, gen[x][y], gentotal[x])\n\n \n\n plt.figure(figsize=(10, 5))\n ax=plt.axes()\n\n\n #df.plot(colormap=gray) \n cumtotal = [0]*len(gen)\n\n for d in range(0,-200, -1):\n #for d in range(min(gen.keys()),max(gen.keys()),-1):\n xv = list(gen.keys())\n yv = [rat(gen[x][d],gentotal[x]) for x in xv]\n plt.bar(xv, yv, bottom=cumtotal,\n tick_label=[x*g for x in xv])\n cumtotal = [x + y for x, y in zip(yv, cumtotal)]\n #colors.pop()\n #print(d, cumtotal)\n plt.xlabel('Year of Work (in generations)')\n plt.ylabel(f'Share of Distance to forebear (in {g} year generations)')\n plt.title(f'Distance back vs Year of Work ({epigraphtotal} epigraphs)')\n plt.savefig(f\"figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}-{g}.png\")\n plt.close()", "def plot_df(data_frame):\n plt.figure(figsize = (10, 5))\n chart = sns.countplot(data_frame['label'], \n palette=\"Set1\"\n )\n plt.show()", "def visualize_timecourses_grid(timecourses_cols, cols_names, out_filename, hgap=20, vgap=20):\n\n\tnframes, ncomponents = timecourses_cols[0].shape\n\tncols = len(timecourses_cols)\n\n\tf, axarr = pyplot.subplots(ncomponents, ncols)\n\n\t# fig = pyplot.figure(figsize=(4, 2))\n\n\tthemin = min([timecourses_cols[col].min() for col in range(ncols)])\n\tthemax = min([timecourses_cols[col].max() for col in range(ncols)])\n\n\tfor k in range(ncomponents):\n\t\tfor col in range(ncols):\n\t\t\taxarr[k, col].plot(timecourses_cols[col][:,k])\n\n\t\t\tif k == 0:\n\t\t\t\taxarr[k, col].set_title(cols_names[col])\n\n\t\t\taxarr[k, col].tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')\n\t\t\taxarr[k, col].set_ylim(themin, themax)\n\t\t\taxarr[k, col].set_xticklabels([])\n\t\t\taxarr[k, col].set_yticklabels([])\n\n\tpyplot.tight_layout()\n\tpyplot.savefig(out_filename)\n\tpyplot.close(f)", "def proportion_with_cardinals(df, PATH):\n \n df_test = df.copy()\n df_test['cardinal'] = df.title.apply(contains_cardinal)\n\n click = df_test[df_test.target == 1]\n non = df_test[df_test.target == 0]\n click = click.groupby(['cardinal']).target.count()\n non = 
non.groupby(['cardinal']).target.count()\n \n non = non[1]/non[0] * 100\n click = click[1]/click[0] * 100\n # plot the results\n fig, ax = plt.subplots(figsize=(12,6))\n sns.barplot(x=['Normal', \"Clickbait\"], y=[non, click], ax=ax)\n plt.title(\"Percent of Titles Containing Cardinal Numbers\", size = 24)\n plt.xlabel(\"Article Class\", size=24)\n plt.ylabel(\"Percent %\", size = 24)\n plt.ylim(0, 100)\n plt.xticks([0,1], label=[\"Normal\", \"Clickbait\"], size=24)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n \n return ax", "def inst_class_stats(df, col='num_pkts'):\n classes = df.groupby('class_label')\n stat = classes[col].describe()\n return stat", "def graph_course(self):\n group = self.__data[\"filted_general_groupby\"]\n graph = {}\n if self.analysis[\"courses\"] is None:\n self.courses_list()\n\n # inicializa o dicionario que vai guardar o grafico\n for course in self.analysis[\"courses\"].index:\n graph[course] = []\n\n for i in range(18):\n min_v = i * 5\n max_v = min_v + 4.99\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n min_v = 95\n max_v = 100\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n self.analysis[\"graph_course\"] = graph", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def context_study_stats(frame_path=METRICS_DIR+'/merge.csv'):\n frame = pd.read_csv(frame_path)\n print(frame['LOC_prod'].mean())\n print(frame['LOC_prod'].sum())\n print(frame['LOC_test'].sum())\n print(frame['no_mutations'].sum())\n print(frame.shape[0])\n\n sizes = frame.groupby('project').size()\n prod = frame.groupby('project')['LOC_prod'].sum( )\n test = frame.groupby('project')['LOC_test'].sum()\n mutants = frame.groupby('project')['no_mutations'].sum()\n\n result = pd.DataFrame({'project': list(sizes.index),\n 'size': list(sizes),\n 'prod': list(prod),\n 'test': list(test),\n 'mutants': list(mutants)},\n columns=['project', 'size', 'prod', 'test', 'mutants'])\n print(result.to_latex())", "def makePdf(sources):\n pdf = 
PdfPages(\"sample_features.pdf\")\n classnames = []\n classname_dict = {}\n x = 2 # number of subplot columns\n y = 3 # number of subplot rows\n for source in sources:\n lc = source.lcs[0]\n\n if lc.classname not in classnames:\n classnames.append(lc.classname)\n classname_dict[lc.classname] = [lc]\n else:\n classname_dict[lc.classname].append(lc)\n\n if len(classname_dict[lc.classname]) < 3:\n\n label = lc.classname + \"; ID: \" + lc.id\n # all_times histogram:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(label)\n ax.axis('off')\n\n ax1 = fig.add_subplot(321)\n ax2 = fig.add_subplot(322)\n ax2.axis('off')\n ax3 = fig.add_subplot(323)\n ax4 = fig.add_subplot(324)\n ax4.axis('off')\n ax5 = fig.add_subplot(325)\n ax6 = fig.add_subplot(326)\n ax6.axis('off')\n\n hist, bins, other = ax1.hist(lc.all_times, 50, normed=True)\n ax1.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Histogram (normed) of all $\\Delta$Ts')\n\n ax2.text(0.0, 0.9, (r'$\\bullet$med time to next obs: ' +\n str(np.round(lc.cads_med, 4))))\n ax2.text(0.0, 0.75, (r'$\\bullet$avg time to next obs: ' +\n str(np.round(lc.avgt, 4))))\n ax2.text(0.0, 0.6, (r'$\\bullet$std dev of time to next obs: ' +\n str(np.round(lc.cads_std, 4))))\n ax2.text(0.0, 0.45, (r'$\\bullet$med of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_med, 4))))\n ax2.text(0.0, 0.3, (r'$\\bullet$avg of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_avg, 4))))\n ax2.text(0.0, 0.15, (r'$\\bullet$std dev of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_std, 4))))\n\n hist, bins, other = ax3.hist(lc.cads, 50)\n ax3.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Hist of time to next obs')\n\n ax6.text(\n 0.0, 0.9, r'$\\bullet$Number of epochs: ' + str(lc.n_epochs))\n ax6.text(0.0, 0.75, (r'$\\bullet$Time b/w first & last obs (days): ' +\n str(np.round(lc.total_time, 2))))\n ax6.text(0.0, 0.6, (r'$\\bullet$Average error in mag: ' +\n str(np.round(lc.avg_err, 4))))\n ax6.text(0.0, 0.45, (r'$\\bullet$Median error in mag: ' +\n str(np.round(lc.med_err, 4))))\n ax6.text(0.0, 0.3, (r'$\\bullet$Std dev of error: ' +\n str(np.round(lc.std_err, 4))))\n ax6.text(0.0, 0.15, '')\n\n ax5.scatter(lc.epochs, lc.mags)\n\n ax4.text(0.0, 0.9, (r'$\\bullet$Avg double to single step ratio: ' +\n str(np.round(lc.avg_double_to_single_step, 3))))\n ax4.text(0.0, 0.75, (r'$\\bullet$Med double to single step: ' +\n str(np.round(lc.med_double_to_single_step, 3))))\n ax4.text(0.0, 0.6, (r'$\\bullet$Std dev of double to single step: ' +\n str(np.round(lc.std_double_to_single_step, 3))))\n ax4.text(\n 0.0, 0.45,\n (r'$\\bullet$1st peak to 2nd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_2, 3))))\n ax4.text(\n 0.0, 0.3,\n (r'$\\bullet$2ndt peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_2_to_3, 3))))\n ax4.text(\n 0.0, 0.15,\n (r'$\\bullet$1st peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_3, 3))))\n\n pdf.savefig(fig)\n\n pdf.close()\n\n pdf = PdfPages('feature_plots.pdf')\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n classnamenum = 0\n\n colors = ['red', 'yellow', 'green', 'blue', 'gray', 'orange', 'cyan',\n 'magenta']\n for classname, lcs in list(classname_dict.items()):\n classnamenum += 1\n print(classname, len(lcs), 'light curves.')\n attr1 = []\n attr2 = []\n attr3 = []\n attr4 = []\n attr5 = []\n attr6 = []\n 
attr7 = []\n attr8 = []\n for lc in lcs:\n attr1.append(lc.n_epochs)\n attr2.append(lc.avgt)\n attr3.append(lc.cads_std)\n attr4.append(lc.total_time)\n attr5.append(lc.all_times_hist_peak_val)\n attr6.append(lc.cad_probs[5000])\n attr7.append(lc.all_times_nhist_peak_1_to_3)\n attr8.append(lc.all_times_nhist_peak_val)\n\n ax2.scatter(attr1, attr2, color=colors[classnamenum], label=classname)\n ax1.scatter(attr3, attr4, color=colors[classnamenum], label=classname)\n ax2.set_xlabel('N Epochs')\n ax2.set_ylabel('Avg time to next obs')\n ax1.set_xlabel('Standard dev. of time to next obs')\n ax1.set_ylabel('Time b/w first and last obs')\n\n ax3.scatter(attr5, attr6, color=colors[classnamenum], label=classname)\n ax4.scatter(attr7, attr8, color=colors[classnamenum], label=classname)\n ax3.set_xlabel(r'All $\\Delta$T hist peak val')\n ax3.set_ylabel('Prob time to next obs <= 5000 min')\n ax4.set_xlabel(r'$\\Delta$Ts normed hist peak 1 to peak 3')\n ax4.set_ylabel(r'Peak val of all $\\Delta$Ts normed hist')\n\n #ax1.legend(bbox_to_anchor=(1.1, 1.1),prop={'size':6})\n ax2.legend(bbox_to_anchor=(1.1, 1.1), prop={'size': 6})\n #ax3.legend(loc='upper right',prop={'size':6})\n #ax4.legend(loc='upper right',prop={'size':6})\n\n pdf.savefig(fig)\n\n pdf.close()\n return 0", "def plot_associative_learning_progress(ax, df):\n\n num_objects_list = sorted(df.curr_num_objects.unique())\n legend_list = []\n for idx in num_objects_list:\n ax.plot(df[df.curr_num_objects == idx].groupby('objects_iter').rewards.mean())\n legend_list.append(f'ns={idx}')\n ax.set_xlabel('Stimulus iteration')\n ax.set_ylabel('P(correct)')\n ax.set_ylim([0.4, 1])\n ax.legend(legend_list)", "def count_plot_target_class(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.groupby([self.target_column]).size()) # print the sum of every class\r\n\r\n sns.countplot(data=self.data_frame, x=self.data_frame[self.target_column])\r\n plt.title(self.dataframe_name + ': Display the distribution of ' + self.target_column + ' class')\r\n plt.xlabel('Target Name: ' + self.target_column)\r\n plt.ylabel('Count')\r\n self.save_plot_as_image()\r\n plt.show()", "def grid_plot_twitter(proverbs_list, data,dim = (4,4), ylog = False, rt = False): \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0],dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2)\n \n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize = 14)\n fig.text(0.02, 0.5, 'Frequency among all {}-grams on Twitter'.format(len(proverbs_list[0].split())), va='center', rotation='vertical', fontsize = 14)\n \n #loop to create each timeseries plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n ts = data[data.proverb ==proverbs_list[i]]\n ts.date = pd.to_datetime(ts.date, format = '%Y-%m-%d', errors='coerce')\n ts.index = ts.date\n ts = ts.sort_index()\n print(ts)\n ts2 = ts.copy()[['freq_noRT', 'freq']]\n print(ts2)\n ts2 = ts2.rolling(window=30).mean()\n print(ts2)\n\n \n if ylog == False:\n pass\n\n elif ylog == True:\n ax.set_yscale('log') \n\n if rt == False:\n ax.plot(ts.index, ts['freq_noRT'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['freq_noRT'], alpha = 0.9, 
color='darkorange') \n \n elif rt ==True:\n ax.plot(ts.index, ts['freq'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['freq'], alpha = 0.9, color='darkorange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def prop_types(houses:pd.DataFrame) -> None:\n sns.set_style('whitegrid')\n indexNames= houses[houses['PRICE'] >= 3000000].index\n houses= houses.drop(indexNames)\n \n ax= sns.catplot(x= 'PROPERTY_TYPE', y= 'PRICE', kind= 'box', data= houses)\n ax.set_xticklabels(rotation=30)\n plt.tight_layout()\n plt.show()\n \n ax= sns.countplot(x= 'PROPERTY_TYPE', data= houses)\n ax.set_xticklabels(ax.get_xticklabels(), rotation= 30, ha=\"right\", fontsize=9)\n plt.show()" ]
[ "0.6448972", "0.6392558", "0.6383187", "0.6325968", "0.58468354", "0.5693776", "0.5645798", "0.5629048", "0.5622483", "0.5611402", "0.55704165", "0.5545969", "0.54707754", "0.5421868", "0.541172", "0.5408939", "0.53972", "0.5382327", "0.5376701", "0.5373505", "0.5348653", "0.5318779", "0.53117114", "0.53011274", "0.5285753", "0.527667", "0.5270411", "0.5264465", "0.5251325", "0.52398163" ]
0.66838074
0
Takes a pandas dataframe which contains the proportions of language classes over generations and generates a barplot (excluding the burnin period)
def plot_barplot_language_types(lang_class_prop_over_gen_df, title, file_path, file_name, n_runs, n_batches, n_gens, gen_start, lang_class_baselines_all, lang_class_baselines_fully_expressive, possible_form_lengths): sns.set_style("darkgrid") sns.set_context("talk") if len(possible_form_lengths) == 1: n_language_classes = 4 else: n_language_classes = 7 #TODO: or should this be 6 (i.e. collapsing the two different reduplication strategies?) proportion_column_as_results = dataframe_to_language_stats(lang_class_prop_over_gen_df, n_runs, n_batches, n_gens, possible_form_lengths) proportion_column_from_start_gen = proportion_column_as_results[:, gen_start:] proportion_column_from_start_gen = proportion_column_from_start_gen.flatten() runs_column_from_start_gen = [] for i in range(n_runs*n_batches): for j in range(gen_start, n_gens): for k in range(n_language_classes): runs_column_from_start_gen.append(i) runs_column_from_start_gen = np.array(runs_column_from_start_gen) generation_column_from_start_gen = [] for i in range(n_runs*n_batches): for j in range(gen_start, n_gens): for k in range(n_language_classes): generation_column_from_start_gen.append(j) generation_column_from_start_gen = np.array(generation_column_from_start_gen) class_column_from_start_gen = [] for i in range(n_runs*n_batches): for j in range(gen_start, n_gens): if n_language_classes == 4: class_column_from_start_gen.append('degenerate') class_column_from_start_gen.append('holistic') class_column_from_start_gen.append('compositional') class_column_from_start_gen.append('other') elif n_language_classes == 7: class_column_from_start_gen.append('D') class_column_from_start_gen.append('H') class_column_from_start_gen.append('H+Div.') class_column_from_start_gen.append('C') class_column_from_start_gen.append('C+Red.-part') class_column_from_start_gen.append('C+Red.-whole') class_column_from_start_gen.append('O') new_data_dict = {'run': runs_column_from_start_gen, 'generation': generation_column_from_start_gen, 'proportion': proportion_column_from_start_gen, 'class': class_column_from_start_gen} lang_class_prop_over_gen_df_from_starting_gen = pd.DataFrame(new_data_dict) if len(possible_form_lengths) == 1: palette = sns.color_palette(["black", "red", "green", "grey"]) else: palette = sns.color_palette(["black", sns.color_palette("colorblind")[3], sns.color_palette("colorblind")[1], sns.color_palette("colorblind")[2], sns.color_palette("colorblind")[9], sns.color_palette("colorblind")[0], sns.color_palette("colorblind")[7]]) sns.barplot(x="class", y="proportion", data=lang_class_prop_over_gen_df_from_starting_gen, palette=palette) # plt.axhline(y=lang_class_baselines_all[0], xmin=0.0, xmax=0.25, color='k', linestyle='--', linewidth=2) # plt.axhline(y=lang_class_baselines_all[1], xmin=0.25, xmax=0.5, color='k', linestyle='--', linewidth=2) # plt.axhline(y=lang_class_baselines_all[2], xmin=0.5, xmax=0.75, color='k', linestyle='--', linewidth=2) # plt.axhline(y=lang_class_baselines_all[3], xmin=0.75, xmax=1.0, color='k', linestyle='--', linewidth=2) # # if title == 'Mutual Understanding Only' or title == 'Minimal Effort & Mutual Understanding': # plt.axhline(y=lang_class_baselines_fully_expressive[0], xmin=0.25, xmax=0.5, color='0.6', linestyle='--', linewidth=2) # plt.axhline(y=lang_class_baselines_fully_expressive[1], xmin=0.5, xmax=0.75, color='0.6', linestyle='--', linewidth=2) plt.tick_params(axis='both', which='major', labelsize=18) plt.tick_params(axis='both', which='minor', labelsize=18) plt.ylim(-0.05, 1.05) plt.title(title, 
fontsize=22) # plt.xlabel('Language class') plt.xlabel('', fontsize=20) plt.ylabel('Mean proportion', fontsize=20) plt.tight_layout() if holistic_without_partial_meaning is True: plt.savefig(file_path + "Barplot_lang_types_" + file_name + "_burn_in_" + str(gen_start) + ".png") else: plt.savefig(file_path + "Barplot_lang_types_" + file_name + "_burn_in_" + str(gen_start) + "_NEW.png") plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")", "def visualize_data(df):\n # Remove 'not available'\n genres = df.genre.unique().tolist()\n remove_index = genres.index('Not Available')\n genres.pop(remove_index)\n print('Genres: ', genres)\n\n # Extract number of songs in each genre\n genre_counts = df.genre.value_counts().tolist()\n genre_counts.pop(remove_index)\n print('Counts: ', genre_counts)\n\n # Plot bar graph\n plt.bar(genres, genre_counts)\n plt.xlabel('Genres')\n plt.ylabel('Count')\n plt.show()", "def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()", "def stopword_bar(df, stop_words, ax):\n df_test = df.copy()\n df_test['prop'] = df.title.apply(stopword_proportion)\n sns.barplot(data=df_test, x='target', y='prop', ax=ax, ci=False)\n ax.set_title(\"Ratio of Stopwords Between Classes\", size=20)\n ax.set_ylim([1,2])\n ax.set_ylabel(\"Ratio\", size=20)\n ax.set_xlabel(\"Article Class\", size=20)\n plt.xticks(ticks=range(2),labels=['Normal','Clickbait'], size=20)\n return ax", "def show_class_imbalance(df, title='Class Imbalance', PATH=None):\n ax = sns.barplot(x=[\"Normal\", \"Clickbait\"], y=df.groupby(['target']).target.count())\n ax.set_title(title, size=20)\n plt.xticks([0,1],[\"Normal\", \"Clickbait\"], size = 20)\n ax.set_ylabel(\"Document Count\", size=17)\n ax.set_xlabel(\"Article Class\", size=20)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n return ax", "def plot_class_balances(df, col):\n\n ser_counts = df[col].value_counts()\n ser_counts.plot.bar()\n plt.title(col + ' Counts \\n(classes={})'.format(ser_counts.shape[0]))\n \n plt.show()", "def message_genre_bar_chart(df):\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n return {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n }", "def plot_balance_class(classes):\n unique, counts = np.unique(classes, return_counts=True)\n plt.bar(unique, counts)\n plt.title('Class Frequency')\n plt.xlabel('Class')\n plt.ylabel('Frequency')\n plt.show()", "def bar_plot(df, data_pt):\n \n x=df.loc[data_pt]\n y= df.columns.tolist()\n sorte=x.tolist()\n a=sorted(zip(sorte, y))[-10:]\n y=[y for _, y in a]\n ## soru burda yapıp altı ona göre duzeliyecegim birde\n \n x = df[y].loc[data_pt]\n \n # Here we modify the tickangle of the xaxis, resulting in rotated labels.\n #title={'text': \"<b>Comparing features with Golden for Cycle {}\".format(cycle),\n # 'y':0.9,'x':0.5,'xanchor': 'center','yanchor': 'top'}\n\n \n trace = {'type': 'bar',\n 'orientation':'h',\n 'x' : x,\n 'y' : y}\n data = Data([trace])\n layout = {'title' : \"<b>Reconstruction error in each dimension for cycle{}\".format(data_pt),\n 'titlefont':{'size' : 20},\n 'xaxis' : {'title': '<b>Reconstruction Error',\n 'titlefont':{'size' : 20},\n 'tickangle': -45, 'tickfont': {'size':15} ,},\n \n 'yaxis' : {'title': '<b>Features',\n 'titlefont':{'size' : 20},\n 'tickfont': {'size':15},},\n 'margin' : {'l':100, 'r' : 1, 'b': 200, 't': 100, 'pad' : 1},\n 'height' : 600, 'width' : 800,\n }\n \n fig = Figure(data = data, 
layout = layout)\n \n return pyo.iplot(fig)", "def category_bar_chart(df):\n label_names = df.drop(['message', 'original', 'genre', 'id'], axis=1).columns\n label_counts = []\n for column in label_names:\n label_counts.append(df[column].sum())\n return {\n 'data': [\n Bar(\n x=label_names,\n y=label_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Labelled Categories',\n 'yaxis': {\n 'title': \"Count\",\n 'type': 'log'\n },\n 'xaxis': {\n 'title': \"Category\"\n }\n }\n }", "def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()", "def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)", "def plot_pie_charts_of_word_class_distribution(df):\n genre_dict = {\n 'g':'Rock',\n 'b':'Hip-Hop',\n 'r':'Pop'\n }\n for _, genre in genre_dict.items():\n filtered_df = df[df['genre'] == genre]\n \n # plotting circle diagram for the specific genre\n avg_percentage_nouns = filtered_df['nouns'].mean()\n avg_percentage_verbs = filtered_df['verbs'].mean()\n avg_percentage_adverbs = filtered_df['adverbs'].mean()\n\n total = avg_percentage_nouns + avg_percentage_nouns + avg_percentage_nouns\n nouns = avg_percentage_nouns / total * 100\n verbs = avg_percentage_verbs / total * 100\n adverbs = avg_percentage_adverbs / total * 100\n\n # Pie chart\n labels = ['Nouns', 'Verbs', 'Adverbs']\n sizes = [nouns, verbs, adverbs]\n\n _, ax1 = plt.subplots()\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n # Equal aspect ratio ensures that pie is drawn as a circle\n ax1.axis('equal') \n plt.tight_layout()\n plt.title(f'Circle diagram of the genre \"{genre}\"s average word classes distribution')\n plt.show()\n # plt.savefig(f'src/visualization/feature_plots/{genre}_word_class_distribution')", "def plot_word_class_pr_genre(df):\n df['nouns'] = df['nouns'] * 100\n df['verbs'] = df['verbs'] * 100\n df['adverbs'] = df['adverbs'] * 100\n # plotting nouns\n plotting_helper_method('nouns', 'genre', df)\n plt.title('Amount of nouns pr song pr. genre')\n plt.xlabel(\"Amount of nouns in each song\")\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/nouns_pr_genre_plot')\n\n # plotting verbs\n plotting_helper_method('verbs', 'genre', df)\n plt.title('Amount of verbs pr song pr. genre')\n plt.xlabel('Amount of verbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/verbs_pr_genre_plot')\n\n # plotting adverbs\n plotting_helper_method('adverbs', 'genre', df)\n plt.title('Amount of adverbs pr song pr. 
genre')\n plt.xlabel('Amount of adverbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/adverbs_pr_genre_plot')", "def proportion_with_cardinals(df, PATH):\n \n df_test = df.copy()\n df_test['cardinal'] = df.title.apply(contains_cardinal)\n\n click = df_test[df_test.target == 1]\n non = df_test[df_test.target == 0]\n click = click.groupby(['cardinal']).target.count()\n non = non.groupby(['cardinal']).target.count()\n \n non = non[1]/non[0] * 100\n click = click[1]/click[0] * 100\n # plot the results\n fig, ax = plt.subplots(figsize=(12,6))\n sns.barplot(x=['Normal', \"Clickbait\"], y=[non, click], ax=ax)\n plt.title(\"Percent of Titles Containing Cardinal Numbers\", size = 24)\n plt.xlabel(\"Article Class\", size=24)\n plt.ylabel(\"Percent %\", size = 24)\n plt.ylim(0, 100)\n plt.xticks([0,1], label=[\"Normal\", \"Clickbait\"], size=24)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n \n return ax", "def bar_chart(self, period='M', annot=True):\n assert period in [\"W\", \"M\", \"Y\"], \"Wrong Period. Chose between 'W' - 'M' - 'Y'\"\n assert isinstance(annot, bool), 'Error! Annot parameter must be boolean'\n months = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n periods = {\"M\": (\"Monthly\",\"Months\"), \"Y\": (\"Yearly\", \"Years\"), \"W\": (\"Weekly\", \"Weeks\")}\n data = self.data.copy()\n data.set_index(pd.to_datetime(data.index), inplace=True)\n sample = pd.concat([data.head(1), data.resample(period).last()])\n sample['Var%'] = (sample['Profit/Loss'] - sample['Profit/Loss'].shift(1)) / sample['Value'].shift(1) * 100 \n sample.dropna(inplace=True)\n colors = sample['Var%'].apply(lambda x: \"green\" if x > 0 else \"red\")\n fig = plt.figure(figsize=(4,2), dpi=200)\n fig.patch.set_facecolor('#ececec')\n ax = fig.add_subplot(111)\n ax.set_xlabel(periods[period][1])\n ax.set_ylabel(\"Var (%)\")\n ax.set_title(f\"{periods[period][0]} Profit / Loss %\")\n ax.bar(np.arange(len(sample)), sample['Var%'], 0.35, color=colors, alpha=1, label=f\"{periods[period][0]} Statistics\")\n ax.set_xticks(np.arange(len(sample)))\n if period == \"Y\":\n labels = [x for x in sample.index.year]\n ax.set_ylim(sample['Var%'].min()-2,sample['Var%'].max()+2) \n elif period == \"W\":\n sample_M = pd.concat([data.head(1), data.resample(\"M\").last()])\n ax.set_xticks(np.arange(-2, len(sample_M)*4-2, 4))\n labels = [m + \"-\" + y for m, y in zip([months[x-1] for x in sample_M.index.month[1:]], [str(x) for x in sample_M.index.year[1:]])]\n m = months[int(months.index(labels[-1][:-5])) + 1] if int(months.index(labels[-1][:-5])) + 1 != 12 else months[0]\n y = int(labels[-1][-4:]) if m != 0 else int(labels[-1][-4:]+1)\n labels.append(m + '-' + str(y))\n else:\n labels = [m + \"-\" + y for m, y in zip([months[x-1] for x in sample.index.month], [str(x) for x in sample.index.year])]\n ax.set_xticklabels(labels)\n cords = {'M': (0.2, 0.5, 4, 1), 'W': (0.5, 0.5, 'x-small', 1), 'Y': (0.045, 0.3, 'x-large', 0.85)}\n if annot:\n for d, v in zip(range(len(sample)), sample['Var%']):\n if v > 0:\n ax.annotate(str(round(v, 2)) + \" %\", xy=(d - cords[period][0], v+cords[period][1]), fontsize=cords[period][2])\n else:\n ax.annotate(str(round(v, 2)) + \" %\", xy=(d - cords[period][0], v-cords[period][3]), fontsize=cords[period][2])\n if period != \"Y\":\n fig.autofmt_xdate()\n ax.grid(True, alpha=0.5)\n ax.legend()\n return fig, ax", "def proportions_visualiser(\n df: 
pd.core.frame.DataFrame,\n colum_name: str = \"Sensor Glucose (mg/dL)\",\n limits: Dict[str, int] = {\"low\": 70, \"high\": 180},\n windows: Dict[str, int] = {\"weekly\": 7, \"monthly\": 30},\n kind: str = \"TIR\",\n) -> NoReturn:\n\n valid_kinds = [\"TIR\", \"TBR\", \"TAR\"]\n\n if \"low\" not in limits.keys() or \"high\" not in limits.keys():\n raise Exception(f\"limits.keys() should be ['low', 'high'] not {limits.keys()}\")\n\n titles = {\n \"TIR\": f\"Time In Range [{limits['low']},{limits['high']})\",\n \"TAR\": f\"Time Above Range >= {limits['high']}\",\n \"TBR\": f\"Time Below Range < {limits['low']}\",\n }\n\n kind = kind.upper()\n if kind not in valid_kinds:\n raise Exception(\n f\"Invalid kind `{kind}`, select one from {valid_kinds} or refer to help({self.__name__})\"\n )\n\n TIR = (\n lambda y: 100\n * y[(y >= limits[\"low\"]) & (y < limits[\"high\"])].count()\n / y.count()\n )\n TBR = lambda y: 100 * y[(y < limits[\"low\"])].count() / y.count()\n TAR = lambda y: 100 * y[(y >= limits[\"high\"])].count() / y.count()\n\n _proportions = df[colum_name].groupby(df.index.date).apply(eval(f\"{kind}\"))\n\n _proportions.plot(**{\"label\": \"daily\"})\n\n for key, value in windows.items():\n _ax = _proportions.rolling(value).mean().plot(**{\"label\": key})\n\n _mean_proportion = _proportions.mean()\n plt.ylabel(\"Percentage\")\n plt.axhline(\n _mean_proportion,\n **{\"label\": f\"mean = {round(_mean_proportion,1)}\", \"c\": \"blue\"},\n )\n plt.legend()\n plt.title(titles[kind])", "def plot_bar_chart_quantum_vs_classical(\n df_bugs: pd.DataFrame,\n column_to_inspect: str,\n mapping_dict: Dict[str, str],\n categories_to_exclude: List[str] = [],\n categories_keep_only: List[str] = None,\n out_file_name: str = None,\n out_folder_path: str = None,\n horizontal: bool = False,\n map_value_since_beginning: bool = False,\n figsize: Tuple[int, int] = (10, 5),\n legend_placement: str = 'upper center'\n ):\n\n fig, ax = plt.subplots(figsize=figsize)\n\n df = expand_columns(df_bugs, column_to_inspect)\n df = df[~(df[column_to_inspect].isin(categories_to_exclude))]\n\n if categories_keep_only is not None:\n df = df[df[column_to_inspect].isin(categories_keep_only)]\n\n if map_value_since_beginning:\n df[column_to_inspect] = df[column_to_inspect].map(mapping_dict)\n\n categories_q_bugs = list(df[\n df['type'] == 'Quantum'].groupby(\n column_to_inspect).count().sort_values(\n by='type', ascending=False).index)\n\n for component in df[column_to_inspect].unique():\n if component not in categories_q_bugs:\n categories_q_bugs.append(component)\n\n args = {\n \"hue\": \"type\",\n \"data\": df,\n \"palette\": PALETTE,\n \"ax\": ax,\n \"order\": categories_q_bugs\n }\n\n if horizontal:\n sns.countplot(y=column_to_inspect, **args)\n ax.grid(axis='x')\n else:\n sns.countplot(x=column_to_inspect, **args)\n ax.grid(axis='y')\n\n if not map_value_since_beginning:\n # map the value at the latest stage, thus in the labels\n obj_labels = ax.get_xticklabels()\n for i, l in enumerate(obj_labels):\n obj_labels[i] = mapping_dict[l.get_text()]\n ax.set_xticklabels(obj_labels, rotation=60, ha='right')\n\n ax.set_xlabel(capitalize(column_to_inspect), fontsize=15)\n ax.set_ylabel(\"Count\", fontsize=15)\n plt.legend(title=\"Type of Bug\", loc=legend_placement)\n plt.tight_layout()\n\n if out_file_name is not None and out_folder_path is not None:\n fig.savefig(os.path.join(out_folder_path, out_file_name), format=\"pdf\")", "def plot_norm_bar(df, title, figsize=(12,7)):\n fig, ax = plt.subplots(ncols=1, figsize=figsize)\n 
fig.suptitle(title)\n cat_value_counts = df.fillna('missing').value_counts(normalize=True)\n sns.barplot(y = cat_value_counts.index, x= cat_value_counts.values*100)\n ax.set(xlabel= 'percentage', ylabel=str(df.name))\n \n plt.plot()\n\n return", "def plot_uv_bar(df, colname, colorid=0):\n if (colname in list(df.columns)):\n \n # Set figure size \n fig, ax = plt.subplots(figsize=(8,6))\n \n # set colorid for bar plot\n base_color = sns.color_palette()[colorid]\n\n # variable counts to calculate percentage\n cdict_count = df[colname].value_counts().to_dict() \n total_count = df.shape[0]\n \n \n if (len(list(cdict_count.keys())) > 5):\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.125\n # max. no. of categories Vs % rotation \n rottext_pct = 90 \n # font size for % display\n fontsiz_pct = 12\n else:\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.075\n # max. no. of categories Vs % rotation \n rottext_pct = 0 \n # font size for % display\n fontsiz_pct = 16\n \n \n # plotting...\n sns.countplot(data = df, x = colname\n , order = list(cdict_count.keys())\n , color = base_color\n , saturation = 0.7)\n\n # title and labels\n plt.title('Order of '+ colname, fontsize=20)\n plt.xlabel(colname + ' Type', fontsize=16)\n plt.ylabel('Count', fontsize=16)\n \n # x-,y- ticks\n locs, labels = plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n\n # display % count information on each tower of bar plot\n for loc, label in zip(locs, labels):\n count = cdict_count[label.get_text()]\n pct_string = '{:0.1f}%'.format(count*100/total_count)\n plt.text(loc, count-maxcount_pct, pct_string, ha='center', color='w', fontsize=fontsiz_pct, rotation=rottext_pct)\n\n return plt.show()\n\n else:\n \n print(' >>>Error:',colname,' is not in DataFrame')", "def leitner_bar(levels):\n\n df = pd.DataFrame(levels, columns=['comfort_level'])\n prop_df = leitner_proportions(df)\n locs = get_label_locs(prop_df)\n\n fig = px.bar(prop_df.T, orientation='h', width=400, height=200)\n fig.update_xaxes(\n showticklabels=False,\n showgrid=False,\n title_text='')\n fig.update_yaxes(showticklabels=False,\n showgrid=False,\n showline=False,\n zeroline=False,\n title_text='')\n fig.update_layout(\n plot_bgcolor = '#ffffff',\n showlegend = False,\n annotations=[\n dict(\n x=xval,\n y=0.5,\n text=txt,\n showarrow=False,\n xref='paper',\n yref='paper',\n font=dict(\n family='Lato',\n size=30,\n color=\"#000000\")\n ) for xval, txt in zip(locs, prop_df.index)\n ]\n )\n fig.update_traces(marker=dict(color=\"#FF909A\"),\n selector=dict(name='1'))\n fig.update_traces(marker=dict(color=\"#EFC9ED\"),\n selector=dict(name='2'))\n fig.update_traces(marker=dict(color=\"#C8F5FF\"),\n selector=dict(name='3'))\n fig.update_traces(marker=dict(color=\"#D5E3FF\"),\n selector=dict(name='4'))\n fig.update_traces(marker=dict(color=\"#FFF4BD\"),\n selector=dict(name='5'))\n return fig.to_json()", "def plot_bar(source_files, column_ids, column_names, normalize, sort, plot_difference, freq_bound, title=None,\n dtype=int):\n\n def _filter_data(raw_data, numerical):\n \"\"\" Filters plot-able data. 
\"\"\"\n # Retain numeric information\n legal_count_inventory = digits + '.'\n # Retain POS tags, also\n legal_entry_inventory = ascii_uppercase + '$'\n filtered_data = list()\n for data_point in raw_data:\n skip = False\n for symbol in list(str(data_point)):\n if symbol not in legal_count_inventory and symbol not in legal_entry_inventory:\n skip = True\n if not skip:\n if numerical:\n filtered_data.append(dtype(data_point))\n else:\n filtered_data.append(data_point)\n # Optionally normalize count values, resulting in a proportion plot\n if numerical and normalize:\n filtered_data = filtered_data / np.sum(filtered_data)\n return np.array(filtered_data)\n\n # Set plot parameters\n sns.set_style('whitegrid')\n sns.set_context('paper')\n\n # Compile data to be plotted within a new dataframe\n # Not necessary, but convenient when plotting with seaborn\n source_dict = dict()\n # Read in data and sort alphanumeric features (e.g. POS tags) alphabetically\n df_features = pd.read_table(source_files[0], header=None, names=['Tag', 'Count'], skip_blank_lines=True)\n df_features = df_features.sort_values('Tag', ascending=True)\n df_reference = pd.read_table(source_files[1], header=None, names=['Tag', 'Count'], skip_blank_lines=True)\n df_reference = df_reference.sort_values('Tag', ascending=True)\n # Isolate columns to be plotted\n entries = _filter_data(df_features.iloc[:, column_ids[0]].values, False)\n counts = _filter_data(df_features.iloc[:, column_ids[1]].values, True) # e.g. counts from corpus A\n reference_counts = _filter_data(df_reference.iloc[:, column_ids[1]].values, True) # e.g. counts from corpus B\n # Construct dataframe to be visualized\n source_dict[column_names[0]] = entries\n source_dict['reference_counts'] = reference_counts\n # Generate frequency mask to exclude low-frequency features from the plot\n # Optional; results in a clearer, better readable visualization\n frequency_mask = np.array(\n [int(counts[i] >= freq_bound or reference_counts[i] >= freq_bound) for i in range(counts.shape[0])])\n source_dict['frequency_mask'] = frequency_mask\n # Calculate per-feature count differences (i.e. target counts vs. 
reference counts), if specified\n if plot_difference:\n diffs = counts - reference_counts\n source_dict[column_names[1]] = diffs\n else:\n source_dict[column_names[1]] = counts\n features = pd.DataFrame.from_dict(source_dict)\n # Sort by count value and apply frequency mask\n if sort:\n features = features.sort_values(column_names[0], ascending=True)\n if freq_bound > 0:\n features = features.drop(features[features.frequency_mask == 0].index)\n\n # Make plot\n fig, ax = plt.subplots()\n fig.set_size_inches(8, 6)\n if plot_difference:\n colors = ['coral' if feature >= 0 else 'skyblue' for feature in features[column_names[1]]]\n sns.barplot(x=column_names[1], y=column_names[0], data=features, ax=ax, palette=colors)\n else:\n sns.barplot(x=column_names[1], y=column_names[0], data=features, ax=ax, palette='Set2')\n sns.despine()\n if title is not None:\n plt.title(title)\n plt.show()", "def __word_frequency_barplot(self,df, column_name, nr_top_words=20):\n tokenized_only_dict = Counter(np.concatenate(df[column_name].values))\n tokenized_only_df = pd.DataFrame.from_dict(tokenized_only_dict, orient='index')\n tokenized_only_df.rename(columns={0: 'count'}, inplace = True)\n tokenized_only_df.sort_values('count', ascending=False, inplace=True)\n fig, axs = plt.subplots(1,2,figsize=(20,8))\n \n a = tokenized_only_df['count'].values[:nr_top_words]\n amin, amax = min(a) , max(a)\n norm = []\n\n for i, val in enumerate(a):\n norm.append( (val - amin) / (amax- amin))\n\n sns.barplot( norm, list(range(nr_top_words)), palette='hls', orient= 'h', ax=axs[0])\n axs[0].set_yticks(list(range(nr_top_words)))\n axs[0].set_yticklabels(tokenized_only_df.index[:nr_top_words], fontsize=18)\n axs[0].set_title(\"Word Frequencies \" , fontsize=20)\n axs[0].set_xlabel(\"(a) Frequency of a Word\", fontsize = 18)\n\n document_lengths = []\n if column_name == self.__origintext_columnname or column_name == \"clean_text\" :\n document_lengths = np.array(list(map(len, df[column_name].str.split())))\n elif column_name == \"removed_stopwords\" or column_name == \"stem_words\":\n document_lengths = np.array(list(map(len, df[column_name])))\n\n print(\"The average number of Words in a document is: {}.\".format(np.mean(document_lengths)))\n print(\"The max number of Words in a document is: {}.\".format(np.max(document_lengths)))\n print(\"The min number of Words in a document is: {}.\".format(np.min(document_lengths)))\n axs[1].set_title('Distribution of number of words on ' , fontsize = 20)\n axs[1].set_xlabel(\"(b) Sentence Length\", fontsize = 18)\n sns.distplot(document_lengths, bins = 50 , ax =axs[1])\n plt.show()", "def eda_plot():\n\n df1 = pd.read_csv('eda_malware.csv')\n df2 = pd.read_csv('eda_random.csv')\n df3 = pd.read_csv('eda_popular.csv')\n\n df = pd.concat([df1, df2, df3], ignore_index=True)\n df['label'].replace([0,1],['Benign','Malware'],inplace=True)\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB', '#97C1A9']\n # b vs. m: node types counts\n f1 = pd.crosstab(df['label'], df['node_types_counts'])\n\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 5768], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. 
Malicious: Number of Node Types')\n fig.savefig('bv_node_types.png')\n\n # for a better look, limit type 5 malware to 2k counts only\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 2000], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. Malicious: Number of Node Types')\n fig.savefig('bv_node_types1.png')\n\n # node types\n # for malware: extract node types info for node types counts > 5, and sum up each types counts\n node_types = df[(df['label'] == 'Malware') & (df['node_types_counts'] >= 5)]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [215060, 2823059, 3135725, 5641356, 10679709, 16547701]\n labels = ['Others', 'static,Node', 'public,static,Node', 'Node', 'external,Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Malware: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_m.png')\n\n # for benign: extract node types info for node types counts, and sum up each types counts\n node_types = df[(df['label'] == 'Benign')]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [77967, 2892033, 2964924, 5287258, 6478196, 20364339]\n labels = ['Others', 'staticNode', 'public,staticNode', 'external,Node', 'Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Benign: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_b.png')\n\n # benign vs malware: counts\n sizes = [8435, 802]\n labels = ['Benign', 'Malware']\n\n colors = ['#EAB6AB','#D9E6F3']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Number of Benign vs. Malware', y=1.05)\n\n plt.show()\n fig1.savefig('bm_counts.png')\n\n # number of edges vs number of nodes\n groups = df.groupby('label')\n colors = ['#FFAEA5', '#A2E1DB']\n\n # Plot\n fig, ax = plt.subplots()\n ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n for name, group in groups:\n if name == 'Benign':\n c = colors[0]\n else:\n c = colors[1]\n ax.plot(group.number_edges, group.number_nodes, marker='o', linestyle='', ms=4, label=name, c=c)\n ax.legend()\n ax.set_xlabel('Number of Edges')\n ax.set_ylabel('Number of Nodes')\n ax.set_title('Benign & Malware: Number of Edges vs. 
Number of Nodes', y=1.05)\n\n plt.show()\n fig.savefig('bm_edges_nodes.png')", "def createChart(cladeGroup, data, taxonomyDict, outputFile):\n dfData = []\n for clade in cladeGroup: \n temp, other, totalTemp = valueCountsSpecies(data, cladeGroup[clade], taxonomyDict)\n relativeTemp = {}\n for val in temp:\n relativeTemp[val] = (temp[val] / sum(list(temp.values())))*100\n dfData.append(relativeTemp)\n\n tempDF = pd.DataFrame(dfData, index=list(cladeGroup.keys()))\n tempDF = tempDF.fillna(0)\n\n # Plotting\n sns.set(rc={'figure.figsize':(20,15)}, font_scale=2)\n ax = tempDF.plot(kind=\"bar\", stacked=True, colormap=ListedColormap(sns.color_palette(\"twilight\", 12)), rot=0)\n for rect in ax.patches:\n # Find where everything is located\n height = rect.get_height()\n width = rect.get_width()\n x = rect.get_x()\n y = rect.get_y()\n \n # The height of the bar is the data value and can be used as the label\n label_text = f'{height:.2f}%' # f'{width:.2f}' to format decimal values\n \n # ax.text(x, y, text)\n label_x = x + width / 2\n label_y = y + height / 2\n \n # only plot labels greater than given width\n if height > 0.00:\n ax.text(label_x, label_y, label_text, ha='center', va='center', fontsize=20, color=\"w\")\n\n plt.legend(loc=\"center right\", bbox_to_anchor=(1.25, 0.5), ncol=1)\n plt.savefig(outputFile, bbox_inches=\"tight\")\n plt.show()\n return", "def visualize(X: pd.DataFrame, y: pd.DataFrame) -> None:\r\n y[\"Action\"].value_counts().plot.pie(explode=(0.02, 0.04, 0.05, 0.09), title=\"Proportion of classes in dataset\")\r\n plt.savefig(\"Figures/proportions\")\r\n\r\n for i, column in enumerate(X.columns):\r\n fig, ax = plt.subplots(1, 2)\r\n\r\n ax[0].hist(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[0].set_xlabel(column)\r\n ax[0].set_ylabel(\"Frequency\")\r\n\r\n ax[1].boxplot(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[1].set_xlabel(\"Action\")\r\n ax[1].set_ylabel(column)\r\n\r\n X[column].hist(by=y[\"Action\"])\r\n\r\n ax[0].legend([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n ax[1].set_xticklabels([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n fig.suptitle(\"Distribution of classes among attributes\")\r\n plt.savefig(\"Figures/boxplots\")", "def barplot_topn_countries(df: pd.core.frame.DataFrame, feature: str,\n topn: int, kind: str, year: str, figsize=(12,6)) -> None:\n if kind != 'Import' and kind != 'Export':\n raise ValueError('Trade flow is not set to Import or Export')\n\n plt.figure(figsize=figsize)\n g = sns.barplot(x='Reporter', y=(feature,'sum'), data=df[0:topn],\n palette='muted')\n\n if topn > 5 and topn <= 10:\n rot = 0\n elif topn > 10:\n rot = 75\n else:\n rot = 0\n\n g.set_xticklabels(g.get_xticklabels(), rotation=rot)\n plt.ticklabel_format(style='plain', axis='y')\n if year == 'all':\n plt.title(f'Top-{topn} {kind}ers of vaccines around the globe', fontweight='bold')\n else:\n plt.title(f'Top-{topn} {kind}ers of vaccines around the globe in {year}', fontweight='bold')\n plt.xlabel(f'{kind}er Country')\n if feature == 'Trade Value (US$)':\n plt.ylabel(f'Total amount of {kind}s in US$')\n else:\n plt.ylabel(f'Total amount of {kind}s in Netweight (kg)')\n plt.grid(True, alpha = 0.3)\n plt.show()", "def drawStackedBarPlot(df, column, hue):\n 
plt.style.use('default')\n plt.style.use('dark_background')\n p_table = pd.pivot_table(df, index=column, \n columns=hue, aggfunc='size')\n p_table = p_table.div(p_table.sum(axis=1), axis=0)\n p_table.plot.bar(stacked=True, figsize=(14,7))\n plt.xlabel('Spekraltyp')\n plt.ylabel('Anteil')\n plt.show()", "def barGraph(listOfWord, listOfFrequency):\r\n\r\n\tindex = np.arange(len(listOfWord))\r\n\r\n\tplt.title(\"Frekuensi Kemunculan Kata\")\r\n\tplt.barh(index, listOfFrequency)\r\n\tplt.xlabel('Frekuensi')\r\n\tplt.yticks(index, listOfWord, fontsize=6)\r\n\r\n\tplt.show()", "def plot_df(data_frame):\n plt.figure(figsize = (10, 5))\n chart = sns.countplot(data_frame['label'], \n palette=\"Set1\"\n )\n plt.show()" ]
[ "0.6668762", "0.6598685", "0.6580668", "0.65461254", "0.6532494", "0.6513617", "0.6504175", "0.6385042", "0.63797927", "0.63521934", "0.6340164", "0.63299674", "0.619069", "0.61589485", "0.6137865", "0.6120342", "0.607238", "0.6047123", "0.60318536", "0.59974545", "0.5966363", "0.5954948", "0.5947267", "0.5937", "0.59294987", "0.59135085", "0.59118944", "0.5906591", "0.5905728", "0.5903932" ]
0.6864728
0
Initialize a new HTTP client event router object. uri is a URI for this event router. A new URI derived from this is created for the HTTP client event relay. host is the IP address or host name to which the HTTP connection is made. port is the TCP port number to which the HTTP connection is made.
def __init__(self, uri=None, host='', port=8082, simplex=False): super(EventRouterHTTPC, self).__init__(uri) relayuri = self.getUri()+"/HTTPC" self._relay = EventRelayHTTPC(self, relayuri, host, port, simplex) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, router, uri=None, host='', port=8082, simplex=False):\n super(EventRelayHTTPC, self).__init__(uri)\n self._router = router\n self._queue = Queue()\n self._event = threading.Event()\n self._closing = False\n self._queueEvent = threading.Event()\n self._simplex = simplex\n # Have 'router' send all subscriptions events to this object\n router.routeEventFrom(None, None, self)\n router.doSubscribeRequest(self, -1, None, None)\n\n # Create HTTP \"connection\", and start thread to respond to new events from it.\n\n \n self._httpcon = httplib.HTTPConnection(host=host, port=port)\n \n self._thread = threading.Thread(name=uri, target=self.processEvent)\n self._thread.start()\n return", "def __init__(self, uri_or_host, port=None, path=None):\r\n\r\n if port is not None:\r\n warnings.warn(\"Please use the THttpClient('http://host:port/path') syntax\", DeprecationWarning, stacklevel=2)\r\n self.host = uri_or_host\r\n self.port = port\r\n assert path\r\n self.path = path\r\n self.scheme = 'http'\r\n else:\r\n parsed = urlparse.urlparse(uri_or_host)\r\n self.scheme = parsed.scheme\r\n assert self.scheme in ('http', 'https')\r\n if self.scheme == 'http':\r\n self.port = parsed.port or httplib.HTTP_PORT\r\n elif self.scheme == 'https':\r\n self.port = parsed.port or httplib.HTTPS_PORT\r\n self.host = parsed.hostname\r\n self.path = parsed.path\r\n if parsed.query:\r\n self.path += '?%s' % parsed.query\r\n self.__wbuf = StringIO()\r\n self.__http = None\r\n self.__timeout = None", "def __init__(self, host, port):\n self.host = host\n self.port = port", "def __init__(self, host, port):\n self.host = host\n self.port = port", "def __init__(self, host, port):\n self._host = host\n self._port = port", "def __init__(self, host, port):\n self._host = host\n self._port = port\n self._data = None", "def __init__(self, router):\n self._router = router", "def __init__(self, scheme, host, port, path, query=None):\n self._hash = None\n self._str = None\n self._scheme = self._makeEmptyNone(scheme)\n self._host = host\n self._port = port\n self._path = self._makeEmptyNone(path)\n self._query = self._makeEmptyNone(query)\n self._isRegularURI = True", "def __init__(self, host, port, **kwargs):\n\n SocketHandler.__init__(self, host, port)\n BaseHandler.__init__(self, **kwargs)", "def __init__(self, host=HOST, port=PORT):\r\n self._socket = None\r\n\r\n if host is not None:\r\n self.connect(host, port)", "def __init__(self, router):\n\n self.router = router", "def __init__(self, uri):\n\n self.uri = uri", "def __init__(self, host, port = 3480):\n self.host = host\n self.port = port\n Vera.__init__(self)", "def __init__(self, host, port):\n self._closed = False\n self.host = host\n self.port = port\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((host, port))\n self.sock.listen(5)", "def main(self):\n addr = (self.uri, self.port)\n try:\n self.client.connect(addr)\n except socket.gaierror:\n print(\"[ERROR] not a valid URI. 
Try again please...\")\n else:\n print(\"[SETUP] client connected to IPv4 address\", self.uri, \"on port\", self.port)\n self.handler()", "def __init__(self, rzrip, rzrport='8026'):\n self.ip = rzrip\n self.port = rzrport\n self.url = 'http://' + rzrip + ':' + rzrport + '/razor/api'", "def __init__(self, host='http://localhost:6373'):\n self._host = host", "def __init__(self, host: str, port: int):\n self.__host: str = host\n self.__port: int = port", "def __init__(self, host):\n self.host = host", "def __init__(self, host):\n self.host = host", "def __init__(self, host, server_port):\n\n # Set up the socket connection to the server\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n # TODO: Finish init process with necessary code\n self.host = host\n self.server_port = server_port\n self.run()", "def __init__(self, uri='http://localhost:18123'):\n self.log = logging.getLogger('{}.Service'.format(__name__))\n self.uri = uri", "def __init__(self):\n self._server = None\n self._address = \"\"\n self._port = 0", "def create_connection(loop, uri):\n\n proto_pos = uri.find('://')\n protocol_name = uri[0:proto_pos]\n\n if protocol_name not in PROTOCOL_MAP:\n raise ValueError(\"Unknown protocol %s\" % protocol_name)\n\n address_str = uri[proto_pos + 3:]\n\n protocol_cls, address_parser = PROTOCOL_MAP[protocol_name]\n\n address = address_parser(address_str)\n\n connection = protocol_cls(loop, address)\n\n return connection", "def __init__(self, path):\n self.path = path\n self.fqdn = getfqdn()\n print 'INFO: host name', self.fqdn\n assert not self.fqdn.startswith('127.0.0')\n self.port = None\n self.url = None\n self.process = None\n print 'INFO: constructed web server for', self.fqdn", "def __init__(self, host='localhost', port=9090):\n self.host = host\n self.port = port\n self._stream = None\n self._io_loop = ioloop.IOLoop.current()\n self._timeout_secs = None", "def __init__(self, port_num=0):\n address = ('0.0.0.0', port_num)\n HTTPServer.__init__(self, address, self.HANDLER_CLASS)\n\n # Create a dict to store configuration values set by the client\n self.config = dict()\n\n # Start the server in a separate thread\n server_thread = threading.Thread(target=self.serve_forever)\n server_thread.daemon = True\n server_thread.start()\n\n # Log the port we're using to help identify port conflict errors\n LOGGER.debug(f'Starting service on port {self.port}')", "def __init__(self, host, port):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((host, port))\n\n self.server = self.sock.makefile('w')\n\n self.disconnected = False", "def __init__(self, port, host='', ping_timer=25):\n # TODO get these values from config\n self.host = host\n self.port = port\n self.socket = None\n self.all_connections = []\n self.all_clients = {}\n self.ping_timer_time = ping_timer", "def __init__(self, server_addr, server_port):" ]
[ "0.74562943", "0.6506829", "0.63234913", "0.63234913", "0.63085514", "0.621488", "0.60746145", "0.6005634", "0.59841245", "0.5979701", "0.5968453", "0.5921957", "0.5919456", "0.58944875", "0.58941567", "0.5850365", "0.58042604", "0.5793125", "0.5780519", "0.5780519", "0.5779045", "0.5765875", "0.5758877", "0.57411504", "0.5736077", "0.5721256", "0.57035196", "0.5692642", "0.5692463", "0.5682872" ]
0.7386842
1
Function called to close down the event router.
def close(self): self._relay.close() super(EventRouterHTTPC, self).close() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closeEvent(self, event):\n\n sys.exit()", "def on_closing_event(self):\n self.exit_event(None)", "def closeEvent(self, event):\n sys.exit(0)", "def close(self) -> None:\n self.relay(\"close\")()", "def closeEvent(self, event):\n self.exit()\n event.accept()", "def handle_close(self):\n print(self.addr, \"bye\")\n self.close()", "def quit_application(self, event):\n self.Close()\n server.closeSocket()", "def close(self):\n self._udp_handler.send('exit'.encode(encoding='utf-8'))", "def closeEvent(self, event):\r\n self.app.stop()\r\n event.accept()", "def _onExit(self, event):\n self.Close(True)", "def OnClose(self, event):\r\n if self.worker: #stop main GPIB thread\r\n self.worker.abort()\r\n time.sleep(0.3)\r\n self.Destroy()", "def closeEvent(self, event):\n self.device.disconnect()\n event.accept()", "def outCloseEvent(self):\r\n pass", "def close(self):\n Trace(\"%s close\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n self._httpcon.close()\n self._closing = True\n self._event.set()\n self._queueEvent.set()\n self._queue.put([\"closedown\",[]])\n self._thread.join()\n Trace(\"%s closed\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n return", "def closeEvent(self, event):\n self._parent.quit_application(event)", "def closeEvent(self, event):\n logger.info('see here, we can just quit application')\n QtCore.QCoreApplication.quit()", "def close(self):\n self.pi.set_watchdog(self.gpio, 0)\n if self.either_edge_cb:\n self.either_edge_cb.cancel()\n self.either_edge_cb = None", "def onClose(self, event): \n \n self.Destroy()\n return", "def OnExit(self, event):\r\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def closeEvent(self, event):\n event.accept() # let the window close\n self.returnHome()", "def close(self):\n self.closecallback()\n self.destroy()", "def close(self):\n self.exit()", "def on_exit(self, event):\n # Close server\n if hasattr(self, 'webapp'):\n requests.get(ROOT_URL + '/shutdown')\n self.webapp = None\n\n # Close app\n sys.exit()", "def on_close(self):\n print('[INFO] closing...')\n self.stopEvent.set()\n del self.tello\n self.root.quit()", "def closeEvent(self, event):\n self.is_active = False\n app._in_event_loop = False\n super()", "def close(self):\n self.call('close')" ]
[ "0.71471226", "0.71452016", "0.70188725", "0.7006168", "0.6977439", "0.6927479", "0.68837065", "0.6871763", "0.68616617", "0.68096364", "0.6793978", "0.6784437", "0.67712516", "0.6754609", "0.6701007", "0.6697934", "0.6681519", "0.6672537", "0.6667044", "0.66509724", "0.66509724", "0.66509724", "0.66509724", "0.6642087", "0.6641009", "0.6638897", "0.66380095", "0.66341525", "0.66270345", "0.66220933" ]
0.73405606
0
Initialize a new HTTP client event passing object. An HTTP client is associated with an existing event router; it sends all messages received from that router to the HTTP connection, and forwards all messages received from the HTTP connection to the router. Interaction with the indicated EventRouter object takes place primarily through the 'receive' methods of this class and the supplied router. Because messages received from HTTP are sent onwards using the normal forwarding mechanisms, this class must perform loop detection to stop events being bounced back to the HTTP connection.
def __init__(self, router, uri=None, host='', port=8082, simplex=False): super(EventRelayHTTPC, self).__init__(uri) self._router = router self._queue = Queue() self._event = threading.Event() self._closing = False self._queueEvent = threading.Event() self._simplex = simplex # Have 'router' send all subscriptions events to this object router.routeEventFrom(None, None, self) router.doSubscribeRequest(self, -1, None, None) # Create HTTP "connection", and start thread to respond to new events from it. self._httpcon = httplib.HTTPConnection(host=host, port=port) self._thread = threading.Thread(name=uri, target=self.processEvent) self._thread.start() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, uri=None, host='', port=8082, simplex=False):\n super(EventRouterHTTPC, self).__init__(uri)\n relayuri = self.getUri()+\"/HTTPC\"\n self._relay = EventRelayHTTPC(self, relayuri, host, port, simplex)\n return", "def __init__(self, router):\n self._router = router", "def __init__(self):\n\n self.loop = asyncio.get_event_loop()\n self.aiohttp = web.Application(\n loop=self.loop,\n middlewares=[unhandled_route],\n )\n self.client = ClientSession()\n self.ws = WebSocketHandler(self)\n self.cert = self._load_ssl_certificate()\n\n self.config()", "def __init__(self, router):\n\n self.router = router", "def __init__(self, remote_address):\n self._local_address = None\n self.remote_address = remote_address\n self._connection_event = gevent.event.Event()\n\n self._on_connect_handlers = list()\n self._on_disconnect_handlers = list()\n\n self._socket = None\n self._reader = None\n self._writer = None\n\n self._read_lock = gevent.lock.RLock()\n self._write_lock = gevent.lock.RLock()", "def __init__(self, ae, address, ssl_context=None):\n self.ae = ae\n self.ssl_context = ssl_context\n self.allow_reuse_address = True\n\n TCPServer.__init__(\n self, address, RequestHandler, bind_and_activate=True\n )\n\n self.timeout = 60", "def processEvent(self):\n # Note: break out of event dispatch loop when closedown event is received\n # and closing flag is set. This is to prevent DoS attack by faked closedown\n # event type, and to ensure that prior events received are all processed.\n delay_on_error_min = 0.125 # Back off retry interval on error..\n delay_on_error_max = 20.0 # ..\n delay_on_error = delay_on_error_min # ..\n while True:\n if delay_on_error < delay_on_error_max:\n delay_on_error *= 2\n try:\n # PLEASE NOTE: In the event that the HTTPC is run as duplex, not simplex\n # then the post methods will be delayed if nothing is sent down to the client\n # from the server. 
This timeout is controlled by QUEUE_WAIT_TIMEOUT in EventRouterHTTPS.py\n if self._simplex == True:\n self._queueEvent.wait()\n self._queueEvent.clear()\n \n if not self._queue.empty():\n Trace(\"%s queue.get ...\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n ###msgbody = self._queue.get()\n ###Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n ###self._event.set()\n msgbody = self.getQueuedItem()\n [typ,env] = msgbody\n if typ == \"closedown\":\n if self._closing: break\n else:\n # process request as an HTTP POST request\n data = makeEnvelopeData(env)\n headers = { \"Content-type\": \"text/plain\",\n \"Accept\": \"text/plain\",\n \"Content-length\": str(len(data)) }\n self._httpcon.request(\"POST\", \"/request_path_ignored\", data, headers)\n response = self._httpcon.getresponse()\n delay_on_error = delay_on_error_min\n elif self._simplex == False:\n # Nothing in queue:\n # issue a GET for incoming events\n _log.info(\"%s HTTP get ...\"%(self.getUri()))\n headers = { \"Accept\": \"text/plain\" }\n self._httpcon.request(\"GET\", \"/request_path_ignored\", None, headers)\n response = self._httpcon.getresponse()\n if response.status == 200:\n delay_on_error = delay_on_error_min\n msgbody = response.read()\n Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n # Parse message and act accordingly\n msgdata = parseMessageData(msgbody)\n Trace(\"%s get msgdata: %s\"%(self.getUri(),str(msgdata)), \"EventLib.EventRelayHTTPC\")\n if msgdata == None:\n #TODO: Log \"Request body malformed\"\n pass\n elif msgdata[0] == \"forward\":\n # msgdata = [\"forward\", [['R1', 'R2', 'R3'], 'ev:typ', 'ev:src', 'payload']]\n event = makeEvent(evtype=msgdata[1][1],source=msgdata[1][2],payload=msgdata[1][3])\n env = constructEnvelope(msgdata[1][0], event)\n self.forward(event, env)\n elif msgdata[0] == \"idle\":\n # Idle response gives client a chance to send if anything is queued\n pass\n else:\n #TODO: handle closedown message?\n Warn( \"%s Request body unrecognized option: %s\"%(self.getUri(),msgdata[0]), \"EventRelayHTTPC\")\n pass\n elif response.status == 503:\n Trace( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n # Remote end closed down\n break\n else:\n # TODO: (log error response)\n Warn( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n \n except httplib.BadStatusLine, e:\n # This can happen at closedown\n Info( \"%s processEvent bad response: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.CannotSendRequest, e:\n # This can happen at closedown\n Info( \"%s Cannot send request: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.ResponseNotReady, e:\n # This can happen at startup and sometimes other times:\n # maybe multiple requests on a single HTTP connection object?\n Info( \"%s Response not ready: (%s)\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except socket.error, e:\n Warn( \"%s Socket error: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n return", "def __init__(\n self,\n client_id: str,\n loop: asyncio.BaseEventLoop = None,\n pipe: int = 0,\n handler: callable = None\n ):\n self._rpc = AioClient(client_id, loop=loop, pipe=pipe, handler=handler)\n 
self.loop = self._rpc.loop\n self._status = {'pid': os.getpid()}\n self.updating_loop = None", "def __init__(self, application):\n self.app = application\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.response_headers = \"\"\n self.times = 0", "def __init__(self, params={}):\n self.lt_ses = lt.session() # pylint: disable=no-member\n self.lt_ses.listen_on(6881, 6891)\n\n self.params = params\n self.queue = deque()\n self.stream_thread = None\n self.handle = None", "def __init__(self):\n # Create a TCP/IP socket\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def __init__(self, host, port=None, strict=None, \r\n timeout=socket._GLOBAL_DEFAULT_TIMEOUT,\r\n source_address=None,\r\n username=None, password=None,\r\n certChain=None, privateKey=None,\r\n checker=None,\r\n settings=None,\r\n ignoreAbruptClose=False, \r\n anon=False):\r\n if source_address:\r\n httplib.HTTPConnection.__init__(self, host, port, strict,\r\n timeout, source_address)\r\n if not source_address:\r\n httplib.HTTPConnection.__init__(self, host, port, strict,\r\n timeout)\r\n self.ignoreAbruptClose = ignoreAbruptClose\r\n ClientHelper.__init__(self,\r\n username, password, \r\n certChain, privateKey,\r\n checker,\r\n settings, \r\n anon)", "def __init__(self, assoc, client_socket=None, address=('', 0)):\n self._assoc = assoc\n\n if client_socket is not None and address != ('', 0):\n LOGGER.warning(\n \"AssociationSocket instantiated with both a 'client_socket' \"\n \"and bind 'address'. The original socket will not be rebound\"\n )\n\n if client_socket is None:\n self.socket = self._create_socket(address)\n self._is_connected = False\n else:\n self.socket = client_socket\n self._is_connected = True\n # Evt5: Transport connection indication\n self.event_queue.put('Evt5')\n\n self.tls_args = None\n self.select_timeout = 0.5", "def __init__(self, event_type, source, target, orcbot, raw, data=None):\n self.event_type = event_type\n self.source = source\n self.target = target\n self.data = data\n self.socket = source\n self.message = raw\n self.orcbot_socket = orcbot", "def initialize_networkHandler(self):\n\t\tself.networkHandler = NetworkHandler(\n\t\t\tself.callbackQueue,\n\t\t\tself.received_order,\n\t\t\tself.set_light_callback,\n\t\t\tself.newOrderQueue,\n\t\t\tself.startedOrderQueue,\n\t\t\tself.lost_connection\n\t\t\t)", "def connect(cls, url=None, router=None, timeout=10, ssl_domain=None, sasl=None, edge_router=None):\n url_ = Url(url) # Convert string to Url class.\n\n if url_.path is not None:\n path = url_.path\n elif router:\n path = '_topo/0/%s/$management' % router\n elif edge_router:\n path = '_edge/%s/$management' % edge_router\n else:\n path = u'$management'\n connection = BlockingConnection(url,\n timeout=timeout,\n ssl_domain=ssl_domain,\n sasl_enabled=bool(ssl_domain or sasl),\n allowed_mechs=str(sasl.mechs) if sasl and sasl.mechs is not None else None,\n user=str(sasl.user) if sasl and sasl.user is not None else None,\n password=str(sasl.password) if sasl and sasl.password is not None else None)\n try:\n return cls(connection, path)\n except Exception:\n # ownership of connection has not been given to a new Node; close the connection\n connection.close()\n raise", "def __init__(self, client_id, rhost, rport, lhost, lport, map):\n asyncore.dispatcher.__init__(self, map=map)\n self.client_id = client_id\n self.lhost = lhost\n self.lport = lport\n self.rhost = rhost\n self.rport = rport\n 
self.molo_tcp_pack = MoloTcpPack()\n self.tranparency = None\n self.append_recv_buffer = None\n self.append_send_buffer = None\n self.append_connect = None\n self.client_token = None\n self.clear()", "def handle_client(self):\n e = threading.Event()\n reg_t = threading.Thread(target=self.handle_reg_client, args=(e,))\n stream_t = threading.Thread(target=self.handle_stream_client,\n args=(e,))\n reg_t.start()\n stream_t.start()", "def receive(self, fromrouter, envelope):\n event = envelope.unWrap(self.getUri())\n if event:\n Trace(\"%s receive %s from %s\"%(self.getUri(),event,fromrouter), \"EventLib.EventRelayHTTPC\")\n return self.queueItem([\"forward\",envelope])\n return makeDeferred(StatusVal.OK)", "def __init__(self):\r\n self._map1 = {\r\n \"CIRC\" : self.circ_status_event,\r\n \"STREAM\" : self.stream_status_event,\r\n \"ORCONN\" : self.or_conn_status_event,\r\n \"STREAM_BW\" : self.stream_bw_event,\r\n \"BW\" : self.bandwidth_event,\r\n \"DEBUG\" : self.msg_event,\r\n \"INFO\" : self.msg_event,\r\n \"NOTICE\" : self.msg_event,\r\n \"WARN\" : self.msg_event,\r\n \"ERR\" : self.msg_event,\r\n \"NEWDESC\" : self.new_desc_event,\r\n \"ADDRMAP\" : self.address_mapped_event,\r\n \"NS\" : self.ns_event,\r\n \"NEWCONSENSUS\" : self.new_consensus_event,\r\n \"BUILDTIMEOUT_SET\" : self.buildtimeout_set_event,\r\n \"GUARD\" : self.guard_event,\r\n \"TORCTL_TIMER\" : self.timer_event\r\n }\r\n self.c = None # Gets set by Connection.set_event_hanlder()\r\n self.pre_listeners = []\r\n self.post_listeners = []", "def __init__(self, url, routing_key, log_file='/dev/null', exchange='yacamc_exchange', exchange_type='direct',\n queue=None, acked=True, sender=False, otq = False, log_level=logging.FATAL):\n\n if queue is None:\n queue = routing_key\n self.exchange = exchange\n self.exchange_type = exchange_type\n self.queue = queue\n self.routing_key = routing_key\n self._url = url\n self.acked = acked\n self.otq = otq\n\n self.cb = None\n\n self._connection = None\n self._channel = None\n self._closing = False\n\n log_format = '%(levelname) -10s %(asctime)s %(name) -30s %(funcName) -35s %(lineno) -5d: %(message)s'\n handler = logging.FileHandler(log_file)\n logging.basicConfig(level=log_level, format=log_format)\n self.logger = logging.getLogger(__name__)\n self.logger.addHandler(handler)\n\n # used only for sending\n self._deliveries = []\n self._acked = 0\n self._nacked = 0\n self._message_number = 0\n self._stopping = False\n self._done_sending = False\n self.message = \"\"\n self.sender = sender\n\n # self.run()\n # self._connection = self.connect()", "def __init__(self, client: com.Bot):\n self._listening_components = []\n \"\"\"A list of components that are listening for interaction\"\"\"\n self._discord = client\n self._discord.add_listener(self._on_socket_response, 'on_socket_response')", "def __init__(self):\r\n self.client_socket = socket.socket() # the socket of the client.\r\n self.communicator = Communicator()\r\n self.events_handler = EventsHandler(self.client_socket)\r\n self.running = True\r\n self.display_resolution = DEFAULT_DISPLAY_RESOLUTION\r\n self.screen = self.get_display()", "def __init__(self):\r\n self._map1 = {\r\n \"CIRC\" : self.circ_status_event,\r\n \"STREAM\" : self.stream_status_event,\r\n \"ORCONN\" : self.or_conn_status_event,\r\n \"STREAM_BW\" : self.stream_bw_event,\r\n \"BW\" : self.bandwidth_event,\r\n \"DEBUG\" : self.msg_event,\r\n \"INFO\" : self.msg_event,\r\n \"NOTICE\" : self.msg_event,\r\n \"WARN\" : self.msg_event,\r\n \"ERR\" : self.msg_event,\r\n 
\"NEWDESC\" : self.new_desc_event,\r\n \"ADDRMAP\" : self.address_mapped_event,\r\n \"NS\" : self.ns_event,\r\n \"NEWCONSENSUS\" : self.new_consensus_event,\r\n \"BUILDTIMEOUT_SET\" : self.buildtimeout_set_event,\r\n \"GUARD\" : self.guard_event,\r\n \"TORCTL_TIMER\" : self.timer_event\r\n }\r\n self.parent_handler = None\r\n self._sabotage()", "def __init__(self, test_stream=None, no_delay=False, window=None, server=None):\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server", "def __init__(self, client_id: str):\n\n self._cs = aiohttp.ClientSession(\n loop=asyncio.get_event_loop(),\n raise_for_status=True,\n headers={\"Client-ID\": client_id},\n )", "def __init__(self, loop=None):\n object.__setattr__(self, '_loop', loop or get_event_loop())", "def on_open(self, request):\n\n # Find the right endpoint and create th connection\n dest = destinations[self.endpoint]\n\n name = self.session.handler.name if self.session.handler else '??'\n logger.info('New %s client for endpoint %s on port %s' %\n (name, self.endpoint, dest[1]))\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n self.endpoint_stream = iostream.IOStream(s)\n self.endpoint_stream.connect(dest, self.on_endpoint_connected)", "def __init__(self):\n\n\t\t#: Create the client and connect it to the host server.\n\t\tself.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\t\t#: Create a seperate thread to control listening to messages\n\t\t#: coming from the server.\n\t\tself.listen_thread = threading.Thread(target=self.listen)\n\n\t\t#: Create a seperate thread to control displaying messages.\n\t\t#: Handling this seperatedly from listening for messages\n\t\t#: ensures that messages aren't lost in the time it takes\n\t\t#: for a message to be displayed.\n\t\tself.messages = []\n\t\tself.display_thread = threading.Thread(target=self.display_messages)\n\n\t\t#: Have a most recent message for testing purposes.\n\t\tself.most_recent_message = \"\"\n\n\t\tself.joined = True\n\n\t\t#: Used to ensure you: doesnt appear twice.\n\t\tself.displayed_you = False", "def __init__(self, notification_handler=None, error_handler=None):\n self._socket = None\n self._thread = None\n self._cur_socket_timeout = 0\n self._next_request_id = 0\n self._notification_handler = notification_handler\n self._error_handler = error_handler" ]
[ "0.61509657", "0.5864122", "0.57680523", "0.5765937", "0.5636957", "0.542871", "0.53911084", "0.5345458", "0.52738434", "0.52246845", "0.517456", "0.51684", "0.51025426", "0.5091683", "0.50771654", "0.5053803", "0.50480044", "0.50263023", "0.501474", "0.49918574", "0.49689016", "0.49622035", "0.49477503", "0.4925583", "0.49240094", "0.49104512", "0.4904054", "0.48972535", "0.48854524", "0.48783186" ]
0.74102676
0
Add an item to the queue, and return a deferred object that fires when an item is removed (or the queue is empty).
def queueItem(self, item): Trace("%s queueItem (%s)"%(self.getUri(),item), "EventLib.EventRelayHTTPC") if not self._closing: self._queue.put(item) self._queueEvent.set() return makeQueueDeferred(StatusVal.OK, self._queue, self._event) return makeDeferred(StatusVal.OK)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, item):\n completeDeferred = defer.Deferred()\n self.queue.append((item, completeDeferred))", "def add(self, item: T) -> None:\n self._queue.append(item)\n if not self.is_empty():\n self._queue.sort(reverse=True)", "def enqueue(self, item):\n self.queue.append(item)", "def enqueue(self, item):\n self.queue.append(item)", "def enqueue(self, item):\n self._queue.append(item)", "def put(self, item: Any):\n has_item = True\n with self._lock:\n if item not in self._items:\n self._items.add(item)\n has_item = False\n if not has_item:\n self._queue.put(item)", "def enqueue(self, item):\n self.__queue.insert(0, item)", "def put_nowait(self, item: _T) -> None:\n self._consume_expired()\n if self._getters:\n assert self.empty(), \"queue non-empty, why are getters waiting?\"\n getter = self._getters.popleft()\n self.__put_internal(item)\n future_set_result_unless_cancelled(getter, self._get())\n elif self.full():\n raise QueueFull\n else:\n self.__put_internal(item)", "def getQueuedItem(self):\n Trace(\"%s getQueuedItem ...\"%(self.getUri()), context=\"EventLib.EventRelayHTTPC\")\n item = self._queue.get()\n Trace(\"%s getQueuedItem (%s)\"%(self.getUri(),item), context=\"EventLib.EventRelayHTTPC\")\n self._event.set()\n return item", "def _put(self, item, queue):", "def enqueue(self, item):\n self.list.append(item)", "def put_nowait(self, item):\r\n if self.full():\r\n raise QueueFull\r\n self._put(item)\r\n self._unfinished_tasks += 1\r\n self._finished.clear()\r\n self._wakeup_next(self._getters)", "def put(\n self, item: _T, timeout: Optional[Union[float, datetime.timedelta]] = None\n ) -> \"Future[None]\":\n future = Future() # type: Future[None]\n try:\n self.put_nowait(item)\n except QueueFull:\n self._putters.append((item, future))\n _set_timeout(future, timeout)\n else:\n future.set_result(None)\n return future", "def deque_non_blocking_put(self, item):\n try:\n self.q.put(item, block=False)\n return True\n except queue.Full:\n try:\n _ = self.q.get(block=False)\n dropped = False\n except queue.Empty:\n dropped = True\n # TODO - could crash due to a race condition, could be solved with a lock\n self.q.put(item, block=False)\n return dropped", "def add_to_queue(self, msg):\n if not self.queue.full():\n self.queue.put(msg)", "def enqueue(self, item):\n\n self.__items__.append(item)", "def enqueue(Q, x):\n # Q.append(x)\n Q.put_nowait(x)\n if debug: \n print(\"enqueue\", x, \":\", end=\" \")\n show_queue(Q)\n return Q", "def enqueue(self, item):\n\t\tself.items.insert(0, item)", "def enqueue(self, item):\n self.items.insert(0, item)", "def enqueue(self, item):\n self.items.insert(0, item)", "def append ( self , item ) :\n self.cond.acquire()\n try:\n if self.closed :\n raise Exception( \"Trying to append to a closed queue\" )\n else :\n self.weight += int( item['size'] )\n self.push( item )\n self.cond.notify()\n finally:\n self.cond.release()", "def push(self, value):\n if self.please_stop and not self.allow_add_after_close:\n Log.error(\"Do not push to closed queue\")\n\n with self.lock:\n self._wait_for_queue_space()\n if not self.please_stop:\n self.queue.appendleft(value)\n return self", "def add(self, item):\n self.sleeping.reveille() # wake items whose sleep timer has expired\n self.stack.push(item)", "def enqueue(self, x):\r\n self.queue.append(x)\r\n return self.queue", "def put(self, item):\r\n while self.full():\r\n putter = self._loop.create_future()\r\n self._putters.append(putter)\r\n try:\r\n yield from putter\r\n except:\r\n putter.cancel() # Just in case putter 
is not done yet.\r\n if not self.full() and not putter.cancelled():\r\n # We were woken up by get_nowait(), but can't take\r\n # the call. Wake up the next in line.\r\n self._wakeup_next(self._putters)\r\n raise\r\n return self.put_nowait(item)", "def __add__(self, value):\n self.queue.append(value)", "def non_blocking_put(self, item):\n try:\n self.q.put(item, block=False)\n return True\n except queue.Full:\n return False", "def add(self, item):\n if self.has_item(item):\n return\n\n self.cache.append(item)\n\n if self.size() > self.max_size:\n self.cache.popleft()", "def add_message_to_queue(self, message):\n\t\t\t\tself.message_queue.append(message)\n\t\t\t\treturn self.message_queue", "def maybe_enqueue(self):\n if len(self._vals) > 0:\n self.enqueued = True\n return self._queue.enqueue(self._vals)\n else:\n return None" ]
[ "0.809805", "0.66659725", "0.6658853", "0.6658853", "0.649351", "0.6432891", "0.6412939", "0.6350026", "0.6251586", "0.623737", "0.6233821", "0.6162664", "0.6149751", "0.6137725", "0.6091136", "0.60882497", "0.60764444", "0.6054781", "0.5985582", "0.5985582", "0.598518", "0.5972382", "0.5959985", "0.594677", "0.5944127", "0.59432197", "0.5928539", "0.5911735", "0.5863586", "0.58527267" ]
0.6929844
1
Wait for an item to be queued, then return it.
def getQueuedItem(self): Trace("%s getQueuedItem ..."%(self.getUri()), context="EventLib.EventRelayHTTPC") item = self._queue.get() Trace("%s getQueuedItem (%s)"%(self.getUri(),item), context="EventLib.EventRelayHTTPC") self._event.set() return item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except queue.Empty:\n return None\n return item", "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except Queue.Empty:\n return None\n return item", "def worker_take_item(self):\r\n worker = self.get_waiting_worker()\r\n if worker:\r\n worker.take_item(self.item)\r\n self.item = Item.E\r\n return worker", "def get_nowait(self):\r\n if self.empty():\r\n raise QueueEmpty\r\n item = self._get()\r\n self._wakeup_next(self._putters)\r\n return item", "def get(self):\n with self.__lock:\n while True:\n try:\n job = self.__queue.get(False)\n self.__lock.notify_all()\n return job\n except Queue.Empty:\n self.__lock.wait()", "def get_nowait(self) -> _T:\n self._consume_expired()\n if self._putters:\n assert self.full(), \"queue not full, why are putters waiting?\"\n item, putter = self._putters.popleft()\n self.__put_internal(item)\n future_set_result_unless_cancelled(putter, None)\n return self._get()\n elif self.qsize():\n return self._get()\n else:\n raise QueueEmpty", "def get_next_item(self, timeout=None):\n if self.current_item is not None:\n raise error_classes.UVMSequenceError(\"You must call item_done() before calling get_next_item again\")\n self.current_item = self.req_q.get(timeout=timeout)\n return self.current_item", "def _wait_for_event_in_queue(self):\n try:\n event = self._queue.get(timeout=SendTelemetryEventsHandler._MAX_TIMEOUT)\n self._queue.task_done()\n except Empty:\n # No elements in Queue, return None\n event = None\n\n return event", "def get(self):\n\t\ttry:\n\t\t\tself.logger.debug('Im trying to get item from queue')\n\t\t\titem = self.queue.get()\n\t\t\tself.logger.debug('Recevie item from queue %s'%(item))\n\t\t\treturn True, item\n\t\texcept Exception, e:\n\t\t\tself.logger.error('Error method get, error: %s'%(e),exc_info=True)\n\t\t\treturn False, None", "def non_blocking_get(self):\n try:\n return self.q.get(block=False)\n except queue.Empty:\n time.sleep(0)\n return None", "def _wait_queue(self):\n while True:\n time.sleep(0.1)\n if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():\n return", "def wait(self, timeout=None):\r\n with self._lock:\r\n if self._finished:\r\n raise RuntimeError(\"wait() does not support re-entry!\")\r\n if not self._started:\r\n for thread in self._wait_events:\r\n thread.timeout = timeout\r\n thread.start()\r\n self._started = True\r\n try:\r\n if self._queue.get(timeout=timeout):\r\n return True\r\n return False\r\n except Empty:\r\n return False\r\n finally:\r\n with self._lock:\r\n self._finished = True", "def dequeue(self, timeout=0):\n result = self.connection.dequeue_any([self], timeout)\n if result:\n job, queue = result\n return job\n else:\n return None", "def queueItem(self, item): \n Trace(\"%s queueItem (%s)\"%(self.getUri(),item), \"EventLib.EventRelayHTTPC\")\n if not self._closing:\n self._queue.put(item)\n self._queueEvent.set()\n return makeQueueDeferred(StatusVal.OK, self._queue, self._event)\n return makeDeferred(StatusVal.OK)", "def wait_for_queue(self, q, stop=True):\n return _loader.wait_for_queue(q, stop=stop)", "def get(self):\r\n try:\r\n # get with block=False returns an item if one\r\n # is immediately available, else raises the Empty exception\r\n return self._queue.get(block=False)\r\n except queue.Empty:\r\n return self._create_connection()", "def get(self, block=True, timeout=None):\n return self.queue.get(block, timeout)", "def wait(self):\n self.queue.join()", "def 
pollTillAvailable(self):\n item = self.getItem()\n while item is None:\n item = self.getItem()\n\n return item", "def wait_for_message(self, tag, timeout=None):\n def done_check():\n if self._message_queue.setdefault(tag,[]):\n value=heapq.heappop(self._message_queue[tag])[-1]\n return True,value\n return False,None\n return self._wait_in_process_loop(done_check,timeout=timeout)", "def q_get(self):\n while not self.stopped():\n try:\n return self.in_q.get(timeout=self.heart_beat)\n except queue.Empty:\n pass", "def _get_nowait(self):\n # Fulfills a waiting producer, returning its value, or raising Empty if\n # no fulfillable producers are waiting.\n def fulfill_waiting_producer():\n while True:\n if self._waiting_producers:\n produce_wish = self._waiting_producers.pop(0)\n with produce_wish.group.lock:\n if not produce_wish.group.fulfilled:\n return produce_wish.fulfill()\n else:\n raise Empty()\n\n if self._buf is not None and not self._buf.empty:\n value = self._buf.pop()\n try:\n # Cycles a producer's value onto the buffer\n produced = fulfill_waiting_producer()\n self._buf.push(produced)\n except Empty:\n pass\n return value\n else:\n return fulfill_waiting_producer()", "def put(self, item):\r\n while self.full():\r\n putter = self._loop.create_future()\r\n self._putters.append(putter)\r\n try:\r\n yield from putter\r\n except:\r\n putter.cancel() # Just in case putter is not done yet.\r\n if not self.full() and not putter.cancelled():\r\n # We were woken up by get_nowait(), but can't take\r\n # the call. Wake up the next in line.\r\n self._wakeup_next(self._putters)\r\n raise\r\n return self.put_nowait(item)", "def _wait_empty(self):\n while True:\n if self.queue.empty():\n # We still have to wait for the last queue item being processed\n # (queue.empty() returns True before queue.task_done() is\n # called).\n self.queue.join()\n return\n time.sleep(1)", "def put(\n self, item: _T, timeout: Optional[Union[float, datetime.timedelta]] = None\n ) -> \"Future[None]\":\n future = Future() # type: Future[None]\n try:\n self.put_nowait(item)\n except QueueFull:\n self._putters.append((item, future))\n _set_timeout(future, timeout)\n else:\n future.set_result(None)\n return future", "def worker_process(self, item):\n g_sleep()\n return item", "def put_nowait(self, item: _T) -> None:\n self._consume_expired()\n if self._getters:\n assert self.empty(), \"queue non-empty, why are getters waiting?\"\n getter = self._getters.popleft()\n self.__put_internal(item)\n future_set_result_unless_cancelled(getter, self._get())\n elif self.full():\n raise QueueFull\n else:\n self.__put_internal(item)", "def ztest_get_item(self):\n \n queue = NMSQueue()\n \n result_set = queue.get_items_with_priority(1,1,0,1)\n \n for item in result_set:\n print(\"\\nItem = %s\\n\" % (item) )\n newitem = queue.get_item(item.uuid)\n print(\"\\nRetrieve the same from queue Item = %s\\n\" % (newitem) )", "def timeout_get(self, timeout):\n try:\n return self.q.get(timeout=timeout)\n except queue.Empty:\n time.sleep(0)\n return None", "def get(self) -> Any:\n return self._queue.get()" ]
[ "0.69198966", "0.69057333", "0.6884813", "0.6864645", "0.68376034", "0.6821141", "0.6660425", "0.6638609", "0.65500504", "0.6460967", "0.6446401", "0.63761413", "0.63736504", "0.6362188", "0.6324351", "0.62841207", "0.6243727", "0.61811554", "0.6120136", "0.61119175", "0.60853547", "0.60850537", "0.6048267", "0.6044677", "0.60400003", "0.6034453", "0.60216844", "0.59891033", "0.59822494", "0.5976261" ]
0.734153
0
Recursively load all teachers that can be found in the current experiment's directory.
def load_teachers(self): # Get the experiment's directory to load from ex_dir = ask_for_experiment(max_display=10, env_name=self.env_real.name, perma=False) self.load_teacher_experiment(ex_dir) if len(self.teacher_policies) < self.num_teachers: print( f"You have loaded {len(self.teacher_policies)} teachers - load at least {self.num_teachers - len(self.teacher_policies)} more!" ) self.load_teachers()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_teacher_experiment(self, exp: Experiment):\n _, _, extra = load_experiment(exp)\n self.unpack_teachers(extra)", "def preload_all_problems(self):\n for _, _, filenames in os.walk(self.problemDir):\n for filename in filenames:\n if filename[-3:] == \".py\" and filename != \"__init__.py\":\n self.load_problem_file(filename[0:-3])", "def loadallskills(self):\r\n for skill in os.listdir( os.path.join( es.getAddonPath( info.basename ), \"skills\" )):\r\n es.load(\"%s/skills/%s\" % (info.basename, skill))", "def load_training():\n for can in candidates:\n trainings[can] = []\n for subdir, dirs, files in os.walk(os.path.join(corpus_dir, can)):\n for doc in files:\n trainings[can].append(doc)", "def _load_dirs(self):\n rootdirs = self._docset.get_compounds(xml.Directory,\n lambda x: x.get_parent() is None)\n for dirdoc in rootdirs:\n self._load_dir(dirdoc, None)", "def load_test_users():\n return [load_test_angel(), load_test_troublemaker(), load_test_rebel()]", "def load_fixtures(self):\n for fixture_dir in settings.FIXTURE_DIRS:\n fixture_dir = os.path.join(fixture_dir, self.filesystem_name)\n for (root, dirs, files) in os.walk(fixture_dir):\n for file in files:\n full_file_path = os.path.join(root, *dirs, file)\n with open(full_file_path, 'rb') as f:\n self.save(os.path.relpath(full_file_path, fixture_dir), f)", "def _load_module_recursive(self, dir) :\t\n\t\tfor filepath in os.listdir(dir) :\n\t\t\tfullpath = os.path.join(dir, filepath)\n\n\t\t\tif os.path.isdir(fullpath) :\n\t\t\t\tself._load_module_recursive(fullpath)\n\n\t\t\telif os.path.splitext(filepath)[1] == '.py' :\n\t\t\t\tutils.load_module(fullpath, self.settings.ROOT_PATH)", "def __init__(self, *paths, **kwargs):\n trajectories = load_trajectories(*paths, **kwargs)\n super().__init__(trajectories, **kwargs)", "def load_data(self) -> None:\n self.paths: List[str] = []\n self.durations: List[float] = []\n self.transcriptions: List[str] = []\n\n def raise_(err):\n \"\"\"raises error if problem during os.walk\"\"\"\n raise err\n\n for subset in self.subsets:\n subset_path = os.path.join(self.root, self.base_dir, subset)\n for root, dirs, files in os.walk(subset_path, onerror=raise_):\n if not files:\n continue\n matches = fnmatch.filter(files, \"*.trans.txt\")\n assert len(matches) == 1, \"> 1 transcription file found\"\n self._parse_transcription_file(root, matches[0])\n\n self._sort_by_duration()", "def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")", "def discover_examples():\n root = './examples'\n for filename in os.listdir(root):\n if os.path.splitext(filename)[1] == '.py':\n yield os.path.join(root, filename)", "def loadTrie(self):\n for file in self._gram_files:\n trie_file = getTrieFile(os.path.basename(file), self._pickle_dir)\n with open(trie_file, 'rb') as fd:\n self._tries.append(pickle.load(fd))", "def prune_teachers(self):\n self.teacher_policies = self.teacher_policies[: self.num_teachers]\n self.teacher_envs = self.teacher_envs[: self.num_teachers]\n self.teacher_expl_strats = self.teacher_expl_strats[: self.num_teachers]\n self.teacher_critics = self.teacher_critics[: self.num_teachers]\n self.teacher_ex_dirs = 
self.teacher_ex_dirs[: self.num_teachers]", "def _load_trials(self) -> hyperopt.Trials:\n if os.path.isfile(self.trials_path):\n trials = pickle.load(open(self.trials_path, \"rb\"))\n else:\n trials = hyperopt.Trials()\n return trials", "def imdb_load():\n for root, dirs, filenames in os.walk(os.path.dirname(__file__) + \"/imdb\"):\n for file_name in filenames:\n if file_name.find(\".json\") > 0:\n Movie.imdb_load_file(os.path.dirname(__file__) + \"/imdb/\" + file_name)\n return Movie.__movies", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def _load_templates(cls):\n if cls._raw_templates is None:\n cls._raw_templates = fetch_rrlyrae_templates()", "def load_tests(loader, suite, pattern):\n tests = loader.discover(start_dir=here.parents[0], pattern=pattern)\n suite.addTests(tests)\n return suite", "def get_teachers(self):\n query = Teacher.all().order('teacher')\n return query.fetch()", "def iter_dir(tree, path):\n for f in os.listdir(path):\n if os.path.isfile(path + '/' + f + '/__init__.py'):\n tree[f] = None\n elif os.path.isdir(path + '/' + f):\n tree[f] = {}\n SnakeWM.iter_dir(tree[f], path + '/' + f)", "def get_test_examples(self, data_dir):\n \n raise NotImplementedError()", "def loadFiles():\n all_chapters = []\n for name in names:\n f = open(file_path + name, \"r\", encoding=\"utf-8\")\n html_file = f.read()\n f.close()\n chap_text = extractText(html_file)\n new_text = cleanText(chap_text)\n all_chapters.append(new_text)\n concatenated_chapters = \" \".join(all_chapters)\n return concatenated_chapters", "def loadDirectory(self, dirname):\r\n cachelist=os.listdir(dirname)\r\n testlist=fnmatch.filter(cachelist,'*.hdf5')\r\n \r\n for file_ in testlist:\r\n print(\"Using {0}\".format(file_))\r\n \r\n files = [h5py.File(os.path.join(dirname, fn),'r') for fn in testlist]\r\n return files", "def tubs_from_directory(tub_dir, verbose=False):\n tubs = []\n count = 0\n root_path = Path(tub_dir)\n for item in root_path.iterdir():\n if item.is_dir():\n try:\n t = Tub(str(item),read_only=True)\n count += len(t)\n except FileNotFoundError as ex:\n continue\n except ValueError as ex:\n # In case the catalog file is empty\n continue\n tubs.append(t)\n if verbose:\n print( f\"Loaded {count} records.\" )\n\n return tubs", "def preload_all(self):\n for tp in self.tps:\n for f in self.featurefiles + self.maskfiles:\n file = os.path.join(tp, f)\n print('preloading {}'.format(file))\n self.load(file, lazy=False)", "def update_treemakers():\n global treemakers\n treemakers = {}\n for module_filename in glob(os.path.join(hax.hax_dir + '/treemakers/*.py')):\n module_name = os.path.splitext(os.path.basename(module_filename))[0]\n if module_name.startswith('_'):\n continue\n\n # Import the module, after which we can do hax.treemakers.blah\n __import__('hax.treemakers.%s' % module_name, globals=globals())\n\n # Now get all the treemakers defined in the module\n for tm_name, tm in inspect.getmembers(getattr(hax.treemakers, module_name),\n lambda x: type(x) == type and issubclass(x, TreeMaker)):\n if tm_name == 'TreeMaker':\n # This one is the base class; we get it because we did from ... 
import TreeMaker at the top of the file\n continue\n if tm_name in treemakers:\n raise ValueError(\"Two treemakers named %s!\" % tm_name)\n treemakers[tm_name] = tm", "def find_user_templates(self):\n\n # a list to store file names in\n local_templates = []\n\n # loop through the directory content\n for name in os.listdir(self._template_directory):\n # check to see if it is a directory and not in the database\n if (os.path.isdir(os.path.join(self._template_directory, name)) and\n name not in self._templates):\n # add it to the list\n local_templates.append(name)\n\n return local_templates" ]
[ "0.66310847", "0.59174895", "0.5706464", "0.56006503", "0.5449008", "0.5424291", "0.54034406", "0.53829217", "0.5337041", "0.53273314", "0.53095233", "0.5307464", "0.52964854", "0.5285679", "0.52551943", "0.5226202", "0.5178625", "0.5178625", "0.51691693", "0.5146963", "0.51436335", "0.513781", "0.5132956", "0.51197994", "0.50782996", "0.50781155", "0.506339", "0.50611657", "0.5055892", "0.5051056" ]
0.7492254
0
Load teachers from PDDRTeachers experiment.
def load_teacher_experiment(self, exp: Experiment): _, _, extra = load_experiment(exp) self.unpack_teachers(extra)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_teachers(self):\n # Get the experiment's directory to load from\n ex_dir = ask_for_experiment(max_display=10, env_name=self.env_real.name, perma=False)\n self.load_teacher_experiment(ex_dir)\n if len(self.teacher_policies) < self.num_teachers:\n print(\n f\"You have loaded {len(self.teacher_policies)} teachers - load at least {self.num_teachers - len(self.teacher_policies)} more!\"\n )\n self.load_teachers()", "def load_tamper(self, dataset_dir, subset):\n # Add classes. We have one class.\n # Naming the dataset nucleus, and the class nucleus\n self.add_class(\"tampers\", 1, \"tampers\")\n\n # Which subset?\n # \"val\": use hard-coded list above\n # \"train\": use data from stage1_train minus the hard-coded list above\n # else: use the data from the specified sub-directory\n # assert subset in [\"train\", \"val\", \"stage1_train\", \"stage1_test\", \"stage2_test\"]\n # subset_dir = \"stage1_train\" if subset in [\"train\", \"val\"] else subset\n dataset_dir = os.path.join(dataset_dir, subset, 'images')\n if subset == \"val\" or subset == \"test\":\n image_ids = next(os.walk(dataset_dir))[2]\n else:\n # Get image ids from directory names\n image_ids = next(os.walk(dataset_dir))[2]\n \n\n # dircopy_move = '/data/twj/copy-move/data_zoo/dataset/images/train'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # dirnew_splicing = '/data/tamper'\n # image_ids_new_splicing = next(os.walk(os.path.join(dirnew_splicing, 'images')))[2]\n\n # dircopy_move = '/home/as/deeplab/wpmrcnn/ca2new/test'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n \n # dircopy_move = '/data/gy/ca2att/train3'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # # # dirtxt_sp = '/data/gy/tamperpre/train'\n # # # image_ids_txt_sp = next(os.walk(os.path.join(dirtxt_sp, 'images')))[2]\n\n # dirnew_sp = '/data/gy/c2newsp/train'\n # image_ids_new_sp = next(os.walk(os.path.join(dirnew_sp, 'images')))[2]\n\n # Add images\n for image_id in image_ids:\n self.add_image(\n \"tampers\",\n image_id=image_id[:-4],\n path=os.path.join(dataset_dir, image_id))\n\n # for image_id in image_ids_copy_move:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dircopy_move, 'images', image_id))\n\n # for image_id in image_ids_new_splicing:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_splicing, 'images', image_id))\n\n # # for image_id in image_ids_txt_sp:\n # # self.add_image(\n # # \"tampers\",\n # # image_id=image_id[:-4],\n # # path=os.path.join(dirtxt_sp, 'images', image_id))\n\n # for image_id in image_ids_new_sp:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_sp, 'images', image_id))", "def test_pytd_teacher(self):\n defaults = parser_defaults.copy()\n defaults['datatype'] = 'train:stream'\n defaults['image_mode'] = 'ascii'\n\n with testing_utils.capture_output():\n # Get processed act from agent\n parser = display_setup_args()\n defaults['pytorch_teacher_dataset'] = 'flickr30k'\n del defaults['pytorch_teacher_task']\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n teacher = create_task_agent_from_taskname(opt)[0]\n pytorch_teacher_act = teacher.act()\n\n parser = display_setup_args()\n defaults['task'] = 'flickr30k'\n del defaults['pytorch_teacher_dataset']\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n teacher = create_task_agent_from_taskname(opt)[0]\n 
regular_teacher_act = teacher.act()\n\n keys = set(pytorch_teacher_act.keys()).intersection(\n set(regular_teacher_act.keys()))\n self.assertTrue(len(keys) != 0)\n for key in keys:\n self.assertTrue(pytorch_teacher_act[key] == regular_teacher_act[key],\n 'PytorchDataTeacher does not have the same value '\n 'as regular teacher for act key: {}'.format(key))", "def train_teacher (nb_teachers, teacher_id):\n # Load the dataset\n X_train, X_test, y_train, y_test = models.get_dataset()\n\n print(X_train.shape)\n print(y_train.shape)\n print(X_test.shape)\n print(y_test.shape)\n \n # Retrieve subset of data for this teacher\n data, labels = partition.partition_dataset(X_train,\n y_train,\n nb_teachers,\n teacher_id)\n\n print(\"Length of training data: \" + str(len(labels)))\n\n # Define teacher checkpoint filename and full path\n\n filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.hdf5'\n filename2 = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.h5'\n \n # Perform teacher training need to modify \n \n\n # Create teacher model\n model, opt = models.create_two_layer_mlp(46) # num of cols\n model.compile(loss='binary_crossentropy',\n optimizer=\"Adam\",\n metrics=['accuracy'])\n model, hist = models.training(model, data, X_test, labels, y_test,filename)\n\n #modify\n model_json = model.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\n model.save_weights(filename2)\n print(\"Saved model to disk\")\n return True", "def unpack_teachers(self, extra: dict):\n self.teacher_policies.extend(extra[\"teacher_policies\"])\n self.teacher_envs.extend(extra[\"teacher_envs\"])\n self.teacher_expl_strats.extend(extra[\"teacher_expl_strats\"])\n self.teacher_critics.extend(extra[\"teacher_critics\"])\n self.teacher_ex_dirs.extend(extra[\"teacher_ex_dirs\"])", "def get_test_loader(id_list = './data/sample_submission.csv', root_dir = './data/test/'):\n data = HumanProteinDataset(id_list, root_dir, transform = transforms.Compose([\n Rescale((256, 256)), \n ToTensor()\n ]))\n\n indices = np.arange(len(data))\n dataloader_test = DataLoader(data, batch_size=10, num_workers=5)\n\n return dataloader_test", "def prune_teachers(self):\n self.teacher_policies = self.teacher_policies[: self.num_teachers]\n self.teacher_envs = self.teacher_envs[: self.num_teachers]\n self.teacher_expl_strats = self.teacher_expl_strats[: self.num_teachers]\n self.teacher_critics = self.teacher_critics[: self.num_teachers]\n self.teacher_ex_dirs = self.teacher_ex_dirs[: self.num_teachers]", "def _load_trials(self) -> hyperopt.Trials:\n if os.path.isfile(self.trials_path):\n trials = pickle.load(open(self.trials_path, \"rb\"))\n else:\n trials = hyperopt.Trials()\n return trials", "def load_primers(tsv_filename):\n answer = []\n with open(tsv_filename) as handle:\n for line in handle:\n if line.startswith(\"#\"):\n continue\n parts = line.rstrip(\"\\n\").split(\"\\t\")\n if len(parts) == 2:\n left, right = parts\n name = f\"P{len(answer)}\"\n else:\n name, left, right = parts[:3]\n answer.append((name, left, right))\n return answer", "def get_teacher_assign():\n assignment_data = query_db(\n \"SELECT assignments.id, assignments.name, assignments.due_date \"\n \"FROM assignments JOIN topics ON assignments.topic_id=topics.id \"\n \"JOIN classes ON topics.class_id=classes.id WHERE teacher_id=?;\",\n [flask.session[\"id\"]],\n )\n assignments = []\n for assignment in assignment_data:\n assignment_dict_teach = {}\n assignment_dict_teach[\"id\"] = 
assignment[0]\n assignment_dict_teach[\"name\"] = assignment[1]\n assignment_dict_teach[\"due_date\"] = assignment[2]\n assignments.append(assignment_dict_teach)\n return assignments", "def load_people(self, file_path):\n pass", "def get_train(self, even=None):\n\n #self.images, self.labels, self.traces = trace_data.get_my_teacher()\n _, self.images, self.labels, self.traces, _ = trace_data.get_my_teacher()\n #print(self.labels)\n self.length = len(self.images)\n self.create_teacher()", "def LoadTeacherModels(lang):\n\n # load Trained teacher model parameters\n log_dir = 'data/logs'\n with open(log_dir + '/' + lang + '_model_params', 'rb') as fp:\n params = pickle.load(fp)\n\n model_args = params['args']\n\n if model_args.use_colab is None:\n OUTPUT_DIR = 'ckpts/' + model_args.lang\n if not os.path.isdir(OUTPUT_DIR): os.mkdir(OUTPUT_DIR)\n else:\n from google.colab import drive\n\n drive.mount('/content/gdrive')\n OUTPUT_DIR = '/content/gdrive/My Drive/ckpts/' + model_args.lang\n if not os.path.isdir(OUTPUT_DIR): os.mkdir(OUTPUT_DIR)\n\n if model_args.enc_type == 'gat' and model_args.dec_type == 'transformer':\n models = {}\n OUTPUT_DIR += '/' + model_args.enc_type + '_' + model_args.dec_type\n\n # Load the vocabs\n with open('vocabs/' + model_args.model + '/' +\n lang + '/' + model_args.opt + '_src_vocab', 'rb') as fp:\n src_vocab = pickle.load(fp)\n # loading the target vocab\n model_args.sentencepiece = 'False'\n if model_args.sentencepiece == 'True':\n sp = spm.SentencePieceProcessor()\n sp.load('vocabs/' + model_args.model + '/' +\n lang + '/' + 'train_tgt.model')\n tgt_vocab = sp\n else:\n tgt_vocab = src_vocab\n\n print('Loaded ' + lang + ' Parameters..')\n model = TransGAT(params['args'], params['src_vocab_size'], src_vocab,\n params['tgt_vocab_size'], tgt_vocab)\n # Load the latest checkpoints\n optimizer = tf.train.AdamOptimizer(beta1=0.9, beta2=0.98,\n epsilon=1e-9)\n\n ckpt = tf.train.Checkpoint(\n model=model,\n optimizer=optimizer\n )\n\n ckpt_manager = tf.train.CheckpointManager(ckpt, OUTPUT_DIR, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n ckpt.restore(ckpt_manager.latest_checkpoint).expect_partial()\n\n print('Loaded ' + lang + ' Teacher model !')\n\n return model", "def get_teachers(self):\n query = Teacher.all().order('teacher')\n return query.fetch()", "def load_topics():\n\n print \"Importing topics...\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate retailers\n Topic.query.delete()\n\n # Read CSV file\n with open(\"seed_data/topics.csv\") as source_file:\n example_data = list(csv.reader(source_file))\n\n # skip header row for populating db\n for list_item in example_data[1:]:\n topic = Topic(topic_title=list_item[1])\n\n # Add the current retailer to the session\n db.session.add(topic)\n\n # Commit the db.session changes to the database\n db.session.commit()", "def ensemble_preds(dataset, nb_teachers, stdnt_data):\n\n # Compute shape of array that will hold probabilities produced by each\n # teacher, for each training point, and each output class\n result_shape = (nb_teachers, len(stdnt_data), FLAGS.nb_labels)\n\n # Create array that will hold result\n result = np.zeros(result_shape, dtype=np.float32)\n\n # Get predictions from each teacher\n for teacher_id in xrange(nb_teachers):\n # Compute path of checkpoint file for teacher model with ID teacher_id\n if FLAGS.deeper:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + 
'_deep.ckpt-' + str(FLAGS.teachers_max_steps - 1) #NOLINT(long-line)\n else:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '.ckpt-' + str(FLAGS.teachers_max_steps - 1) # NOLINT(long-line)\n\n # Get predictions on our training data and store in result array\n result[teacher_id] = deep_cnn.softmax_preds(stdnt_data, ckpt_path)\n\n # This can take a while when there are a lot of teachers so output status\n print(\"Computed Teacher \" + str(teacher_id) + \" softmax predictions\")\n\n return result", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def read_triplets(seed_candidates):\n if \"pickle\" in seed_candidates:\n if \"*\" in seed_candidates:\n all_files = glob.glob(seed_candidates)\n new_data = []\n for file_name in all_files:\n with open(file_name, 'rb') as f:\n data = pickle.load(f)\n for dd in data:\n new_data.append((dd[0], dd[1], dd[2], dd[3]))\n df_seed = pd.DataFrame(new_data, columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)\n else:\n with open(seed_candidates, 'rb') as f:\n data = pickle.load(f)\n new_data = []\n for dd in data:\n new_data.append((dd[0], dd[1], dd[2], dd[3]))\n # idx = int(dd[0][10:])\n # new_data.append((idx, dd[1], dd[2], dd[3]))\n df_seed = pd.DataFrame(new_data, columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)\n else:\n column_names = ['evtid', 'h1', 'h2', 'h3']\n if \"*\" in seed_candidates:\n all_files = glob.glob(seed_candidates)\n new_data = []\n for file_name in all_files:\n df_seed_tmp = pd.read_csv(file_name, header=None, names=column_names,)\n new_data.append(df_seed_tmp)\n df_seed = pd.concat(new_data)\n else:\n df_seed = pd.read_csv(seed_candidates, header=None,\n names=column_names)\n return df_seed", "def test_pyt_multitask(self):\n\n def run_display_test(defaults, ep_and_ex_counts):\n with testing_utils.capture_output() as f:\n parser = display_setup_args()\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n display_data(opt)\n str_output = f.getvalue()\n self.assertTrue(\n '[ loaded {} episodes with a total of {} examples ]'.format(\n ep_and_ex_counts[0], ep_and_ex_counts[1]\n ) in str_output,\n 'PytorchDataTeacher multitasking failed with '\n 'following args: {}'.format(opt)\n )\n\n task1 = 'babi:task1k:1'\n task2 = 'babi:task1k:2'\n dataset1 = 'flickr30k'\n dataset2 = 'vqa_v1'\n\n # Expected example and episode counts\n eps_and_exs_counts = [\n (1800, 1800),\n (1080, 1800),\n (29900, 29900),\n (29180, 29900),\n (277349, 277349)\n ]\n defaults = parser_defaults.copy()\n\n # 1.\n defaults['pytorch_teacher_task'] = '{},{}'.format(task1, task2)\n run_display_test(defaults, eps_and_exs_counts[0])\n\n # 2.\n defaults['pytorch_teacher_task'] = task1\n defaults['task'] = task2\n run_display_test(defaults, eps_and_exs_counts[1])\n\n # 3.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = dataset1\n run_display_test(defaults, eps_and_exs_counts[2])\n\n # 4.\n del defaults['pytorch_teacher_task']\n defaults['task'] = task1\n run_display_test(defaults, eps_and_exs_counts[3])\n\n # 5.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = '{},{}'.format(dataset1, dataset2)\n 
run_display_test(defaults, eps_and_exs_counts[4])", "def load_ptb_dataset(name='ptb', path='raw_data'):\n path = os.path.join(path, name)\n logging.info(\"Load or Download Penn TreeBank (PTB) dataset > {}\".format(path))\n\n # Maybe dowload and uncompress tar, or load exsisting files\n maybe_download_and_extract(PTB_FILENAME, path, PTB_URL, extract=True)\n\n data_path = os.path.join(path, 'simple-examples', 'data')\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id = nlp.build_vocab(nlp.read_words(train_path))\n\n train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)\n valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)\n test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)\n vocab_size = len(word_to_id)\n\n # logging.info(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '<unk>', '<eos>']\n # logging.info(train_data) # ... 214, 5, 23, 1, 2]\n # logging.info(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '<eos>': 2 ... }\n # logging.info(vocabulary) # 10000\n # exit()\n return train_data, valid_data, test_data, vocab_size", "def loadTrainer(path):\n\tray.shutdown()\n\tray.init()\n\tconfig = createConfig()\n\ttrainer = dqn.DQNTrainer(config=config, env=HiLoPricingEnv)\n\ttrainer.restore(path)\n\treturn trainer", "def get_data_loaders():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2sentences_finalgenerated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+5):]\n \n #history_complete.append(history)\n if len(persona) == 4:\n if len(history) > (len(persona)+3):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def TestTeacher(self, model_name, **kwargs):\n batch_size = kwargs.pop(\"batch_size\", 256)\n model_save_path = kwargs.pop(\"model_save_path\", \"./checkpoints/teacher/\")\n record_save_path = kwargs.pop(\"record_save_path\", \"./records/teacher\")\n verbose = kwargs.pop(\"verbose\", False)\n\n # Do some check\n if not os.path.exists(model_save_path):\n raise RuntimeError(\"No pretrained model exists in '{}'\".format(model_save_path))\n if not os.path.exists(record_save_path):\n os.makedirs(record_save_path)\n model_save_path = os.path.join(model_save_path, \"{}.ckpt\".format(model_name))\n\n tf.reset_default_graph()\n\n # Get dataset\n test_data, test_label = 
self.data_manager.test_data, self.data_manager.test_label\n num_test_data = test_data.shape[0]\n\n X = tf.placeholder(test_data.dtype, shape=[None]+list(test_data.shape[1:]), name=\"input_data\")\n y = tf.placeholder(test_label.dtype, shape=[None]+list(test_label.shape[1:]), name=\"input_label\")\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n\n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n batched_dataset = dataset.batch(batch_size)\n \n iterator = batched_dataset.make_initializable_iterator()\n batch_data, batch_label = iterator.get_next()\n\n # Get the teacher model\n with tf.variable_scope('teacher_model'):\n logits, probs = self.teacher_model(batch_data, is_train=is_train)\n result = tf.argmax(logits, axis=1)\n correct_num = tf.reduce_sum(tf.cast(tf.equal(result, tf.argmax(batch_label, axis=1)), tf.float32))\n saver = tf.train.Saver()\n\n # Test process\n with tf.Session() as sess:\n sess.run(iterator.initializer, feed_dict={X:test_data, y:test_label})\n saver.restore(sess, model_save_path)\n total_correct_cnt = 0\n while True:\n try:\n right_num = sess.run([correct_num], feed_dict={is_train:False})\n total_correct_cnt += right_num[0]\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / num_test_data\n if verbose:\n print(\"Test accuracy: {}\".format(acc))\n break\n acc_hist = [acc]\n self._writeRecord(record_save_path, \"{}_test_accuracy\".format(model_name), acc_hist)", "def load_examples_data(dataset_name):\n dataset_name = dataset_name.strip().lower()\n if dataset_name.lower() not in ['pokemon', 'hanzi', 'animals', 'nsfw', 'simpsons', 'horse2zebra', 'people',\n 'autodrive', 'superresolution', 'anpr', 'beauty','antisproofing','facelandmarks','dogs-vs-cats','chinese']:\n raise ValueError('Not a valid dataset_name.')\n dataset_name = 'examples_' + dataset_name\n dirname = os.path.join(_trident_dir, dataset_name)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n is_internet_ok = is_connected()\n if dataset_name == 'examples_pokemon':\n is_download=download_file_from_google_drive('1U-xc54fX9j9BcidvRa0ow6qjssMlSF2A', dirname, 'pokemon.tar')\n tar_file_path = os.path.join(dirname, 'pokemon.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n extract_path = os.path.join(dirname, 'pokemon')\n dataset = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n print('get pokemon images :{0}'.format(len(dataset)))\n return dataset\n\n\n elif dataset_name == 'examples_hanzi':\n download_file_from_google_drive('13UEzSG0az113gpRPKPyKrIE2HDaA2P4H', dirname, 'hanzi.tar')\n tar_file_path = os.path.join(dirname, 'hanzi.tar')\n extract_path = os.path.join(dirname, 'hanzi')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, os.path.join(dirname, 'train'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset_test = load_folder_images(dataset_name, os.path.join(dirname, 'test'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset.testdata = dataset_test.traindata\n dataset.class_names['zh-cn'] = dataset.class_names['en-us']\n return dataset\n\n elif dataset_name == 'examples_animals':\n download_file_from_google_drive('19Cjq8OO6qd9k9TMZxlPjDpejDOdiHJoW', dirname, 'animals.tar')\n tar_file_path = os.path.join(dirname, 'animals.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = 
load_folder_images(dataset_name, dirname, folder_as_label=True)\n return dataset\n elif dataset_name == 'examples_nsfw':\n tar_file_path = os.path.join(dirname, 'nsfw.tar')\n if os.path.exists(tar_file_path) and get_file_create_time(tar_file_path)<datetime.datetime(2021, 2, 20, 0, 0, 0).timestamp():\n os.remove(tar_file_path)\n if os.path.exists(os.path.join(dirname,'porn_detection_data.pkl')):\n os.remove(os.path.join(dirname,'porn_detection_data.pkl'))\n _delete_h(dirname)\n download_file_from_google_drive('1EXpV2QUrSFJ7zJn8NqtqFl1k6HvXsUzp', dirname, 'nsfw.tar')\n\n extract_path = os.path.join(dirname, 'nsfw')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n folders = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']\n data=unpickle(os.path.join(dirname,'porn_detection_data.pkl'))\n\n trainData = []\n testData = []\n trainLabel = []\n testLabel = []\n for n in range(5):\n folder=folders[n]\n trainData.extend(data[folder]['train'])\n trainLabel.extend([n]*len(data[folder]['train']))\n testData.extend(data[folder]['test'])\n testLabel.extend([n] * len(data[folder]['test']))\n\n trainarray = ImageDataset(trainData,object_type=ObjectType.rgb)\n trainlabel = LabelDataset(trainLabel,object_type=ObjectType.classification_label)\n train_iter = Iterator(data=trainarray, label=trainlabel)\n\n testarray = ImageDataset(testData,object_type=ObjectType.rgb)\n testlabel = LabelDataset(testLabel,object_type=ObjectType.classification_label)\n test_iter = Iterator(data=testarray, label=testlabel)\n print('training images: {0} test images:{1}'.format(len(trainarray), len(testarray)))\n\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n dataset.binding_class_names(['drawing', 'hentai', 'neutral', 'porn', 'sexy'], 'en-us')\n dataset.binding_class_names(['绘画', '色情漫画', '中性', '色情', '性感'], 'zh-cn')\n dataset.binding_class_names(['繪畫', '色情漫畫', '中性', '色情', '性感'], 'zh-tw')\n dataset.scenario = 'train'\n return dataset\n elif dataset_name == 'examples_simpsons':\n download_file_from_google_drive('1hGNFbfBv3EZ4nx4Qod6PtSYzO8H4QIxC', dirname, 'simpsons.tar')\n tar_file_path = os.path.join(dirname, 'simpsons.tar')\n extract_path = os.path.join(dirname, 'simpsons')\n extract_archive(tar_file_path, extract_path, archive_format='tar')\n data_provider = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n data_provider.traindata.unpair = RandomNoiseDataset(shape=(100), random_mode='normal')\n print('get simpsons images :{0}'.format(len(data_provider.traindata.data.items)))\n return data_provider\n elif dataset_name == 'examples_horse2zebra':\n download_file_from_google_drive('1pqj-T90Vh4wVNBV09kYZWgVPsZUA2f7U', dirname, 'horse2zebra.tar')\n tar_file_path = os.path.join(dirname, 'horse2zebra.tar')\n extract_path = os.path.join(dirname, 'horse2zebra')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n trainA = ImageDataset(list_images(os.path.join(dirname, 'trainA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n trainB = ImageDataset(list_images(os.path.join(dirname, 'trainB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testA = ImageDataset(list_images(os.path.join(dirname, 'testA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testB = ImageDataset(list_images(os.path.join(dirname, 'testB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n train_iter = Iterator(data=trainA, unpair=trainB)\n test_iter = Iterator(data=testA, unpair=testB)\n 
dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n print('get horse2zebra images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_people':\n download_file_from_google_drive('1H7mJJfWpmXpRxurMZQqY4N_UXWLbQ2pT', dirname, 'people.tar')\n tar_file_path = os.path.join(dirname, 'people.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'imgs', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs=list(sorted(imgs))\n masks = list(sorted(masks))\n # make_dir_if_need(os.path.join(dirname, 'trimap'))\n # for i in range(len(masks)):\n # mask=mask2array(masks[i])\n # trimap=mask2trimap(mask)\n # save_mask(trimap,masks[i].replace('masks','trimap'))\n # print('trimap',len(masks))\n\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb)\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.binary_mask)\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get people images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_autodrive':\n download_file_from_google_drive('1JqPPeHqhWLqnI6bD8nuHcVx-Y56oIZMK', dirname, 'autodrive.tar')\n tar_file_path = os.path.join(dirname, 'autodrive.tar')\n extract_path = os.path.join(dirname, 'autodrive')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'images', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs = list(sorted(imgs))\n masks = list(sorted(masks))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb,symbol='image')\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.color_mask,symbol='mask')\n\n def parse_code(l):\n if len(l.strip().split(\"\\t\")) == 2:\n a, b = l.replace('\\t\\t', '\\t').strip().split(\"\\t\")\n return tuple(int(i) for i in b.split(' ')), a\n\n label_codes, label_names = zip(\n *[parse_code(l) for l in open(os.path.join(dirname, \"label_colors.txt\")).readlines()])\n for i in range(len(label_codes)):\n mskdata.palette[label_names[i]] = label_codes[i]\n\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get autodrive images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_superresolution':\n download_file_from_google_drive('1v1uoymrWI_MLSiGvSGW7tWJYSnzzXpEQ', dirname, 'superresolution.tar')\n tar_file_path = os.path.join(dirname, 'superresolution.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs.extend(glob.glob(os.path.join(dirname, '*.bmp')))\n imgs = list(sorted(imgs))\n\n print('get super resolution images :{0}'.format(len(imgs)))\n\n imgdata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='lr')\n labeldata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='hr')\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=labeldata))\n return dataset\n elif dataset_name == 'examples_beauty':\n download_file_from_google_drive('1aJhxN9IqsxuayhRTm-gmxk6PiLe5wm9X', dirname, 'beauty.tar')\n tar_file_path = os.path.join(dirname, 'beauty.tar')\n\n extract_archive(tar_file_path, dirname, archive_format='tar')\n # 讀取圖片數據\n images_dict = {}\n with open(os.path.join(dirname, 'images_dict.pkl'), 'rb') as fp:\n images_dict = pickle.load(fp)\n\n f = open(os.path.join(dirname, 
'All_Ratings.txt'), encoding='utf-8-sig').readlines()\n imgs = []\n landmarks = []\n ratings = []\n for row in f:\n data = row.strip().split('\\t')\n if 'images\\\\' + data[0] in images_dict:\n img = images_dict['images\\\\' + data[0]][0]\n img = img.transpose([2, 0, 1])[::-1].transpose([1, 2, 0])\n imgs.append(img)\n landmark = images_dict['images\\\\' + data[0]][1].astype(np.float32)\n landmarks.append(landmark)\n rating = (float(data[1])) / 5.00\n ratings.append(rating)\n print('{0} faces loaded...'.format(len(imgs)))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb, symbol='faces')\n landmarkdata = LandmarkDataset(landmarks=landmarks, object_type=ObjectType.landmarks, symbol='target_landmarks')\n labeldata = LabelDataset(data=ratings,object_type=ObjectType.classification_label, symbol='target_beauty')\n data_provider = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=Dataset.zip(landmarkdata,labeldata)))\n return data_provider\n\n elif dataset_name == 'examples_facelandmarks':\n download_file_from_google_drive('1GtswQBAHPa_bXaB4tW2uOOQ8Lxfz2L5B', dirname, 'ibug_300W.tar')\n tar_file_path = os.path.join(dirname, 'ibug_300W.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n root_dir=os.path.join(dirname, 'ibug_300W_large_face_landmark_dataset')\n image_paths = {}\n landmarks = {}\n crops = {}\n\n for mode in ['train','test']:\n make_dir_if_need(os.path.join(dirname, 'crops',mode))\n tree = ElementTree.parse(os.path.join(root_dir, 'labels_ibug_300W_{0}.xml'.format(mode)))\n root = tree.getroot()\n image_paths[mode]=[]\n landmarks[mode] = []\n crops[mode] = []\n\n offset=5\n for j in tqdm(range(len(root[2]))):\n try:\n filename=root[2][j]\n landmark = []\n for num in range(68):\n x_coordinate = int(filename[0][num].attrib['x'])\n y_coordinate = int(filename[0][num].attrib['y'])\n landmark.append([x_coordinate, y_coordinate])\n landmark=np.asarray(landmark)\n\n crop = filename[0].attrib\n for k in crop.keys():\n crop[k] = int(crop[k]) if isinstance(crop[k], str) else crop[k]\n for k in crop.keys():\n if k=='top' and int(landmark[:,1].min())<int(crop[k]):\n crop[k] = int( landmark[:,1].min())\n crop[ 'height']+=crop[k]-int(landmark[:,1].min())\n elif k=='left' and int(landmark[:,0].min())<int(crop[k]):\n crop[k] = int( landmark[:,0].min())\n crop['width']+= crop[k] - int(landmark[:, 0].min())\n elif k == 'width' and int(landmark[:, 0].max()-landmark[:, 0].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 0].max()-landmark[:, 0].min())\n elif k == 'height' and int(landmark[:, 1].max()-landmark[:, 1].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 1].max()-landmark[:, 1].min())\n\n crop['left']-=offset\n crop['top'] -= offset\n crop['width'] += 2*offset\n crop['height'] += 2*offset\n\n\n landmark[:,0]-=crop['left']\n landmark[:, 1] -= crop['top']\n\n\n if not os.path.exists(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j))):\n im=image2array(os.path.join(root_dir, filename.attrib['file']))\n if im.ndim==2:\n im=cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)\n im=im[crop['top']:min(crop['top']+crop['height'],im.shape[0]),crop['left']:min(crop['left']+crop['width'],im.shape[1]),:]\n\n if max(im.shape[:2])/max(min(im.shape[:2]),0)<=5:\n\n array2image(im).save(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n del im\n else:\n #im = image2array(os.path.join(dirname, 
'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n\n if j%100==0:\n gc.collect()\n except Exception as e:\n pass\n\n print('ibug 300w train dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['train']),len(landmarks['train'])))\n print('ibug 300w test dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['test']), len(landmarks['test'])))\n imdata=ImageDataset(images=image_paths['train'],symbol='faces',object_type=ObjectType.rgb)\n landmarkdata = LandmarkDataset(landmarks=landmarks['train'], symbol='landmarks',object_type=ObjectType.landmarks)\n imtestdata = ImageDataset(images=image_paths['test'], symbol='faces',object_type=ObjectType.rgb)\n landmarktestdata = LandmarkDataset(landmarks=landmarks['test'], symbol='landmarks',object_type=ObjectType.landmarks)\n data_provider=DataProvider(traindata=Iterator(data=imdata,label=landmarkdata),testdata=Iterator(data=imtestdata,label=landmarktestdata))\n return data_provider\n\n elif dataset_name == 'examples_antisproofing':\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name,os.path.join(dirname, 'antisproofing'))\n return data_provider\n elif dataset_name == 'examples_anpr':\n download_file_from_google_drive('1uGBd8tXlP0TZAXNgrR6H0jl5MXj7VPbN', dirname, 'anpr.tar')\n tar_file_path = os.path.join(dirname, 'anpr.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs = list(sorted(imgs))\n\n # CCPD (Chinese City Parking Dataset, ECCV) and PDRC (license Plate Detection and Recognition Challenge)\n # https://github.com/detectRecog/CCPD\n provinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\",\n \"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\n alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'O']\n ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\n 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\n\n def lp2char(lp):\n cols = lp.split('_')\n charstring = ''\n for i in range(len(cols)):\n if i == 0:\n charstring += provinces[int(cols[i])]\n elif i == 1:\n charstring += alphabets[int(cols[i])]\n else:\n charstring += ads[int(cols[i])]\n return charstring\n\n width = 720\n height = 1160\n for im_path in imgs:\n lbl = im_path.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\n charstring = lp2char(lbl)\n iname = im_path.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\n box = [leftUp[0], leftUp[1], rightDown[0], rightDown[1]]\n ori_w, ori_h = [float(int(el)) for el in [width, height]]\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n 
tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, os.path.join(dirname, 'antisproofing'))\n return data_provider\n\n\n\n elif dataset_name == 'examples_dogs-vs-cats':\n download_file_from_google_drive('10czW0On7eIXkPP-MuQ-IRxMWdTizWjNC', dirname, 'dogs-vs-cats.tar')\n tar_file_path = os.path.join(dirname, 'dogs-vs-cats.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, dirname)\n return data_provider\n elif dataset_name == 'examples_chinese':\n to_half=ToHalfWidth()\n to_sc=ChineseConvert(convert_to='simplified')\n download_file_from_google_drive('1yzRzXpLuhSUxnixqCgpbdTk16ajnTEWF', dirname, 'chinese.tar')\n tar_file_path = os.path.join(dirname, 'chinese.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n\n as_train = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_training.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000' ,'|'))).splitlines()\n cityu_train =remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_training.utf8'), encoding='utf-8-sig').read().strip().replace(' ','|'))).splitlines()\n\n as_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_testing_gold.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000', '|'))).splitlines()\n cityu_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_test_gold.utf8'), encoding='utf-8-sig').read().strip().replace(' ', '|'))).splitlines()\n\n\n data = as_train + cityu_train # 把兩個語料合併\n test_data=as_test + cityu_test # 把兩個語料合併\n\n\n raw_data_train = [row.strip('\\n').strip('\\r') for row in data] # 移除分行字元\n raw_data_test = [row.strip('\\n').strip('\\r') for row in test_data] # 移除分行字元\n\n process_data_train=[]\n process_seg_label_train = []\n process_simplifided_label_train = []\n process_traditional_label_train = []\n\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n for k in tqdm(range(len(raw_data_train))):\n row=raw_data_train[k]\n if row.startswith('∥'):\n row=row[1:]\n words=row.replace('||','|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_train.append(word[i])\n #tmp_simplifided_label_train.append(to_half(to_sc(word[i])))\n #轉換為BMES\n\n if i==0 and len(word)>1: #B 是一個詞的開始\n tmp_seg_label_train.append('B')\n elif i==len(word)-1 and len(word)>=2 and tmp_seg_label_train[-1] in ['B','M']: #E 是一個詞的結束\n tmp_seg_label_train.append('E')\n elif len(word)==1 and i==0: #S 自己就是一個單詞\n tmp_seg_label_train.append('S')\n elif len(word)>=3 and tmp_seg_label_train[-1] in ['B','M']: #M 是一個詞的中間\n tmp_seg_label_train.append('M')\n\n if len(tmp_seg_label_train)>0 and tmp_seg_label_train[-1] in ['E','S']:\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)) and k+1<len(words):\n if word in [ '。','﹖']:\n pass\n\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_train.append(' ')\n tmp_seg_label_train.append('S')\n\n if (k+1<len(raw_data_train) and not raw_data_train[k+1].startswith( '」')) and words[-1] in [ '。','﹖']:\n #process_traditional_label_train.append(tmp_data_train)\n\n tmp_data_train=to_half(''.join(tmp_data_train))\n tmp_seg_label_train = ''.join(tmp_seg_label_train)\n # if len(tmp_data_train)!=len(tmp_seg_label_train):\n # print('')\n 
tmp_simplifided_label_train =to_sc(tmp_data_train)\n\n process_data_train.append(tmp_data_train)\n process_seg_label_train.append(tmp_seg_label_train)\n process_simplifided_label_train.append(tmp_simplifided_label_train)\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n # else:\n # tmp_data_train.append('\\n')\n # tmp_simplifided_label_train.append('\\n')\n # tmp_seg_label_train.append('\\n')\n corpus=process_data_train\n seg_corpus=process_seg_label_train\n simplifided_corpus =process_simplifided_label_train\n\n process_data_test = []\n process_seg_label_test = []\n process_simplifided_label_test = []\n process_traditional_label_test = []\n print('generate test labels')\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n for k in tqdm(range(len(raw_data_test))):\n row=raw_data_test[k]\n if row.startswith('∥'):\n row=row[1:]\n words = row.replace('||', '|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_test.append(word[i])\n # tmp_simplifided_label_test.append(to_half(to_sc(word[i])))\n # 轉換為BMES\n\n if i == 0 and len(word) > 1: # B 是一個詞的開始\n tmp_seg_label_test.append('B')\n elif i == len(word) - 1 and len(word) >= 2 and tmp_seg_label_test[-1] in ['B', 'M']: # E 是一個詞的結束\n tmp_seg_label_test.append('E')\n elif len(word) == 1 and i == 0: # S 自己就是一個單詞\n tmp_seg_label_test.append('S')\n elif len(word) >= 3 and tmp_seg_label_test[-1] in ['B', 'M']: # M 是一個詞的中間\n tmp_seg_label_test.append('M')\n\n if len(tmp_seg_label_test) > 0 and tmp_seg_label_test[-1] in ['E', 'S'] and k+1<len(words):\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)):\n if word in ['。', '﹖']:\n pass\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_test.append(' ')\n tmp_seg_label_test.append('S')\n\n if (k + 1 < len(raw_data_test) and not raw_data_test[k + 1].startswith('」')) and words[-1] in ['。', '﹖']:\n # process_traditional_label_test.append(tmp_data_test)\n\n tmp_data_test = to_half(''.join(tmp_data_test))\n tmp_seg_label_test = ''.join(tmp_seg_label_test)\n # if len(tmp_data_test)!=len(tmp_seg_label_test):\n # print('')\n tmp_simplifided_label_test = to_sc(tmp_data_test)\n\n process_data_test.append(tmp_data_test)\n process_seg_label_test.append(tmp_seg_label_test)\n process_simplifided_label_test.append(tmp_simplifided_label_test)\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n # else:\n # tmp_data_test.append('\\n')\n # tmp_simplifided_label_test.append('\\n')\n # tmp_seg_label_test.append('\\n')\n test_corpus = process_data_test\n test_seg_corpus = process_seg_label_test\n test_simplifided_corpus = process_simplifided_label_test\n\n\n data=TextSequenceDataset(corpus=corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_label = TextSequenceDataset(corpus=seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_label = TextSequenceDataset(corpus=simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_label = TextSequenceDataset(corpus= copy.deepcopy(corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n 
data_test=TextSequenceDataset(corpus=test_corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_test_label = TextSequenceDataset(corpus=test_seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_test_label = TextSequenceDataset(corpus=test_simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_test_label = TextSequenceDataset(corpus= copy.deepcopy(test_corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n\n chars = list(sorted(set(list( ''.join(corpus) +bpmf_phonetic+'\\n\\r\\t∥'+ ''.join(simplifided_corpus)+''.join(test_data)))))\n chars.insert(0, '[CLS]')\n chars.insert(1, '[SEP]')\n chars.insert(2, '[UNK]')\n chars.insert(3, '[PAD]')\n chars.insert(4, '[MASK]')\n\n data.vocabs =data_test.vocabs=simplifided_label.vocabs=simplifided_test_label.vocabs = chars\n data.text2index=data_test.text2index =simplifided_label.text2index=simplifided_test_label.text2index = dict((c, i) for i, c in enumerate(chars))\n data.index2text =data_test.index2text =simplifided_label.index2text=simplifided_test_label.index2text= dict((i, c) for i, c in enumerate(chars))\n traditional_label = copy.deepcopy(data)\n traditional_test_label = copy.deepcopy(data_test)\n traditional_label.object_type =traditional_test_label.object_type = ObjectType.sequence_label\n traditional_label.symbol =traditional_test_label.symbol = 'traditional_label'\n\n mask_label = copy.deepcopy(data)\n mask_test_label = copy.deepcopy(data_test)\n #mask_label.object_type =mask_test_label.object_type= ObjectType.corpus\n mask_label.symbol = mask_test_label.symbol = 'mask_label'\n\n\n\n nextword=copy.deepcopy(data)\n nextword_test = copy.deepcopy(data_test)\n nextword.object_type=nextword_test.object_type=ObjectType.sequence_label\n nextword.symbol=nextword_test.symbol='nextword_label'\n nextword.sequence_offset=nextword_test.sequence_offset=1\n\n label=ZipDataset(seg_label,nextword,simplifided_label,traditional_label,mask_label)\n label_test = ZipDataset(seg_test_label, nextword_test, simplifided_test_label, traditional_test_label, mask_test_label)\n provider=TextSequenceDataProvider(\n traindata=Iterator(data=data,label=label),\n testdata=Iterator(data=data_test,label=label_test))\n return provider\n #,sample_filter=lambda x:x[0][-1]==3\n else:\n return None", "def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()" ]
[ "0.7358244", "0.5859265", "0.5856855", "0.5826998", "0.5742936", "0.552974", "0.5405307", "0.5320931", "0.5187403", "0.5179637", "0.5173516", "0.5170551", "0.51504564", "0.51137877", "0.50919735", "0.5080042", "0.5077611", "0.50590116", "0.5048102", "0.5040496", "0.50286186", "0.50239044", "0.501151", "0.50064164", "0.4995266", "0.49907285", "0.498819", "0.498819", "0.498819", "0.498819" ]
0.7384021
0
Unpack teachers from PDDRTeachers experiment.
def unpack_teachers(self, extra: dict):
    self.teacher_policies.extend(extra["teacher_policies"])
    self.teacher_envs.extend(extra["teacher_envs"])
    self.teacher_expl_strats.extend(extra["teacher_expl_strats"])
    self.teacher_critics.extend(extra["teacher_critics"])
    self.teacher_ex_dirs.extend(extra["teacher_ex_dirs"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_teacher_experiment(self, exp: Experiment):\n _, _, extra = load_experiment(exp)\n self.unpack_teachers(extra)", "def prune_teachers(self):\n self.teacher_policies = self.teacher_policies[: self.num_teachers]\n self.teacher_envs = self.teacher_envs[: self.num_teachers]\n self.teacher_expl_strats = self.teacher_expl_strats[: self.num_teachers]\n self.teacher_critics = self.teacher_critics[: self.num_teachers]\n self.teacher_ex_dirs = self.teacher_ex_dirs[: self.num_teachers]", "def get_teacher_assign():\n assignment_data = query_db(\n \"SELECT assignments.id, assignments.name, assignments.due_date \"\n \"FROM assignments JOIN topics ON assignments.topic_id=topics.id \"\n \"JOIN classes ON topics.class_id=classes.id WHERE teacher_id=?;\",\n [flask.session[\"id\"]],\n )\n assignments = []\n for assignment in assignment_data:\n assignment_dict_teach = {}\n assignment_dict_teach[\"id\"] = assignment[0]\n assignment_dict_teach[\"name\"] = assignment[1]\n assignment_dict_teach[\"due_date\"] = assignment[2]\n assignments.append(assignment_dict_teach)\n return assignments", "def test_pytd_teacher(self):\n defaults = parser_defaults.copy()\n defaults['datatype'] = 'train:stream'\n defaults['image_mode'] = 'ascii'\n\n with testing_utils.capture_output():\n # Get processed act from agent\n parser = display_setup_args()\n defaults['pytorch_teacher_dataset'] = 'flickr30k'\n del defaults['pytorch_teacher_task']\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n teacher = create_task_agent_from_taskname(opt)[0]\n pytorch_teacher_act = teacher.act()\n\n parser = display_setup_args()\n defaults['task'] = 'flickr30k'\n del defaults['pytorch_teacher_dataset']\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n teacher = create_task_agent_from_taskname(opt)[0]\n regular_teacher_act = teacher.act()\n\n keys = set(pytorch_teacher_act.keys()).intersection(\n set(regular_teacher_act.keys()))\n self.assertTrue(len(keys) != 0)\n for key in keys:\n self.assertTrue(pytorch_teacher_act[key] == regular_teacher_act[key],\n 'PytorchDataTeacher does not have the same value '\n 'as regular teacher for act key: {}'.format(key))", "def load_teachers(self):\n # Get the experiment's directory to load from\n ex_dir = ask_for_experiment(max_display=10, env_name=self.env_real.name, perma=False)\n self.load_teacher_experiment(ex_dir)\n if len(self.teacher_policies) < self.num_teachers:\n print(\n f\"You have loaded {len(self.teacher_policies)} teachers - load at least {self.num_teachers - len(self.teacher_policies)} more!\"\n )\n self.load_teachers()", "def load_primers(tsv_filename):\n answer = []\n with open(tsv_filename) as handle:\n for line in handle:\n if line.startswith(\"#\"):\n continue\n parts = line.rstrip(\"\\n\").split(\"\\t\")\n if len(parts) == 2:\n left, right = parts\n name = f\"P{len(answer)}\"\n else:\n name, left, right = parts[:3]\n answer.append((name, left, right))\n return answer", "def test_pyt_multitask(self):\n\n def run_display_test(defaults, ep_and_ex_counts):\n with testing_utils.capture_output() as f:\n parser = display_setup_args()\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n display_data(opt)\n str_output = f.getvalue()\n self.assertTrue(\n '[ loaded {} episodes with a total of {} examples ]'.format(\n ep_and_ex_counts[0], ep_and_ex_counts[1]\n ) in str_output,\n 'PytorchDataTeacher multitasking failed with '\n 'following args: {}'.format(opt)\n )\n\n task1 = 'babi:task1k:1'\n task2 = 'babi:task1k:2'\n dataset1 = 'flickr30k'\n dataset2 = 
'vqa_v1'\n\n # Expected example and episode counts\n eps_and_exs_counts = [\n (1800, 1800),\n (1080, 1800),\n (29900, 29900),\n (29180, 29900),\n (277349, 277349)\n ]\n defaults = parser_defaults.copy()\n\n # 1.\n defaults['pytorch_teacher_task'] = '{},{}'.format(task1, task2)\n run_display_test(defaults, eps_and_exs_counts[0])\n\n # 2.\n defaults['pytorch_teacher_task'] = task1\n defaults['task'] = task2\n run_display_test(defaults, eps_and_exs_counts[1])\n\n # 3.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = dataset1\n run_display_test(defaults, eps_and_exs_counts[2])\n\n # 4.\n del defaults['pytorch_teacher_task']\n defaults['task'] = task1\n run_display_test(defaults, eps_and_exs_counts[3])\n\n # 5.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = '{},{}'.format(dataset1, dataset2)\n run_display_test(defaults, eps_and_exs_counts[4])", "def get_teacher(self, **fields):\n existing_fields = [i.name for i in self._db.get_columns('teachers')]\n teacher_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n teacher_fields[key] = value\n teachers = [i for i in Teachers.select().filter(**teacher_fields)]\n # Expect single value if search by unique fields, list if by non-unique\n return teachers if len(teachers) > 1 else teachers[0] if len(teachers) == 1 else None", "def get_all_profesors(self) -> List[Teacher]:\n self.cursor.execute(\n f\"SELECT * FROM {self.table_name}\")\n \n teachers = []\n for teacher in self.cursor.fetchall():\n teacher_parsed = list(teacher[0:8]) + [json.loads(t) for t in teacher[8:]]\n teachers.append(Teacher.parse_tuple(teacher_parsed))\n \n return teachers", "def ensemble_preds(dataset, nb_teachers, stdnt_data):\n\n # Compute shape of array that will hold probabilities produced by each\n # teacher, for each training point, and each output class\n result_shape = (nb_teachers, len(stdnt_data), FLAGS.nb_labels)\n\n # Create array that will hold result\n result = np.zeros(result_shape, dtype=np.float32)\n\n # Get predictions from each teacher\n for teacher_id in xrange(nb_teachers):\n # Compute path of checkpoint file for teacher model with ID teacher_id\n if FLAGS.deeper:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '_deep.ckpt-' + str(FLAGS.teachers_max_steps - 1) #NOLINT(long-line)\n else:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '.ckpt-' + str(FLAGS.teachers_max_steps - 1) # NOLINT(long-line)\n\n # Get predictions on our training data and store in result array\n result[teacher_id] = deep_cnn.softmax_preds(stdnt_data, ckpt_path)\n\n # This can take a while when there are a lot of teachers so output status\n print(\"Computed Teacher \" + str(teacher_id) + \" softmax predictions\")\n\n return result", "def multiple_matlab_csv_to_teacher_data(short_runs_dirname):\n subdirname = 'Run-'\n data = None\n data_length = 0\n for i in range(10):\n dirname = os.path.join(short_runs_dirname, subdirname+str(i+1))\n run_data = matlab_csv_to_teacher_data(dirname)\n if i == 0:\n data = run_data\n else:\n for i, phoneme_data in enumerate(run_data):\n data[i] = np.vstack((data[i], phoneme_data))\n\n data_length += run_data[0].shape[0]\n\n for i, phoneme_data in enumerate(data):\n assert phoneme_data.shape[0] == data_length\n\n return data", "def get_teachers(self):\n query = Teacher.all().order('teacher')\n return query.fetch()", "def load_unpacker_dataset(sentences):\n return 
TFRecordDataset([path.join(TFRUDIR, sentence+'.tfr')\n for sentence in sentences])\\\n .map(\n lambda record: \\\n tf.parse_single_example(\n record,\n features={\n 's': tf.FixedLenFeature([], tf.string),\n 'l': tf.FixedLenFeature([NL], tf.float32),\n 't': tf.FixedLenFeature([NT], tf.float32)\n }\n )\n )\\\n .map(\n lambda feature: (feature['l'], feature['s'], feature['t'])\n )", "def train_teacher (nb_teachers, teacher_id):\n # Load the dataset\n X_train, X_test, y_train, y_test = models.get_dataset()\n\n print(X_train.shape)\n print(y_train.shape)\n print(X_test.shape)\n print(y_test.shape)\n \n # Retrieve subset of data for this teacher\n data, labels = partition.partition_dataset(X_train,\n y_train,\n nb_teachers,\n teacher_id)\n\n print(\"Length of training data: \" + str(len(labels)))\n\n # Define teacher checkpoint filename and full path\n\n filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.hdf5'\n filename2 = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.h5'\n \n # Perform teacher training need to modify \n \n\n # Create teacher model\n model, opt = models.create_two_layer_mlp(46) # num of cols\n model.compile(loss='binary_crossentropy',\n optimizer=\"Adam\",\n metrics=['accuracy'])\n model, hist = models.training(model, data, X_test, labels, y_test,filename)\n\n #modify\n model_json = model.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\n model.save_weights(filename2)\n print(\"Saved model to disk\")\n return True", "def extract_tubelets(dname, gpu=-1, redo=False):\n d = GetDataset(dname)\n\n if gpu >= 0:\n caffe.set_mode_gpu()\n caffe.set_device(gpu)\n\n model_dir = os.path.join(os.path.dirname(__file__), '../models/ACT-detector/', dname)\n output_dir = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)\n \n # load the RGB network\n rgb_proto = os.path.join(model_dir, \"deploy_RGB.prototxt\")\n rgb_model = os.path.join(model_dir, \"../generated_AVA_iter_118662.caffemodel\")\n net_rgb = caffe.Net(rgb_proto, caffe.TEST, weights=rgb_model)\n \n # load the FLOW5 network\n flo_proto = os.path.join(model_dir, \"deploy_FLOW5.prototxt\")\n flo_model = os.path.join(model_dir, \"../generated_AVA_iter_59463.caffemodel\")\n net_flo = caffe.Net(flo_proto, caffe.TEST, weights=flo_model)\n\n vlist = d.test_vlist()\n for iv, v in enumerate(vlist):\n print(\"Processing video {:d}/{:d}: {:s}\".format( iv+1, len(vlist), v))\n h, w = d.resolution(v)\n \n # network output is normalized between 0,1 ; so we will multiply it by the following array\n resolution_array = np.array([w,h,w,h]*K, dtype=np.float32)\n \n # now process each frame\n for i in xrange(1, 1 + d.nframes(v) - K + 1):\n outfile = os.path.join(output_dir, d.frame_format(v,i) + \".pkl\")\n \n # skip if already computed\n if os.path.isfile(outfile) and not redo:\n continue\n \n # read the frames for the forward\n kwargs_rgb = {}\n kwargs_flo = {}\n for j in xrange(K):\n cap = cv2.VideoCapture(d.vidfile(v,0))\n #print(frame)\n #print(int(cap.get(7)))\n cap.set(1,i + j - 1)\n im = cap.read()[1]\n cap.release()\n #im = cv2.imread(d.imfile(v, i + j))\n if im is None:\n print \"Image {:s} does not exist\".format(d.imfile(v, i+j))\n return\n imscale = cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR)\n kwargs_rgb['data_stream' + str(j)] = np.transpose(imscale-MEAN, (2, 0, 1))[None, :, :, :]\n imf = [cv2.imread(d.flowfile(v.split(\".\")[0], min(d.nframes(v), i + j + iflow))) for iflow in xrange(NFLOWS)]\n if 
np.any(imf) is None:\n print \"Flow image {:s} does not exist\".format(d.flowfile(v, i+j))\n return\n imscalef = [cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR) for im in imf]\n timscale = [np.transpose(im-MEAN, (2, 0, 1))[None, :, :, :] for im in imscalef]\n kwargs_flo['data_stream' + str(j) + 'flow'] = np.concatenate(timscale, axis=1)\n \n # compute rgb and flow scores\n # two forward passes: one for the rgb and one for the flow \n net_rgb.forward(end=\"mbox_conf_flatten\", **kwargs_rgb) # forward of rgb with confidence and regression\n net_flo.forward(end=\"mbox_conf_flatten\", **kwargs_flo) # forward of flow5 with confidence and regression\n \n # compute late fusion of rgb and flow scores (keep regression from rgb)\n # use net_rgb for standard detections, net_flo for having all boxes\n scores = 0.5 * (net_rgb.blobs['mbox_conf_flatten'].data + net_flo.blobs['mbox_conf_flatten'].data)\n net_rgb.blobs['mbox_conf_flatten'].data[...] = scores\n net_flo.blobs['mbox_conf_flatten'].data[...] = scores\n net_flo.blobs['mbox_loc'].data[...] = net_rgb.blobs['mbox_loc'].data\n \n # two forward passes, only for the last layer \n # dets is the detections after per-class NMS and thresholding (stardard)\n # dets_all contains all the scores and regressions for all tubelets \n dets = net_rgb.forward(start='detection_out')['detection_out'][0, 0, :, 1:]\n dets_all = net_flo.forward(start='detection_out_full')['detection_out_full'][0, 0, :, 1:]\n \n # parse detections with per-class NMS\n if dets.shape[0] == 1 and np.all(dets == -1):\n dets = np.empty((0, dets.shape[1]), dtype=np.float32)\n\n dets[:, 2:] *= resolution_array # network output was normalized in [0..1]\n dets[:, 0] -= 1 # label 0 was background, come back to label in [0..nlabels-1]\n dets[:, 2::2] = np.maximum(0, np.minimum(w, dets[:, 2::2]))\n dets[:, 3::2] = np.maximum(0, np.minimum(h, dets[:, 3::2]))\n\n # parse detections with global NMS at 0.7 (top 300)\n # coordinates were normalized in [0..1]\n dets_all[:, 0:4*K] *= resolution_array \n dets_all[:, 0:4*K:2] = np.maximum(0, np.minimum(w, dets_all[:, 0:4*K:2]))\n dets_all[:, 1:4*K:2] = np.maximum(0, np.minimum(h, dets_all[:, 1:4*K:2]))\n idx = nms_tubelets(np.concatenate((dets_all[:, :4*K], np.max(dets_all[:, 4*K+1:], axis=1)[:, None]), axis=1), 0.7, 300)\n dets_all = dets_all[idx, :]\n \n # save file\n if not os.path.isdir(os.path.dirname(outfile)):\n os.system('mkdir -p ' + os.path.dirname(outfile))\n\n with open(outfile, 'wb') as fid:\n pickle.dump((dets, dets_all), fid)", "def test_teacher_role():\n response = test_app.post(\"/bkt_service/unwind\", params='''[{\n \"event\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/OutcomeEvent\",\n \"actor\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"student-1462300421838-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/Person\",\n \"roles\": [\n \"urn:lti:instrole:ims/lis/Teacher\"\n ]\n },\n \"action\": \"http://purl.imsglobal.org/vocab/caliper/v1/action#Graded\",\n \"object\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"attempt-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Attempt\",\n \"extensions\": {\n \"assessmentType\": \"Diagnostic Assessment\",\n \"assessmentId\": \"assessment-1462300421838-4\"\n },\n \"count\": 1,\n \"startedAtTime\": \"2016-05-03T21:33:41.844Z\",\n \"endedAtTime\": \"2016-05-03T22:03:41.844Z\"\n },\n 
\"generated\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"result-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"assignableId\": \"assessment-1462300421838-4\",\n \"normalScore\": 80,\n \"totalScore\": 100,\n \"itemResults\": [\n {\n \"@id\": \"item-result-1462300421838-4-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"question_type\": \"mcq\",\n \"automarkable\": 1,\n \"score\": 7,\n \"max_score\": 10,\n \"question_reference\": \"c0a3f0c8-eac7-4795-8c7a-adf98e336a7b\",\n \"item_reference\": \"Adaptive_Item2_extract_USMOs\",\n \"sequenceNumber\": 1\n }\n ]\n },\n \"group\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"class-01\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/CourseOffering\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {\n \"pageNumber\": null,\n \"courseCode\": \"course-01\",\n \"calmsCourseId\": \"calms-course-01\",\n \"lessonId\": \"lesson-01\",\n \"platform\": \"D2L\",\n \"classroomTypeId\": \"3500.0\",\n \"activityId\": \"10\",\n \"gradeLevel\": \"8\",\n \"CourseOfferingId\": \"1200.0\",\n \"adaptivewrapperId\": \"\",\n \"schoolYear\": \"2015-20116\",\n \"unitId\": \"3201.0\",\n \"moduleId\": \"1110.0\",\n \"courseId\": \"2550.0\",\n \"assessmentId\": \"4520.0\",\n \"originSystemId\": \"sams\",\n \"businessLineId\": \"1300.0\",\n \"contextId\": \"587279312bf9a9afd947ddab\"\n },\n \"dateCreated\": null,\n \"dateModified\": null,\n \"courseNumber\": null,\n \"academicSession\": null,\n \"subOrganizationOf\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"1001.0\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/w3c/Organization\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {},\n \"dateCreated\": null,\n \"dateModified\": null,\n \"subOrganizationOf\": null\n }\n },\n \"eventTime\": \"2017-01-09T14:21:00Z\"\n }\n }\n ]''')\n assert response.status == '200 OK'\n assert len(response.json) == 1\n #assert response.json[0][\"error\"][\"code\"] == 21\n #assert \"role\" in response.json[0][\"error\"][\"message\"]", "def extract_trpt_data(udp_packet):\n logger.debug('UDP packet sport [%s], dport [%s], len [%s]',\n udp_packet.sport, udp_packet.dport, udp_packet.len)\n\n trpt_pkt = TelemetryReport(_pkt=udp_packet.payload)\n trpt_eth = EthInt(trpt_pkt.payload)\n logger.debug('TRPT ethernet dst - [%s], src - [%s], type - [%s]',\n trpt_eth.dst, trpt_eth.src, trpt_eth.type)\n return extract_int_data(trpt_eth)", "def extract(soup):\r\n table = soup.find('div', id='dnn_ctr11396_TimeTableView_PlaceHolder').find('table')\r\n rows = table.findChildren('tr', recursive=False)\r\n return [[col.findAll('div', {'class': 'TTLesson'}) for col in row.findChildren('td', recursive=False)[1:]]\r\n for row in rows[1:]]", "def read_triplets(seed_candidates):\n if \"pickle\" in seed_candidates:\n if \"*\" in seed_candidates:\n all_files = glob.glob(seed_candidates)\n new_data = []\n for file_name in all_files:\n with open(file_name, 'rb') as f:\n data = pickle.load(f)\n for dd in data:\n new_data.append((dd[0], dd[1], dd[2], dd[3]))\n df_seed = pd.DataFrame(new_data, columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)\n else:\n with open(seed_candidates, 'rb') as f:\n data = pickle.load(f)\n new_data = []\n for dd in data:\n new_data.append((dd[0], dd[1], dd[2], dd[3]))\n # idx = int(dd[0][10:])\n # new_data.append((idx, dd[1], dd[2], dd[3]))\n df_seed = pd.DataFrame(new_data, 
columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)\n else:\n column_names = ['evtid', 'h1', 'h2', 'h3']\n if \"*\" in seed_candidates:\n all_files = glob.glob(seed_candidates)\n new_data = []\n for file_name in all_files:\n df_seed_tmp = pd.read_csv(file_name, header=None, names=column_names,)\n new_data.append(df_seed_tmp)\n df_seed = pd.concat(new_data)\n else:\n df_seed = pd.read_csv(seed_candidates, header=None,\n names=column_names)\n return df_seed", "def get_teacher_career_results(self, teacher, career):\n data = []\n\n # Get the active exams of the career.\n exams = EvaluationsExam.objects.filter(\n type__exact=career.type, status=\"ACTIVE\")\n\n # Get the results for each exam.\n for exam in exams:\n\n # Get the signatures of the teacher for the career in the exam.\n signatures_dtl = EvaluationsTeacherSignature.objects.filter(\n fk_teacher__exact=teacher.id, fk_period__exact=exam.fk_period, status=\"ACTIVE\").select_related('fk_signature')\n\n signatures_results = []\n for signature_dtl in signatures_dtl:\n \n # If it raise an exception, it means that the signature isn't evaluated yet or other error.\n try:\n # Get the results of the signature.\n signature_results = EvaluationsSignatureResult.objects.get(\n group=signature_dtl.group,\n fk_signature=signature_dtl.fk_signature.id,\n fk_exam=exam.id,\n status=\"ACTIVE\"\n )\n\n # Get the results for each question in the exam for the signature.\n questions_results = EvaluationsSignatureQuestionResult.objects.filter(\n group=signature_dtl.group,\n fk_signature=signature_dtl.fk_signature.id,\n fk_exam=exam.id,\n fk_question__optional='NO',\n status=\"ACTIVE\"\n ).values_list('fk_question__description', 'result')\n\n # Get the comments of the signature/group.\n comments_result = EvaluationsSignatureQuestionResult.objects.get(\n group=signature_dtl.group,\n fk_signature=signature_dtl.fk_signature.id,\n fk_exam=exam.id,\n fk_question__optional='YES',\n status=\"ACTIVE\"\n ).result\n\n # Split the comments and add them to a list, only the ones that are not empty.\n comments = list(filter(None, comments_result.split('|')))\n\n # Crate a dictionary with the results of the signature and the questions.\n signatures_results.append({\n 'teacher': teacher.name + ' ' + teacher.last_name + ' ' + teacher.last_name_2,\n 'signature': signature_dtl.fk_signature.description,\n 'group': signature_dtl.group,\n 'average': signature_results.average,\n 'comments': comments,\n 'total_evaluated': signature_results.total_evaluated,\n 'questions': questions_results\n })\n except Exception:\n pass\n\n # Add the results to the exam dictionary.\n exam_results = {\n 'exam': exam.description,\n 'career': career.description,\n 'signatures_results': signatures_results,\n 'period': exam.fk_period.period\n }\n\n # Add the exam results to the list that will be returned at the end.\n data.append(exam_results)\n\n return data", "def extractDDE(self, lang, username, screenname, description, tweets):\n if isinstance(tweets, list):\n tweets = ' '.join(tweets)\n form = {\n 'lang': lang,\n 'username': username,\n 'screenname': screenname,\n 'description': description,\n 'tweet': tweets\n }\n return self.POST('extract', {}, form)", "def get_quiz_teacher():\n quiz_data = query_db(\n \"SELECT id, name FROM quizzes WHERE creator_id=?;\", [flask.session[\"id\"]]\n )\n quizzes = []\n for quiz in quiz_data:\n quiz_dict = {}\n quiz_dict[\"id\"] = quiz[0]\n quiz_dict[\"name\"] = quiz[1]\n quizzes.append(quiz_dict)\n return quizzes", "def decode(self, session, dev_example):\n 
unzipped_dev_example = list(zip(*dev_example))\n input_feed = self.create_feed_dict(unzipped_dev_example[0:4], dropout = 1)\n output_feed = [self.h_s, self.h_e, self.relevence]\n outputs = session.run(output_feed, input_feed)\n h_s = outputs[0]\n h_e = outputs[1]\n rel = outputs[2]\n return h_s, h_e, rel", "def extract_unpack(self, args):\n return self.extract_features(args)", "def load_tamper(self, dataset_dir, subset):\n # Add classes. We have one class.\n # Naming the dataset nucleus, and the class nucleus\n self.add_class(\"tampers\", 1, \"tampers\")\n\n # Which subset?\n # \"val\": use hard-coded list above\n # \"train\": use data from stage1_train minus the hard-coded list above\n # else: use the data from the specified sub-directory\n # assert subset in [\"train\", \"val\", \"stage1_train\", \"stage1_test\", \"stage2_test\"]\n # subset_dir = \"stage1_train\" if subset in [\"train\", \"val\"] else subset\n dataset_dir = os.path.join(dataset_dir, subset, 'images')\n if subset == \"val\" or subset == \"test\":\n image_ids = next(os.walk(dataset_dir))[2]\n else:\n # Get image ids from directory names\n image_ids = next(os.walk(dataset_dir))[2]\n \n\n # dircopy_move = '/data/twj/copy-move/data_zoo/dataset/images/train'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # dirnew_splicing = '/data/tamper'\n # image_ids_new_splicing = next(os.walk(os.path.join(dirnew_splicing, 'images')))[2]\n\n # dircopy_move = '/home/as/deeplab/wpmrcnn/ca2new/test'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n \n # dircopy_move = '/data/gy/ca2att/train3'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # # # dirtxt_sp = '/data/gy/tamperpre/train'\n # # # image_ids_txt_sp = next(os.walk(os.path.join(dirtxt_sp, 'images')))[2]\n\n # dirnew_sp = '/data/gy/c2newsp/train'\n # image_ids_new_sp = next(os.walk(os.path.join(dirnew_sp, 'images')))[2]\n\n # Add images\n for image_id in image_ids:\n self.add_image(\n \"tampers\",\n image_id=image_id[:-4],\n path=os.path.join(dataset_dir, image_id))\n\n # for image_id in image_ids_copy_move:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dircopy_move, 'images', image_id))\n\n # for image_id in image_ids_new_splicing:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_splicing, 'images', image_id))\n\n # # for image_id in image_ids_txt_sp:\n # # self.add_image(\n # # \"tampers\",\n # # image_id=image_id[:-4],\n # # path=os.path.join(dirtxt_sp, 'images', image_id))\n\n # for image_id in image_ids_new_sp:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_sp, 'images', image_id))", "def inference(self):\r\n\t\tfor partition, loader in self.loaders.items():\r\n\t\t\tavg_loss, (y, y_hat), post, attentions, tags = self.eval_loader(\r\n\t\t\t\tloader)\r\n\t\t\tself.preds[partition] = {\r\n\t\t\t\t'tag': tags,\r\n\t\t\t\t'y': y,\r\n\t\t\t\t'y_hat': y_hat,\r\n\t\t\t\t# 'posteriors': post,\r\n\t\t\t\t# 'attentions': attentions\r\n\t\t\t}", "def deepscore(teacher):\n if teacher == None:\n print(\"Not a valid teacher\")\n return\n if teacher.get('rmpdata') == None: snc.rateThisProfessor(teacher,instructors)\n print(\"# \" + teacher['name'])\n scoreTeacherlegacy(teacher)\n scoreTeacherues(teacher)\n scoreTeacherrmp(teacher)", "def partition_dataset(data, labels, nb_teachers, teacher_id):\n\n # Sanity check\n assert (int(teacher_id) < 
int(nb_teachers))\n\n # This will floor the possible number of batches\n batch_len = int(len(data) / nb_teachers)\n\n # Compute start, end indices of partition\n start = teacher_id * batch_len\n end = (teacher_id + 1) * batch_len\n\n # Slice partition off\n partition_data = data[start:end]\n if labels is not None:\n partition_labels = labels[start:end]\n else:\n partition_labels = None\n\n return partition_data, partition_labels", "def get_train(self, even=None):\n\n #self.images, self.labels, self.traces = trace_data.get_my_teacher()\n _, self.images, self.labels, self.traces, _ = trace_data.get_my_teacher()\n #print(self.labels)\n self.length = len(self.images)\n self.create_teacher()", "def legacydatapoints(teacher):\n points = 0\n for surveytype in ['2','1','H']:\n for criteria in range(0,22):\n for degree in range(0,6):\n try:\n points += teacher['data'][surveytype][criteria]['degree' + str(degree)]\n except:\n pass\n for criteria in range(22,27):\n for degree in range(0,7):\n try:\n points += teacher['data'][surveytype][criteria]['degree' + str(degree)]\n except:\n pass\n return points" ]
[ "0.6385784", "0.5618853", "0.54759747", "0.54556435", "0.5198481", "0.5048664", "0.49990323", "0.4985974", "0.49433678", "0.4874634", "0.47989118", "0.47896883", "0.4787353", "0.47753018", "0.4730215", "0.46576267", "0.46548298", "0.46495625", "0.4646888", "0.46451658", "0.46388948", "0.454687", "0.4523255", "0.45193264", "0.4513335", "0.449714", "0.44720286", "0.44492343", "0.4447388", "0.44390026" ]
0.67790115
0
Prune teachers to only use the first num_teachers of them.
def prune_teachers(self):
    self.teacher_policies = self.teacher_policies[: self.num_teachers]
    self.teacher_envs = self.teacher_envs[: self.num_teachers]
    self.teacher_expl_strats = self.teacher_expl_strats[: self.num_teachers]
    self.teacher_critics = self.teacher_critics[: self.num_teachers]
    self.teacher_ex_dirs = self.teacher_ex_dirs[: self.num_teachers]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanOrphanedLearners(self):\n\n # Before deleting Learners, ensure that if any Learners that are about to be\n # deleted point to a Team as their action, then that Team's count of\n # referincing Learners is decremented.\n for learner in self.learner_pop:\n if learner.getNumReferencingTeams() == 0 and not learner.isActionAtomic():\n learner.action.decrementNumReferencingLearners()\n\n # Remove all orphaned Learners from the Learner population\n self.learner_pop = [l for l in self.learner_pop if not l.getNumReferencingTeams() == 0]", "def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break", "def _prune_candidates(self, beam_width=None):\n if beam_width is None:\n beam_width = self.beam_width\n if len(self.candidates) <= beam_width:\n return\n neg_scores = np.array([-cand.logp_total() for cand in self.candidates])\n parted_indices = np.argpartition(neg_scores, beam_width - 1)\n self.candidates = np.array(self.candidates)[parted_indices[:beam_width]].tolist()", "def prune(self):\n self.sort(key=lambda chunk: chunk.probability)\n del self[:-self.model.num_parses]", "def prune_features(self, verbose=False):\n # Collect all features and prune those occurring only once.\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)\n\n self.remove_features = []\n for k in features:\n if features[k] <= 2:\n self.remove_features.append(k)\n\n if verbose:\n print \"Number of unique features: \", len(self.remove_features)\n\n self.remove_features = set(self.remove_features)\n for k in self.utterance_features:\n self.utterance_features[k].prune(self.remove_features)\n\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)", "def load_teachers(self):\n # Get the experiment's directory to load from\n ex_dir = ask_for_experiment(max_display=10, env_name=self.env_real.name, perma=False)\n self.load_teacher_experiment(ex_dir)\n if len(self.teacher_policies) < self.num_teachers:\n print(\n f\"You have loaded {len(self.teacher_policies)} teachers - load at least {self.num_teachers - len(self.teacher_policies)} more!\"\n )\n self.load_teachers()", "def init_prune_list(self):\n\t\tfor i in range(1, len(self.elements)+1):\n\t\t\tcount = self.getSupport({i})\n\t\t\tif(count >= self.support):\n\t\t\t\tself.prune_list[(i,)] = count", "def helper(reviewer: Any, graph: Graph) -> set:\n reviewers_so_far = set()\n\n for movie in graph.get_neighbours(reviewer):\n for user in graph.get_neighbours(movie):\n if graph.get_weight(user, movie) >= 8:\n reviewers_so_far.add(user)\n return reviewers_so_far", "def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]", "def _remove_experts(self):\n self.experts = [ex for ex in self.experts if np.mean(\n ex.weight) >= self.theta]", "def delete_n_volunteers(app_id):\r\n delete_memoized(n_volunteers, app_id)", "def prune_(self):\n idx = self.factor_lams() > 0\n self.factors = [f[:, idx] for f in 
self.factors]\n self.rank = np.sum(idx)", "def delSpillOppIRunder(trekk, vinnendeSekvenser):\n runder = []\n i = 0\n runde = []\n while i < len(trekk):\n runde.append(trekk[i])\n personX, personO = delOppRunde(runde)\n if len(runde) >= 5 and (sjekkVinnerRunde(personX, vinnendeSekvenser) or sjekkVinnerRunde(personO, vinnendeSekvenser)):\n runder.append(runde)\n runde = []\n if len(runde) == 9:\n runder.append(runde) \n runde = []\n i += 1\n return runder", "def delete_n_anonymous_volunteers(app_id):\r\n delete_memoized(n_anonymous_volunteers, app_id)", "def prune_tree(tree, cutoff, posteriors):\n new_tree = []\n for e in tree:\n try:\n if posteriors[e] > cutoff:\n new_tree.append(e)\n except KeyError:\n if posteriors[e[::-1]] > cutoff:\n new_tree.append(e)\n return new_tree", "def prune(neuron,\n number_of_nodes):\n n = len(neuron.nodes_list)\n for i in range(n - number_of_nodes):\n index = shortest_tips(neuron)\n neuron = remove_node(neuron, index)\n return neuron", "def remove_numbers(self):\n for i in range(len(self.board.board[0])):\n while self.board.board[i].count(0) < 6:\n random_val = random.randint(0, 8)\n self.board.update_board((i, random_val), 0)", "def deepscore(teacher):\n if teacher == None:\n print(\"Not a valid teacher\")\n return\n if teacher.get('rmpdata') == None: snc.rateThisProfessor(teacher,instructors)\n print(\"# \" + teacher['name'])\n scoreTeacherlegacy(teacher)\n scoreTeacherues(teacher)\n scoreTeacherrmp(teacher)", "def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n group_idx = int(layer / self.config.inner_group_num)\n inner_group_idx = int(layer - group_idx * self.config.inner_group_num)\n self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)", "def generate_teachers(G, school_type, N_classes, family_member_counter, \n\t\t\t\t\t family_counter, teacher_p_adults, teacher_p_children):\n\tN_teachers = get_N_teachers(school_type, N_classes)\n\tteacher_nodes = ['t{:04d}'.format(i) for i in range(1, N_teachers + 1)]\n\tG.add_nodes_from(teacher_nodes)\n\t\n\tfor t in teacher_nodes:\n\t\tfamily_nodes = [t]\n\t\t# draw a random number of children and adults for the family\n\t\tages, N_adults = generate_teacher_family(teacher_p_adults, teacher_p_children)\n\t\t\n\t\tages = list(ages)\n\t\tfor adult in range(N_adults - 1):\n\t\t\tages.append(20.5) # default age for adults\n\t\t\n\t\t# add the family member nodes and their attributes to the graph\n\t\tfor age in ages:\n\t\t\tfamily_member_ID = 'f{:04d}'.format(family_member_counter)\n\t\t\tfamily_nodes.append(family_member_ID)\n\t\t\tG.add_node(family_member_ID)\n\t\t\tfamily_member_counter += 1\n\t\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t\t{family_member_ID:{'type':'family_member',\n\t\t\t\t\t\t\t\t\t\t 'age':age,\n\t\t\t\t\t\t\t\t\t\t 'family':family_counter,\n\t\t\t\t\t\t\t\t\t\t 'unit':'family'}})\n\t\t\t\t\t\n\t\t# finally, also set the teacher's node attributes\n\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t{t:{'type':'teacher', \n\t\t\t\t\t\t# Note: 20.5 is the age at which\n\t\t\t\t\t\t# the symptom and transmission risk\n\t\t\t\t\t\t# is that of an adult\n\t\t\t\t\t\t'age':20.5,\n\t\t\t\t\t\t'unit':'faculty_room',\n\t\t\t\t\t\t'family':family_counter}})\n\t\tfamily_counter += 1", "def tweet_cleaner(tweets):\n n_tweets = {}\n clean = cleaner()\n for tweet in tweets:\n text = clean.clean_text(tweets[tweet][\"text\"])\n if len(text) > 15:\n n_tweets[tweet] = tweets[tweet]\n return n_tweets", "def _prune(self):\n while 
len(self.data) > self.limit:\n self.data.popleft()", "def distribute_reviews(asmt, perStudent):\n\n # Get all submissions for this assignment.\n subs = AssignmentSubmission.objects.filter(submission_for=asmt)\n #print subs\n course = asmt.course_code\n users = ReviewUser.objects.filter(courses=course)\n numUsers = len(users)\n latestSubmissions = get_latest(course, asmt, subs, users)\n # print 'filtered subs are ', latestSubmissions\n numSubs = len(latestSubmissions)\n print 'number of submissions: ', numSubs\n asmt.reviews_per_student = perStudent\n \n for user in users:\n print user\n # Don't want to make staff review stuff.\n if(user.djangoUser.is_staff or user.djangoUser.is_superuser):\n continue\n\n review = AssignmentReview.objects.get_or_create(by=user, assignment=asmt)[0]\n reviewsAssigned = len(review.submissions.all())\n # In case lecturer assigns reviews multiple times, or number of reviews changes. \n print (\"reviewsAssigned\", reviewsAssigned)\n if(reviewsAssigned < perStudent):\n for i in range(perStudent-reviewsAssigned):\n index = random.randint(0, numSubs-1)\n submission = latestSubmissions[index]\n \n # Make sure user isn't assigned to review their own submission\n # NB in the amazing edge case where this user is the only person who \n # submitted the assignment, we get an infinite loopevi\n # Also don't want student to be assigned same submission twice.\n while((submission.by == user) or (submission in review.submissions.all())):\n print submission.by == user\n #print review.submissions.all()\n #print \"in the while loop\"\n index = random.randint(0, numSubs-1)\n print index\n submission = latestSubmissions[index]\n \n review.submissions.add(submission)\n # Lecturer has reduced number of reviews per user \n elif(reviewsAssigned > perStudent):\n # Number of submissions we want to de-assign\n deassign = reviewsAssigned - perStudent \n # Get all the submissions the student has not yet completed.\n incomplete = []\n for sub in review.submissions.all():\n annotationsDone = AssignmentReview.numAnnotations(review, sub)\n if(annotationsDone < asmt.min_annotations):\n incomplete.append((sub, annotationsDone))\n\n # Choose the least-complete deassign submission reviews to remove.\n # Ascending list \n sortedList = sorted(incomplete, key=itemgetter(1))\n removeFromIncomplete = min(deassign, len(sortedList))\n for i in range(removeFromIncomplete):\n review.submissions.remove(sortedList[i][0]) \n \n if deassign > len(sortedList):\n for i in range(deassign):\n review.submissions.remove(reviews.submissions.all()[i])\n else: # nothing to assign or remove\n return \n \n return", "def prune(self, age_hours):\r\n pass", "def remove_promotional_tweets(tweets):\n clean = cleaner()\n n_tweets = {}\n for tweet in tweets:\n if not clean.linkChecker(tweets[tweet][\"text\"]):\n n_tweets[tweet] = tweets[tweet]\n return n_tweets", "def _prunelowestweight(self):\r\n # note: must be called with acquired self._lock!\r\n numentries = len(self._dict)\r\n if numentries >= self.maxentries:\r\n # evict according to entry's weight\r\n items = [(entry.weight, key) for key, entry in self._dict.iteritems()]\r\n items.sort()\r\n index = numentries - self.prunenum\r\n if index > 0:\r\n for weight, key in items[:index]:\r\n del self._dict[key]", "def suggest_movies(self, reviewer: Any, other: Any) -> list[Any]:\n potential_recs = self.get_neighbours(other)\n suggestions_so_far = []\n neighbours = self.get_neighbours(reviewer)\n\n for p_rec in potential_recs:\n if p_rec not in neighbours and 
self.get_weight(other, p_rec) >= 9:\n suggestions_so_far.append(p_rec)\n\n return suggestions_so_far", "def prune(self, upper, lower):\n # max_count = sorted([self.counts[key] for key in self.counts.keys()])[::-1][upper]\n max_count = upper\n\n print('Removed all words that occur less than {} times and more than {} times'.format(lower, upper))\n for i, doc in enumerate(self.docs):\n new_doc = []\n for word in doc:\n if self.counts[word] <= max_count and self.counts[word] > lower:\n new_doc.append(word)\n self.docs[i] = new_doc", "def ensemble_preds(dataset, nb_teachers, stdnt_data):\n\n # Compute shape of array that will hold probabilities produced by each\n # teacher, for each training point, and each output class\n result_shape = (nb_teachers, len(stdnt_data), FLAGS.nb_labels)\n\n # Create array that will hold result\n result = np.zeros(result_shape, dtype=np.float32)\n\n # Get predictions from each teacher\n for teacher_id in xrange(nb_teachers):\n # Compute path of checkpoint file for teacher model with ID teacher_id\n if FLAGS.deeper:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '_deep.ckpt-' + str(FLAGS.teachers_max_steps - 1) #NOLINT(long-line)\n else:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '.ckpt-' + str(FLAGS.teachers_max_steps - 1) # NOLINT(long-line)\n\n # Get predictions on our training data and store in result array\n result[teacher_id] = deep_cnn.softmax_preds(stdnt_data, ckpt_path)\n\n # This can take a while when there are a lot of teachers so output status\n print(\"Computed Teacher \" + str(teacher_id) + \" softmax predictions\")\n\n return result", "def remove_n_nos(self, num_nos):\n for i in range(num_nos):\n elem = random.randint(1, 11 ** 4)\n self.remove(elem)" ]
[ "0.5696694", "0.545549", "0.54449904", "0.5432693", "0.52488464", "0.5203805", "0.5048298", "0.50102764", "0.493123", "0.49195728", "0.48889333", "0.48790887", "0.4860135", "0.48572624", "0.48336893", "0.48332903", "0.4776946", "0.47586012", "0.47427273", "0.47353852", "0.47345516", "0.47345436", "0.4726536", "0.46756825", "0.46719763", "0.46627593", "0.46603566", "0.4659124", "0.46532997", "0.4649696" ]
0.7730154
0
return orthanc object of study
def get(self):
    return orthanc.study(self.orthanc_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def orthanc_studies(self):\n return [orthanc.study(x.orthanc_id) for x in self.studies]", "def salome_study_init(theStudyId=0):\n\n global salome_study_initial\n global myStudyManager, myStudyId, myStudy, myStudyName\n global orb, lcc, naming_service, cm\n\n if salome_study_initial:\n salome_study_initial = 0\n\n orb, lcc, naming_service, cm = salome_kernel.salome_kernel_init()\n\n # get Study Manager reference\n if verbose(): print \"looking for studyManager ...\"\n obj = naming_service.Resolve('myStudyManager')\n myStudyManager = obj._narrow(SALOMEDS.StudyManager)\n if verbose(): print \"studyManager found\"\n\n # get active study Id, ref and name\n myStudyId = getActiveStudy(theStudyId)\n if verbose(): print \"myStudyId\",myStudyId\n myStudy = myStudyManager.GetStudyByID(myStudyId)\n myStudyName = myStudy._get_Name()\n\n return myStudyManager, myStudyId, myStudy, myStudyName", "def __repr__(self):\n\n return \"<Study pmid=%d doi=%s title=%s year=%d>\" % (\n self.pmid, self.doi, self.title, self.year)", "def clone(self, *args):\n return _SALOMERuntime.OutputStudyPort_clone(self, *args)", "def demo_ortho_slicer():\n pl.clf()\n oslicer = OrthoSlicer(cut_coords=(0, 0, 0))\n from .anat_cache import _AnatCache\n map, affine, _ = _AnatCache.get_anat()\n oslicer.plot_map(map, affine, cmap=pl.cm.gray)\n return oslicer", "def _createObj(self) -> None:\n phase_img = skimage.img_as_float(skimage.data.camera())[::-1, ::-1]\n mod_img = skimage.img_as_float(skimage.data.immunohistochemistry()[:, :, 0])[::-1, ::-1]\n mod = skimage.transform.resize(mod_img, self.shape,\n mode='wrap', preserve_range=True)\n phase = skimage.transform.resize(phase_img, self.shape,\n mode='wrap', preserve_range=True)\n\n # Setting the ranges\n phase = (phase - np.min(phase)) / (np.max(phase) - np.min(phase)) * self.phase_range\n mod = (mod - np.min(mod)) / (np.max(mod) - np.min(mod)) * self.mod_range\n\n # Centering the phase at 0.\n phase = np.angle(np.exp(1j * (phase - scipy.stats.circmean(phase))))\n obj = (mod * np.exp(1j * phase)).astype('complex64')\n self._setObjArrayValues(obj)", "def phonology(request):\n\n perspective_cid = request.params.get('perspective_client_id')\n perspective_oid = request.params.get('perspective_object_id')\n\n # Checking if we have limits on number of computed results.\n\n limit = (None if 'limit' not in request.params else\n int(request.params.get('limit')))\n\n limit_exception = (None if 'limit_exception' not in request.params else\n int(request.params.get('limit_exception')))\n\n limit_no_vowel = (None if 'limit_no_vowel' not in request.params else\n int(request.params.get('limit_no_vowel')))\n\n limit_result = (None if 'limit_result' not in request.params else\n int(request.params.get('limit_result')))\n\n # TODO: get perspective's translation and language it belongs to.\n\n # We get lexical entries of this perspective with markup'ed sounds.\n\n Sound = aliased(Entity, name = \"Sound\")\n PublishingSound = aliased(PublishingEntity, name = \"PublishingSound\")\n\n query = DBSession.query(LexicalEntry, Entity, Sound, PublishingEntity, PublishingSound).filter(and_(\n LexicalEntry.parent_client_id == perspective_cid,\n LexicalEntry.parent_object_id == perspective_oid,\n LexicalEntry.marked_for_deletion == False,\n Entity.parent_client_id == LexicalEntry.client_id,\n Entity.parent_object_id == LexicalEntry.object_id,\n Entity.marked_for_deletion == False,\n Entity.additional_metadata.contains({\"data_type\": \"praat markup\"}),\n PublishingEntity.client_id == Entity.client_id,\n 
PublishingEntity.object_id == Entity.object_id,\n PublishingEntity.published == True,\n PublishingEntity.accepted == True,\n Sound.client_id == Entity.self_client_id,\n Sound.object_id == Entity.self_object_id,\n Sound.marked_for_deletion == False,\n PublishingSound.client_id == Sound.client_id,\n PublishingSound.object_id == Sound.object_id,\n PublishingSound.published == True,\n PublishingSound.accepted == True))\n\n # We process these lexical entries in batches. Just in case, it seems that perspectives rarely have more\n # then several hundred such lexical entries.\n\n exception_counter = 0\n no_vowel_counter = 0\n result_list = list()\n\n for index, row in enumerate(query.yield_per(100)):\n\n markup_url = row.Entity.content\n sound_url = row.Sound.content\n\n cache_key = 'phonology:{0}:{1}:{2}:{3}'.format(\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id)\n\n # Checking if we have cached result for this pair of sound/markup.\n\n cache_result = CACHE.get(cache_key)\n\n if cache_result == 'no_vowel':\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]: no vowels\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we have cached exception, we do the same as with absence of vowels, show its info and\n # continue.\n\n elif isinstance(cache_result, tuple) and cache_result[0] == 'exception':\n exception, traceback_string = cache_result[1:3]\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n '[CACHE {7}]: exception\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n log.debug(traceback_string)\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we actually have the result, we use it and continue.\n\n elif cache_result:\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in cache_result)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]:\\n{8}\\n{9}\\n{10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url, result_string))\n\n result_list.append(cache_result)\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n continue\n\n try:\n # Getting markup, checking for each tier if it needs to be processed.\n\n markup_bytes = urllib.request.urlopen(urllib.parse.quote(markup_url, safe = '/:')).read()\n\n textgrid = pympi.Praat.TextGrid(xmax = 0)\n textgrid.from_file(\n io.BytesIO(markup_bytes),\n codec = 
chardet.detect(markup_bytes)['encoding'])\n\n tier_data_list = []\n vowel_flag = False\n\n for tier_number, tier_name in textgrid.get_tier_name_num():\n\n raw_interval_list = textgrid.get_tier(tier_number).get_all_intervals()\n raw_interval_seq_list = [[]]\n\n # Splitting interval sequence on empty intervals.\n\n for raw_index, interval in enumerate(raw_interval_list):\n\n if len(interval[2].strip()) <= 0:\n if len(raw_interval_seq_list[-1]) > 0:\n raw_interval_seq_list.append([])\n\n else:\n raw_interval_seq_list[-1].append((raw_index, interval))\n\n if len(raw_interval_seq_list[-1]) <= 0:\n del raw_interval_seq_list[-1]\n\n # Selecting interval sequences for analysis, checking if we have unusual markup.\n \n interval_seq_list = []\n interval_idx_to_raw_idx = dict()\n\n unusual_markup_flag = False\n unusual_markup_list = []\n\n for raw_interval_seq in raw_interval_seq_list:\n\n interval_seq_list.append([])\n interval_idx_to_raw_idx[len(interval_seq_list) - 1] = {}\n\n for partial_raw_index, (raw_index, interval) in enumerate(raw_interval_seq):\n\n interval_text = interval[2].strip()\n\n # Accepting interval if its text contains at least one vowel, and is short enough or\n # is a valid phonetic transcription.\n\n transcription_check = re.fullmatch(transcription_re, interval_text)\n\n if (len(interval_text) > 0 and\n any(character in vowel_set for character in interval_text) and\n (len(interval_text) <= 2 or transcription_check)):\n\n interval_seq_list[-1].append(interval)\n\n sequence_index = len(interval_seq_list) - 1\n interval_index = len(interval_seq_list[-1]) - 1\n\n interval_idx_to_raw_idx[(sequence_index, interval_index)] = raw_index\n interval_idx_to_raw_idx[sequence_index][interval_index] = partial_raw_index\n\n # Noting if the interval contains unusual (i.e. non-transcription) markup.\n\n elif not transcription_check:\n\n unusual_markup_flag = True\n unusual_markup_list.append((raw_index, interval))\n\n transcription_list = [text for begin, end, text in raw_interval_list]\n transcription = ''.join(transcription_list)\n\n selected_list = [text\n for interval_list in interval_seq_list\n for begin, end, text in interval_list]\n\n selected = ''.join(selected_list)\n\n # If we have intervals with unusual markup, we report them.\n\n if unusual_markup_flag:\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' has interval(s) with unusual transcription text: '\n '{9} / {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription, dict(unusual_markup_list)))\n\n # If the markup does not have any vowels, we note it and also report it.\n\n if all(character not in vowel_set for character in transcription):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' doesn\\'t have any vowel markup: {9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription_list))\n\n # It is also possible that while full transcription has vowels, intervals selected for\n # analysis do not. 
In that case we also note it and report it.\n\n elif not any(character in vowel_set for character in selected):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel_selected'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' intervals to be processed don\\'t have any vowel markup: '\n 'markup {9}, selected {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name,\n transcription_list, selected_list))\n\n # Otherwise we store tier data to be used during processing of the sound file.\n\n else:\n tier_data_list.append((tier_number, tier_name,\n (raw_interval_list, raw_interval_seq_list, interval_seq_list,\n interval_idx_to_raw_idx, transcription)))\n\n vowel_flag = True\n\n # If there are no tiers with vowel markup, we skip this sound-markup file altogether.\n\n if not vowel_flag:\n\n CACHE.set(cache_key, 'no_vowel')\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # Otherwise we retrieve the sound file and analyse each vowel-containing markup.\n # Partially inspired by source code at scripts/convert_five_tiers.py:307.\n\n sound = None\n with tempfile.NamedTemporaryFile() as temp_file:\n\n sound_file = urllib.request.urlopen(urllib.parse.quote(sound_url, safe = '/:'))\n temp_file.write(sound_file.read())\n temp_file.flush()\n\n sound = AudioPraatLike(pydub.AudioSegment.from_wav(temp_file.name))\n\n tier_result_list = []\n\n for tier_number, tier_name, tier_data in tier_data_list:\n\n if tier_data == 'no_vowel' or tier_data == 'no_vowel_selected':\n tier_result_list.append((tier_number, tier_name, tier_data))\n continue\n\n # Analyzing vowel sounds of each interval sequence.\n\n (raw_interval_list, raw_interval_seq_list, interval_seq_list, interval_idx_to_raw_idx,\n transcription) = tier_data\n\n tier_result_list.append((tier_number, tier_name, []))\n\n for seq_index, (raw_interval_list, interval_list) in enumerate(zip(\n raw_interval_seq_list, interval_seq_list)):\n\n if len(interval_list) <= 0:\n continue\n\n (max_intensity_index, max_intensity, max_length_index, max_length) = \\\n find_max_interval_praat(sound, interval_list)\n\n max_intensity_interval = interval_list[max_intensity_index]\n max_length_interval = interval_list[max_length_index]\n\n max_intensity_f1_f2 = sound.get_interval_formants(*max_intensity_interval[:2])\n max_length_f1_f2 = sound.get_interval_formants(*max_length_interval[:2])\n\n # Compiling results.\n\n max_length_str = '{0} {1:.3f} [{2}]'.format(\n max_length_interval[2], max_length,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_length_index]])))\n\n max_intensity_str = '{0} {1:.3f} [{2}]'.format(\n max_intensity_interval[2],\n max_intensity,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_intensity_index]])))\n\n tier_result_list[-1][2].append([\n ''.join(text for index, (begin, end, text) in raw_interval_list),\n max_length_str,\n '{0:.3f}'.format(max_length_f1_f2[0]),\n '{0:.3f}'.format(max_length_f1_f2[1]),\n max_intensity_str,\n '{0:.3f}'.format(max_intensity_f1_f2[0]),\n '{0:.3f}'.format(max_intensity_f1_f2[1]),\n '+' if max_intensity_index == max_length_index else '-'])\n\n # Saving result.\n\n 
result_list.append(tier_result_list)\n CACHE.set(cache_key, tier_result_list)\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in tier_result_list)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}):'\n '\\n{7}\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url, result_string))\n\n # Stopping earlier, if required.\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n except Exception as exception:\n\n #\n # NOTE\n #\n # Exceptional situations encountered so far:\n #\n # 1. TextGrid file actually contains sound, and wav file actually contains textgrid markup.\n #\n # Perspective 330/4, LexicalEntry 330/7, sound-Entity 330/2328, markup-Entity 330/6934\n #\n # 2. Markup for one of the intervals contains a newline \"\\n\", and pympi fails to parse it.\n # Praat parses such files without problems.\n #\n # Perspective 330/4, LexicalEntry 330/20, sound-Entity 330/6297, markup-Entity 330/6967\n #\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'exception\\n{7}\\n{8}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url))\n\n # if we encountered an exception, we show its info and remember not to try offending\n # sound/markup pair again.\n\n traceback_string = ''.join(traceback.format_exception(\n exception, exception, exception.__traceback__))[:-1]\n\n log.debug(traceback_string)\n\n CACHE.set(cache_key, ('exception', exception,\n traceback_string.replace('Traceback', 'CACHEd traceback')))\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n log.debug('phonology {0}/{1}: {2} result{3}, {4} no vowels, {5} exceptions'.format(\n perspective_cid, perspective_oid,\n len(result_list), '' if len(result_list) == 1 else 's',\n no_vowel_counter, exception_counter))\n\n # If we have no results, we indicate the situation and also show number of failures and number of\n # markups with no vowels.\n\n if not result_list:\n request.response.status = HTTPPreconditionFailed.code\n\n return {\n \"error\": \"no markups for this query\",\n \"exception_counter\": exception_counter,\n \"no_vowel_counter\": no_vowel_counter}\n\n # Otherwise we create and then serve Excel file.\n\n excel_book = xlwt.Workbook(encoding = \"utf-8\")\n sheet = excel_book.add_sheet(\"Sheet 1\")\n\n sheet.write(0, 0, 'Transcription')\n sheet.write(0, 1, 'Longest (seconds) interval')\n sheet.write(0, 2, 'F1 (Hz)')\n sheet.write(0, 3, 'F2 (Hz)')\n sheet.write(0, 4, 'Highest intensity (dB) interval')\n sheet.write(0, 5, 'F1 (Hz)')\n sheet.write(0, 6, 'F2 (Hz)')\n sheet.write(0, 7, 'Coincidence')\n\n row_counter = 1\n\n for tier_result_list in result_list:\n for tier_number, tier_name, tier_result_seq_list in tier_result_list:\n\n if tier_result_seq_list == 'no_vowel':\n continue\n\n for tier_data in tier_result_seq_list:\n for 
index, tier_data_str in enumerate(tier_data):\n sheet.write(row_counter, index, tier_data_str)\n\n row_counter += 1\n\n # Formatting column widths.\n\n sheet.col(0).width = 24 * 256\n sheet.col(1).width = 24 * 256\n sheet.col(2).width = 12 * 256\n sheet.col(3).width = 12 * 256\n sheet.col(4).width = 24 * 256\n sheet.col(5).width = 12 * 256\n sheet.col(6).width = 12 * 256\n sheet.col(7).width = 12 * 256\n\n excel_stream = io.BytesIO()\n excel_book.save(excel_stream)\n excel_stream.seek(0)\n\n # See http://stackoverflow.com/questions/2937465/what-is-correct-content-type-for-excel-files for Excel\n # content-type.\n\n response = Response(content_type = 'application/vnd.ms-excel')\n\n response.app_iter = FileIter(excel_stream)\n response.headers['Content-Disposition'] = \"attachment; filename=phonology.xls\"\n\n return response", "def get_o(self):\n return self.o", "def __repr__(self):\n return \"{} ({}) <{}:{} pheno: {} study: {}\".format(\n self.id, self.snpid, self.chrom, self.pos, self.phenotype_desc,\n self.study.title\n )", "def object(self):", "def __str__(self):\r\n\r\n return 'Perspective(%s)' % self.id", "def clone(self, *args):\n return _SALOMERuntime.InputStudyPort_clone(self, *args)", "def get_main_object(tc):\n return Daal(tc)", "def get_scn_obs_date(self, unq_id):\n import copy\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id).all()\n ses.close()\n scn_record = None\n if query_result is not None:\n if len(query_result) == 1:\n scn_record = query_result[0]\n else:\n logger.error(\n \"PID {0} has returned more than 1 scene - must be unique something really wrong.\".format(\n unq_id))\n raise EODataDownException(\n \"There was more than 1 scene which has been found - something has gone really wrong!\")\n else:\n logger.error(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n raise EODataDownException(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n return copy.copy(scn_record.BeginPosition)", "def create_isa_study(brapi_study_id):\n brapi_study = get_brapi_study(brapi_study_id)\n this_study = Study(filename=\"s_\" + str(brapi_study_id) + \".txt\")\n this_study.identifier = brapi_study['studyDbId']\n if 'name' in brapi_study:\n this_study.title = brapi_study['name']\n elif 'studyName' in brapi_study:\n this_study.title = brapi_study['studyName']\n\n this_study.comments.append(Comment(name=\"Study Start Date\", value=brapi_study['startDate']))\n this_study.comments.append(Comment(name=\"Study End Date\", value=brapi_study['endDate']))\n if brapi_study['location'] is not None and brapi_study['location']['name'] is not None :\n this_study.comments.append(Comment(name=\"Study Geographical Location\",\n value=brapi_study['location']['name']))\n else:\n this_study.comments.append(Comment(name=\"Study Geographical Location\",value=\"\"))\n\n study_design = brapi_study['studyType']\n oa_st_design = OntologyAnnotation(term=study_design)\n this_study.design_descriptors = [oa_st_design]\n\n oref_tt = OntologySource(name=\"OBI\", description=\"Ontology for Biomedical Investigation\")\n oa_tt = OntologyAnnotation(term=\"phenotyping\", term_accession=\"\", term_source=oref_tt)\n oref_mt = OntologySource(name=\"OBI\", description=\"Ontology for Biomedical 
Investigation\")\n oa_mt = OntologyAnnotation(term=\"multi-technology\", term_accession=\"\", term_source=oref_mt)\n isa_assay_file = \"a_\" + str(brapi_study_id) + \".txt\"\n this_assay = Assay(measurement_type=oa_tt, technology_type=oa_mt, filename=isa_assay_file)\n this_study.assays.append(this_assay)\n\n return this_study", "def get_study_info(self,std_id):\n raise NotImplementedError", "def get_queryset(self):\n return Objective.objects.order_by('perspective')", "def example_clinical_data(study_name, environment):\n\n odm = ODM(\"test system\")(\n ClinicalData(\"Mediflex\", \"DEV\")(\n SubjectData(\"MDSOL\", \"IJS TEST4\", transaction_type=\"Insert\")(\n StudyEventData(\"SUBJECT\")(\n FormData(\"EN\", transaction_type=\"Update\")(\n # Although Signature is ODM1.3.1 RWS does not support it inbound currently\n # RWSBuilders do support outbound generation of Signature at FormData level\n # Signature()(\n # UserRef(\"isparks\"),\n # LocationRef(\"MDSOL\"),\n # SignatureRef(\"APPROVED\"),\n # DateTimeStamp(datetime(2015, 9, 11, 10, 15, 22, 80))\n # ),\n ItemGroupData()(\n ItemData(\"SUBJINIT\", \"AAA\")(\n AuditRecord(edit_point=AuditRecord.EDIT_DATA_MANAGEMENT,\n used_imputation_method= False,\n identifier='X2011',\n include_file_oid=False)(\n UserRef(\"isparks\"),\n LocationRef(\"MDSOL\"),\n ReasonForChange(\"Data Entry Error\"),\n DateTimeStamp(datetime(2015, 9, 11, 10, 15, 22, 80))\n ),\n MdsolQuery(value=\"Subject initials should be 2 chars only.\", recipient=\"Site from System\",\n status=QueryStatusType.Open)\n ),\n ItemData(\"SUBJID\", '001')\n )\n )\n )\n )\n )\n )\n return odm", "def __repr__(self):\n return f'ResidenciaModel(name={self.neighbourhood_group}, neighbourhood={self.room_type})'", "def ortho(self):\r\n\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n\r\n x = _vec3(m11, m21, m31)\r\n y = _vec3(m12, m22, m32)\r\n z = _vec3(m13, m23, m33)\r\n\r\n xl = x.length()\r\n xl*=xl\r\n y = y - ((x*y)/xl)*x\r\n z = z - ((x*z)/xl)*x\r\n\r\n yl = y.length()\r\n yl*=yl\r\n z = z - ((y*z)/yl)*y\r\n\r\n return mat4( x.x, y.x, z.x, m14,\r\n x.y, y.y, z.y, m24,\r\n x.z, y.z, z.z, m34,\r\n m41, m42, m43, m44)", "def search_research_studies_with_observations():\n return ResearchStudy.where(struct={}).include('focus', Observation, reverse=True)", "def get_objectives(self):\n return copy.deepcopy(self.objectives), self.gates_names", "def get_scn_record(self, unq_id):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id).all()\n ses.close()\n scn_record = None\n if query_result is not None:\n if len(query_result) == 1:\n scn_record = query_result[0]\n else:\n logger.error(\n \"PID {0} has returned more than 1 scene - must be unique something really wrong.\".format(unq_id))\n raise EODataDownException(\n \"There was more than 1 scene which has been found - something has gone really wrong!\")\n else:\n logger.error(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n raise EODataDownException(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n return scn_record", "def get_full_article(self):\n raise NotImplementedError", "def get_rms(self):\r\n return self.rms.copy()", "def substantiate():", "def __init__(self, name=None, dss=28, 
date=None, project='SolarPatrol'):\n self.logger = logging.getLogger(logger.name+\".Observation\")\n DR.Observation.__init__(self, name=date, date=date, dss=dss, \n project=project)\n self.extended_init()\n \n #self.obs =Astronomy.Ephem.DSS(dss)\n #y,d = date.split('/')\n #self.year = int(y); self.DOY = int(d)\n #projdatapath, self.sessionpath, rawdatapath = \\\n # DR.get_obs_dirs(project, dss, self.year, self.DOY,\n # datafmt=None)", "def create_general_object(self, x, y):\n return self.img[y-self.rad:y+self.rad, x-self.rad:x+self.rad]", "def projection(self):\n pass", "def init_od_sr(state_dict: Dict) -> SpectralResidual:\n od = SpectralResidual(threshold=state_dict['threshold'],\n window_amp=state_dict['window_amp'],\n window_local=state_dict['window_local'],\n n_est_points=state_dict['n_est_points'],\n n_grad_points=state_dict['n_grad_points'])\n return od" ]
[ "0.6370708", "0.5323299", "0.51920646", "0.5153776", "0.5107432", "0.5070821", "0.5055284", "0.49911162", "0.49496976", "0.49446896", "0.49231794", "0.49001023", "0.4827991", "0.48052597", "0.4804953", "0.47864586", "0.4774238", "0.47692737", "0.4762107", "0.4743235", "0.47330493", "0.47329876", "0.47263598", "0.47207245", "0.471975", "0.47196102", "0.4711031", "0.47081006", "0.4705919", "0.4705121" ]
0.7259197
0
return orthanc objects of studies
def orthanc_studies(self):
    return [orthanc.study(x.orthanc_id) for x in self.studies]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n return orthanc.study(self.orthanc_id)", "def studies(self):\n return self._study_queryset", "def DumpStudies():\n for name in myStudyManager.GetOpenStudies():\n s=myStudyManager.GetStudyByName(name)\n print \"study:\",name, s._get_StudyId()\n DumpStudy(s)", "def search_research_studies_with_observations():\n return ResearchStudy.where(struct={}).include('focus', Observation, reverse=True)", "def objects_rst(self):\n return [_.as_rst for _ in self.objects]", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def visible_objects_and_duplis():\r\n \r\n for obj in context.visible_objects:\r\n if obj.type == 'MESH':\r\n yield (obj, obj.matrix_world.copy())\r\n \r\n if obj.dupli_type != 'NONE':\r\n obj.dupli_list_create(scene)\r\n for dob in obj.dupli_list:\r\n obj_dupli = dob.object\r\n if obj_dupli.type == 'MESH':\r\n yield (obj_dupli, dob.matrix.copy())\r\n \r\n obj.dupli_list_clear()", "def anvil_research_studies_with_observations(anvil_server, search_research_studies_with_observations):\n return search_research_studies_with_observations.perform_resources(anvil_server.server)", "def objects(self):", "def get_administerable_studies_by_name():\n researcher_admin = get_session_researcher()\n if researcher_admin.site_admin:\n studies = Study.get_all_studies_by_name()\n else:\n studies = researcher_admin.get_administered_studies_by_name()\n return studies", "def get_queryset(self):\n return Objective.objects.order_by('perspective')", "def model_cohort(cohort):\n model_traj = []\n for part in cohort:\n for traj in part.trajectories:\n if traj.filter is True:\n model_traj.append(\n modelling.model(x=traj.data.age.tolist(),\n y=traj.data.AF.tolist(),\n mutation=traj.mutation,\n variant_class=traj.variant_class,\n gene=traj.mutation.split()[0],\n id=part.id,\n p_key=traj.p_key))\n return model_traj", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n yield (obj, obj.matrix_world.copy())\n\n if obj.instance_type != 'NONE':\n obj.dupli_list_create(scene)\n for dob in obj.dupli_list:\n obj_dupli = dob.object\n if obj_dupli.type == 'MESH':\n yield (obj_dupli, dob.matrix.copy())\n\n obj.dupli_list_clear()", "def get_objectives(self):\n return copy.deepcopy(self.objectives), self.gates_names", "def objets_uniques(self):\n objets = []\n for membre in self.membres:\n for objet in membre.equipe:\n if objet.unique:\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n if membre.tenu and membre.tenu.unique:\n objet = membre.tenu\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n\n return objets", "def all_research_studies_with_observations(aggregated_server, search_research_studies_with_observations):\n return search_research_studies_with_observations.perform_resources(aggregated_server.server)", "def composeWorkplaceOntology():\n\n import ossPyFuncs \n import pandas as pd\n \n #mysql query to extract full table from government organizations\n #certian table columns feature capital letters which cases uproblems\n postgreSql_selectQuery=\"SELECT * FROM us_gov_manual.us_govman_2019 ;\"\n #pass querry and obtain table\n govTable=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n\n #mysql query to obtain 
academic instutions\n postgreSql_selectQuery=\"SELECT institution FROM hipolabs.universities ;\"\n #pass querry and obtain table\n univTable=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2018_us1000;\"\n businesses1=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2019_us1000;\"\n businesses2=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2020_global2000;\"\n businesses3=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n\n #combine theinsitutions into a vector\n combinedSeries=[govTable['AgencyName'],univTable['institution'],businesses1['company'],businesses2['company'],businesses3['company']]\n #turn the multi item vector into a single series\n fullWordbank=pd.concat(combinedSeries)\n #turn that series into a pd dataframe\n wordbankTable=pd.DataFrame(fullWordbank.unique())\n\n return wordbankTable", "def phonology(request):\n\n perspective_cid = request.params.get('perspective_client_id')\n perspective_oid = request.params.get('perspective_object_id')\n\n # Checking if we have limits on number of computed results.\n\n limit = (None if 'limit' not in request.params else\n int(request.params.get('limit')))\n\n limit_exception = (None if 'limit_exception' not in request.params else\n int(request.params.get('limit_exception')))\n\n limit_no_vowel = (None if 'limit_no_vowel' not in request.params else\n int(request.params.get('limit_no_vowel')))\n\n limit_result = (None if 'limit_result' not in request.params else\n int(request.params.get('limit_result')))\n\n # TODO: get perspective's translation and language it belongs to.\n\n # We get lexical entries of this perspective with markup'ed sounds.\n\n Sound = aliased(Entity, name = \"Sound\")\n PublishingSound = aliased(PublishingEntity, name = \"PublishingSound\")\n\n query = DBSession.query(LexicalEntry, Entity, Sound, PublishingEntity, PublishingSound).filter(and_(\n LexicalEntry.parent_client_id == perspective_cid,\n LexicalEntry.parent_object_id == perspective_oid,\n LexicalEntry.marked_for_deletion == False,\n Entity.parent_client_id == LexicalEntry.client_id,\n Entity.parent_object_id == LexicalEntry.object_id,\n Entity.marked_for_deletion == False,\n Entity.additional_metadata.contains({\"data_type\": \"praat markup\"}),\n PublishingEntity.client_id == Entity.client_id,\n PublishingEntity.object_id == Entity.object_id,\n PublishingEntity.published == True,\n PublishingEntity.accepted == True,\n Sound.client_id == Entity.self_client_id,\n Sound.object_id == Entity.self_object_id,\n Sound.marked_for_deletion == False,\n PublishingSound.client_id == Sound.client_id,\n PublishingSound.object_id == Sound.object_id,\n PublishingSound.published == True,\n PublishingSound.accepted == True))\n\n # We process these lexical entries in batches. 
Just in case, it seems that perspectives rarely have more\n # then several hundred such lexical entries.\n\n exception_counter = 0\n no_vowel_counter = 0\n result_list = list()\n\n for index, row in enumerate(query.yield_per(100)):\n\n markup_url = row.Entity.content\n sound_url = row.Sound.content\n\n cache_key = 'phonology:{0}:{1}:{2}:{3}'.format(\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id)\n\n # Checking if we have cached result for this pair of sound/markup.\n\n cache_result = CACHE.get(cache_key)\n\n if cache_result == 'no_vowel':\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]: no vowels\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we have cached exception, we do the same as with absence of vowels, show its info and\n # continue.\n\n elif isinstance(cache_result, tuple) and cache_result[0] == 'exception':\n exception, traceback_string = cache_result[1:3]\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n '[CACHE {7}]: exception\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n log.debug(traceback_string)\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we actually have the result, we use it and continue.\n\n elif cache_result:\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in cache_result)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]:\\n{8}\\n{9}\\n{10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url, result_string))\n\n result_list.append(cache_result)\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n continue\n\n try:\n # Getting markup, checking for each tier if it needs to be processed.\n\n markup_bytes = urllib.request.urlopen(urllib.parse.quote(markup_url, safe = '/:')).read()\n\n textgrid = pympi.Praat.TextGrid(xmax = 0)\n textgrid.from_file(\n io.BytesIO(markup_bytes),\n codec = chardet.detect(markup_bytes)['encoding'])\n\n tier_data_list = []\n vowel_flag = False\n\n for tier_number, tier_name in textgrid.get_tier_name_num():\n\n raw_interval_list = textgrid.get_tier(tier_number).get_all_intervals()\n raw_interval_seq_list = [[]]\n\n # Splitting interval sequence on empty intervals.\n\n for raw_index, interval in enumerate(raw_interval_list):\n\n if len(interval[2].strip()) <= 0:\n if len(raw_interval_seq_list[-1]) > 0:\n raw_interval_seq_list.append([])\n\n 
else:\n raw_interval_seq_list[-1].append((raw_index, interval))\n\n if len(raw_interval_seq_list[-1]) <= 0:\n del raw_interval_seq_list[-1]\n\n # Selecting interval sequences for analysis, checking if we have unusual markup.\n \n interval_seq_list = []\n interval_idx_to_raw_idx = dict()\n\n unusual_markup_flag = False\n unusual_markup_list = []\n\n for raw_interval_seq in raw_interval_seq_list:\n\n interval_seq_list.append([])\n interval_idx_to_raw_idx[len(interval_seq_list) - 1] = {}\n\n for partial_raw_index, (raw_index, interval) in enumerate(raw_interval_seq):\n\n interval_text = interval[2].strip()\n\n # Accepting interval if its text contains at least one vowel, and is short enough or\n # is a valid phonetic transcription.\n\n transcription_check = re.fullmatch(transcription_re, interval_text)\n\n if (len(interval_text) > 0 and\n any(character in vowel_set for character in interval_text) and\n (len(interval_text) <= 2 or transcription_check)):\n\n interval_seq_list[-1].append(interval)\n\n sequence_index = len(interval_seq_list) - 1\n interval_index = len(interval_seq_list[-1]) - 1\n\n interval_idx_to_raw_idx[(sequence_index, interval_index)] = raw_index\n interval_idx_to_raw_idx[sequence_index][interval_index] = partial_raw_index\n\n # Noting if the interval contains unusual (i.e. non-transcription) markup.\n\n elif not transcription_check:\n\n unusual_markup_flag = True\n unusual_markup_list.append((raw_index, interval))\n\n transcription_list = [text for begin, end, text in raw_interval_list]\n transcription = ''.join(transcription_list)\n\n selected_list = [text\n for interval_list in interval_seq_list\n for begin, end, text in interval_list]\n\n selected = ''.join(selected_list)\n\n # If we have intervals with unusual markup, we report them.\n\n if unusual_markup_flag:\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' has interval(s) with unusual transcription text: '\n '{9} / {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription, dict(unusual_markup_list)))\n\n # If the markup does not have any vowels, we note it and also report it.\n\n if all(character not in vowel_set for character in transcription):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' doesn\\'t have any vowel markup: {9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription_list))\n\n # It is also possible that while full transcription has vowels, intervals selected for\n # analysis do not. 
In that case we also note it and report it.\n\n elif not any(character in vowel_set for character in selected):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel_selected'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' intervals to be processed don\\'t have any vowel markup: '\n 'markup {9}, selected {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name,\n transcription_list, selected_list))\n\n # Otherwise we store tier data to be used during processing of the sound file.\n\n else:\n tier_data_list.append((tier_number, tier_name,\n (raw_interval_list, raw_interval_seq_list, interval_seq_list,\n interval_idx_to_raw_idx, transcription)))\n\n vowel_flag = True\n\n # If there are no tiers with vowel markup, we skip this sound-markup file altogether.\n\n if not vowel_flag:\n\n CACHE.set(cache_key, 'no_vowel')\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # Otherwise we retrieve the sound file and analyse each vowel-containing markup.\n # Partially inspired by source code at scripts/convert_five_tiers.py:307.\n\n sound = None\n with tempfile.NamedTemporaryFile() as temp_file:\n\n sound_file = urllib.request.urlopen(urllib.parse.quote(sound_url, safe = '/:'))\n temp_file.write(sound_file.read())\n temp_file.flush()\n\n sound = AudioPraatLike(pydub.AudioSegment.from_wav(temp_file.name))\n\n tier_result_list = []\n\n for tier_number, tier_name, tier_data in tier_data_list:\n\n if tier_data == 'no_vowel' or tier_data == 'no_vowel_selected':\n tier_result_list.append((tier_number, tier_name, tier_data))\n continue\n\n # Analyzing vowel sounds of each interval sequence.\n\n (raw_interval_list, raw_interval_seq_list, interval_seq_list, interval_idx_to_raw_idx,\n transcription) = tier_data\n\n tier_result_list.append((tier_number, tier_name, []))\n\n for seq_index, (raw_interval_list, interval_list) in enumerate(zip(\n raw_interval_seq_list, interval_seq_list)):\n\n if len(interval_list) <= 0:\n continue\n\n (max_intensity_index, max_intensity, max_length_index, max_length) = \\\n find_max_interval_praat(sound, interval_list)\n\n max_intensity_interval = interval_list[max_intensity_index]\n max_length_interval = interval_list[max_length_index]\n\n max_intensity_f1_f2 = sound.get_interval_formants(*max_intensity_interval[:2])\n max_length_f1_f2 = sound.get_interval_formants(*max_length_interval[:2])\n\n # Compiling results.\n\n max_length_str = '{0} {1:.3f} [{2}]'.format(\n max_length_interval[2], max_length,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_length_index]])))\n\n max_intensity_str = '{0} {1:.3f} [{2}]'.format(\n max_intensity_interval[2],\n max_intensity,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_intensity_index]])))\n\n tier_result_list[-1][2].append([\n ''.join(text for index, (begin, end, text) in raw_interval_list),\n max_length_str,\n '{0:.3f}'.format(max_length_f1_f2[0]),\n '{0:.3f}'.format(max_length_f1_f2[1]),\n max_intensity_str,\n '{0:.3f}'.format(max_intensity_f1_f2[0]),\n '{0:.3f}'.format(max_intensity_f1_f2[1]),\n '+' if max_intensity_index == max_length_index else '-'])\n\n # Saving result.\n\n 
result_list.append(tier_result_list)\n CACHE.set(cache_key, tier_result_list)\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in tier_result_list)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}):'\n '\\n{7}\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url, result_string))\n\n # Stopping earlier, if required.\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n except Exception as exception:\n\n #\n # NOTE\n #\n # Exceptional situations encountered so far:\n #\n # 1. TextGrid file actually contains sound, and wav file actually contains textgrid markup.\n #\n # Perspective 330/4, LexicalEntry 330/7, sound-Entity 330/2328, markup-Entity 330/6934\n #\n # 2. Markup for one of the intervals contains a newline \"\\n\", and pympi fails to parse it.\n # Praat parses such files without problems.\n #\n # Perspective 330/4, LexicalEntry 330/20, sound-Entity 330/6297, markup-Entity 330/6967\n #\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'exception\\n{7}\\n{8}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url))\n\n # if we encountered an exception, we show its info and remember not to try offending\n # sound/markup pair again.\n\n traceback_string = ''.join(traceback.format_exception(\n exception, exception, exception.__traceback__))[:-1]\n\n log.debug(traceback_string)\n\n CACHE.set(cache_key, ('exception', exception,\n traceback_string.replace('Traceback', 'CACHEd traceback')))\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n log.debug('phonology {0}/{1}: {2} result{3}, {4} no vowels, {5} exceptions'.format(\n perspective_cid, perspective_oid,\n len(result_list), '' if len(result_list) == 1 else 's',\n no_vowel_counter, exception_counter))\n\n # If we have no results, we indicate the situation and also show number of failures and number of\n # markups with no vowels.\n\n if not result_list:\n request.response.status = HTTPPreconditionFailed.code\n\n return {\n \"error\": \"no markups for this query\",\n \"exception_counter\": exception_counter,\n \"no_vowel_counter\": no_vowel_counter}\n\n # Otherwise we create and then serve Excel file.\n\n excel_book = xlwt.Workbook(encoding = \"utf-8\")\n sheet = excel_book.add_sheet(\"Sheet 1\")\n\n sheet.write(0, 0, 'Transcription')\n sheet.write(0, 1, 'Longest (seconds) interval')\n sheet.write(0, 2, 'F1 (Hz)')\n sheet.write(0, 3, 'F2 (Hz)')\n sheet.write(0, 4, 'Highest intensity (dB) interval')\n sheet.write(0, 5, 'F1 (Hz)')\n sheet.write(0, 6, 'F2 (Hz)')\n sheet.write(0, 7, 'Coincidence')\n\n row_counter = 1\n\n for tier_result_list in result_list:\n for tier_number, tier_name, tier_result_seq_list in tier_result_list:\n\n if tier_result_seq_list == 'no_vowel':\n continue\n\n for tier_data in tier_result_seq_list:\n for 
index, tier_data_str in enumerate(tier_data):\n sheet.write(row_counter, index, tier_data_str)\n\n row_counter += 1\n\n # Formatting column widths.\n\n sheet.col(0).width = 24 * 256\n sheet.col(1).width = 24 * 256\n sheet.col(2).width = 12 * 256\n sheet.col(3).width = 12 * 256\n sheet.col(4).width = 24 * 256\n sheet.col(5).width = 12 * 256\n sheet.col(6).width = 12 * 256\n sheet.col(7).width = 12 * 256\n\n excel_stream = io.BytesIO()\n excel_book.save(excel_stream)\n excel_stream.seek(0)\n\n # See http://stackoverflow.com/questions/2937465/what-is-correct-content-type-for-excel-files for Excel\n # content-type.\n\n response = Response(content_type = 'application/vnd.ms-excel')\n\n response.app_iter = FileIter(excel_stream)\n response.headers['Content-Disposition'] = \"attachment; filename=phonology.xls\"\n\n return response", "def getHierarchies():", "def getHierarchies():", "def iterate_studies(self, start, end):\n pass", "def get_queryset(self):\n return Initiative.objects.filter(objective__perspective__description='Financial').order_by('objective')", "def _get_all_oshapes(self):\n an_iname = self.node_list[0]\n an_inode = self.builder.nodes[an_iname]\n an_ishape = an_inode.oshapes['loc']\n \n return {'main' : an_ishape,\n 'loc' : an_ishape,\n 'cov' : an_ishape + [an_ishape[-1]]}", "def findAtypicalTerms(self):\n self.atypicalTermsDict = collections.OrderedDict()\n distanceList = list()\n distance = 0\n for key in self.summaryFilteredDict:\n partitionName = str(key).split(\" :\")[0]\n partition = voc.getPartition(partitionName)\n modNames = partition.getModNames()\n currentModality = str(key).split(\": \")[1]\n indexCurrentModality = modNames.index(currentModality)\n coverCurrentModality = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,partitionName + \" : \" + currentModality) #cover(v',R)\n if coverCurrentModality > 0:\n for modality in partition.getModalities():\n coverModality = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,partitionName + \" : \" + modality.getName()) # cover(v,R)\n if modality.isTrapeziumModality():\n indexModality = modNames.index(modality.getName())\n distance = abs(indexCurrentModality - indexModality) / (partition.getNbModalities() - 1) #d(v,v')\n elif modality.isEnumModality():\n if (modality.getName() == currentModality):\n distance = 0\n else:\n distance = 1\n distanceList.append(min(distance, 1 - coverCurrentModality, coverModality)) # min(d(v,v'),cover(v,R),1-cover(v',R))\n self.atypicalTermsDict[partitionName + \" : \" + currentModality] = max(distanceList) # D(v',R)\n distanceList = list()", "def all_objects():\n objs = {}\n objs['Section'] = list(h.all_sec())\n objs['Segment'] = []\n for sec in objs['Section']:\n objs['Segment'].extend(list(sec.allseg()))\n objs['PointProcess'] = []\n for seg in objs['Segment']:\n objs['PointProcess'].extend(list(seg.point_processes()))\n \n return objs", "def demo_ortho_slicer():\n pl.clf()\n oslicer = OrthoSlicer(cut_coords=(0, 0, 0))\n from .anat_cache import _AnatCache\n map, affine, _ = _AnatCache.get_anat()\n oslicer.plot_map(map, affine, cmap=pl.cm.gray)\n return oslicer", "def open_iossifov_neuron_cohort():\n logging.info('getting Iossifov et al Neuron 2012 cohort')\n s1 = pandas.read_excel(supp_s1_url, sheet_name='SNV.v4.1-normlized')\n s2 = pandas.read_excel(supp_s2_url, sheet_name='suppLGKTable')\n s3 = pandas.read_excel(supp_s3_url, sheet_name='ID.v4.1-normlized')\n \n fam_ids = list(s1.quadId) + list(s2.quadId) + list(s3.quadId)\n members = list(s1.inChild) + 
list(s2.inChild) + list(s3.inChild)\n \n sex = ['M', 'F']\n affected = ['aut', 'sib']\n possible = list(itertools.product(affected, sex))\n study = ['10.1016/j.neuron.2012.04.009']\n \n persons = set()\n for fam, children in zip(fam_ids, members):\n for affected, sex in possible:\n string = f'{affected}{sex}'\n if string in children:\n status = ['unaffected'] if affected != 'aut' else ['HP:0000717']\n member = 's1' if affected != 'aut' else 'p1'\n sex = 'female' if sex == 'F' else 'male'\n person_id = f'{fam}.{member}|asd_cohorts'\n \n person = Person(person_id, sex, status, study)\n persons.add(person)\n \n return persons", "def make_openalex_dataset(dataset: ObservatoryDataset) -> List[dict]:\n\n result = []\n for paper in dataset.papers:\n entry = {\n \"id\": str(paper.id),\n \"doi\": f\"https://doi.org/{paper.doi}\",\n \"cited_by_count\": len(paper.cited_by),\n \"concepts\": [\n {\"id\": str(fos.id), \"display_name\": fos.name, \"level\": fos.level} for fos in paper.fields_of_study\n ],\n \"authorships\": [\n {\n \"author\": {\n \"id\": str(author.id),\n \"display_name\": author.name,\n },\n \"institutions\": [\n {\n \"id\": str(author.institution.id),\n \"ror\": author.institution.ror_id,\n \"display_name\": author.institution.name,\n \"country_code\": author.institution.country_code,\n \"type\": author.institution.types,\n }\n ],\n }\n for author in paper.authors\n ],\n }\n result.append(entry)\n\n return result", "def salome_study_init(theStudyId=0):\n\n global salome_study_initial\n global myStudyManager, myStudyId, myStudy, myStudyName\n global orb, lcc, naming_service, cm\n\n if salome_study_initial:\n salome_study_initial = 0\n\n orb, lcc, naming_service, cm = salome_kernel.salome_kernel_init()\n\n # get Study Manager reference\n if verbose(): print \"looking for studyManager ...\"\n obj = naming_service.Resolve('myStudyManager')\n myStudyManager = obj._narrow(SALOMEDS.StudyManager)\n if verbose(): print \"studyManager found\"\n\n # get active study Id, ref and name\n myStudyId = getActiveStudy(theStudyId)\n if verbose(): print \"myStudyId\",myStudyId\n myStudy = myStudyManager.GetStudyByID(myStudyId)\n myStudyName = myStudy._get_Name()\n\n return myStudyManager, myStudyId, myStudy, myStudyName" ]
[ "0.6256025", "0.5748354", "0.5600851", "0.5587314", "0.5289076", "0.52684903", "0.52684903", "0.5218291", "0.50912505", "0.509042", "0.50746655", "0.50711817", "0.50640666", "0.5047204", "0.49905527", "0.49653897", "0.49444157", "0.49329725", "0.49328998", "0.4929676", "0.4929676", "0.49144045", "0.48689863", "0.48640102", "0.48568565", "0.48278075", "0.48118252", "0.47862917", "0.4778137", "0.4765602" ]
0.7746891
0
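For readers who want to consume rows like the ones above programmatically, the following is a minimal sketch of loading the dataset and unpacking the fields listed in the schema (query, document, metadata, negatives, negative_scores, document_score, document_rank) with the Hugging Face `datasets` library. The repository path "user/code-retrieval-triplets" and the split name "train" are hypothetical placeholders, not the dataset's actual identifiers; everything else follows the field names and value formats visible in the rows above (scores are stored as strings, negatives and negative_scores are length-30 sequences).

    # Minimal sketch, assuming a hypothetical repository path and split name.
    from datasets import load_dataset

    dataset = load_dataset("user/code-retrieval-triplets", split="train")  # placeholder path/split

    for row in dataset.select(range(3)):
        query = row["query"]            # natural-language request, e.g. "plot the background stars (HR diagram)"
        document = row["document"]      # the matching code snippet for the query
        negatives = row["negatives"]    # 30 non-matching code snippets
        scores = [float(s) for s in row["negative_scores"]]   # 30 similarity scores, stored as strings
        doc_score = float(row["document_score"])              # similarity score of the matching document
        doc_rank = row["document_rank"]                       # one of two classes, e.g. "0" in the rows above

        # In the rows shown here the matching document outscores every negative;
        # whether that holds for all rows is not guaranteed by the schema.
        print(query[:60], doc_score, max(scores), doc_rank)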