query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (listlengths 30) | negative_scores (listlengths 30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Makes some plots of basis and weights from PCA. | def plot_pca(basis, weights):
    file_name = '../Data/lhc_512_5.txt'
    params = np.loadtxt(file_name)
    ncomp, imsize = basis.shape
    npix = int(np.sqrt(imsize))
    ncol = int(ncomp//2)
    nsamp, _ = weights.shape
    # Shows the basis images
    for i in range(ncomp):
        plt.subplot(2, ncol, i+1)
        plt.imshow(basis[i].reshape((npix, npix)))
    plt.show()
    # Scatter plots
    for i in range(ncomp):
        # parameter number (0: flux, 1: radius, 2: g1 shear, 3: g2 shear, 4: psf fwhm)
        par = 3
        # weight number (x-axis) 0 -> ncomp-1
        w = 11
        plt.subplot(2, ncol, i+1)
        plt.scatter(weights[:, w], weights[:, i], s=1, c=params[:, par])
        plt.ylabel('Weight '+str(i+1), size=15)
        plt.xlabel('Weight '+str(w+1), size=15)
        plt.colorbar()
    plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def biplot(score,coeff,pcax,pcay,labels=None,nm=None):\n pca1=pcax-1\n pca2=pcay-1\n xs = score[:,pca1]\n ys = score[:,pca2]\n n=score.shape[1]\n if nm == None:\n nm = n\n #construct scales to constrain data between -1 and 1\n scalex = 1.0/(xs.max()- xs.min())\n scaley = 1.0/(ys.max()- ys.min())\n \n #scatter data points in the new basis span by pca1 and pca2\n plt.scatter(xs*scalex,ys*scaley, marker='.',edgecolor='none')\n vectors = []\n \n #overlay transforms of original features in the new basis\n for i in range(n):\n #calculate length of vectors in new basis\n vectors.append((labels[i],np.sqrt(coeff[i,pca1]**2 + coeff[i,pca2]**2)))\n #plot arrow vectors\n plt.arrow(0, 0, coeff[i,pca1], coeff[i,pca2],color='g',alpha=0.5) \n #add labels\n if labels is None:\n plt.text(coeff[i,pca1]* 1.15, coeff[i,pca2] * 1.15, \"Var\"+str(i+1), color='k', ha='center', va='center')\n else:\n plt.text(coeff[i,pca1]* 1.15, coeff[i,pca2] * 1.15, labels[i], color='k', ha='center', va='center')\n plt.xlim(-1,1)\n plt.ylim(-1,1)\n plt.xlabel(\"PC{}\".format(pcax))\n plt.ylabel(\"PC{}\".format(pcay))\n plt.grid()\n plt.show()\n #print \"Feature import (PCA)\"\n #print \"--------------------\"\n vectors = sorted(vectors,key=lambda x:x[1],reverse=False)\n \n plt.barh(range(len(vectors)),map(lambda x:x[1],vectors),edgecolor='none')\n plt.yticks(np.arange(len(vectors))+0.4,map(lambda x:x[0],vectors))\n plt.xlabel('Feature importance')\n plt.grid()\n plt.show()\n #pprint(vectors)\n return vectors",
"def visualize(vis, features, label):\n if vis == 'PCA':\n #n_components = st.sidebar.slider(\"n_components\", 2, 10)\n #alpha = st.sidebar.slider(\"alpha\", 0.8, 2.0)\n #pca = PCA(n_components)\n pca = PCA(2)\n\n X_projected = pca.fit_transform(features)\n \n x1 = X_projected[:, 0]\n x2 = X_projected[:, 1]\n\n\n fig = plt.figure()\n plt.scatter(x1, x2, c=label, alpha=0.8, cmap='viridis')\n plt.xlabel(\"Principal Component 1\")\n plt.ylabel(\"Principal Component 2\")\n plt.colorbar()\n\n st.pyplot()",
"def plot_3d_B(B, data_name=\"B\"):\n size_label = 15\n size_title = 20\n size_legend = 15\n size_tick = 12\n sns.set_style(\"white\")\n\n pca = PCA(n_components=3)\n pca.fit(B.T)\n B_pca = pca.transform(B.T)\n\n idxp = [idx*2 for idx in range(B.shape[1]/2)]\n idxm = [idx*2+1 for idx in range(B.shape[1]/2)]\n\n fig = plt.figure()\n ax = plt.axes(projection=\"3d\")\n for idx in range(len(idxp)):\n ax.plot3D(\n [B_pca[idxp[idx],0],B_pca[idxm[idx],0]],\n [B_pca[idxp[idx],1],B_pca[idxm[idx],1]],\n [B_pca[idxp[idx],2],B_pca[idxm[idx],2]],\n \"gray\",alpha=0.5)\n\n ax.plot3D(B_pca[idxp,0], B_pca[idxp,1], B_pca[idxp,2], \"2\",label=\"Primary\", markersize=10)\n ax.plot3D(B_pca[idxm,0], B_pca[idxm,1], B_pca[idxm,2],\"1\",label=\"Metastatic\", markersize=10)\n\n ax.legend(fancybox=True, framealpha=0.5, prop={\"size\":size_legend})\n\n plt.xlabel(\"PCA 1\", fontsize=size_label)\n plt.ylabel(\"PCA 2\", fontsize=size_label)\n ax.set_zlabel(\"PCA 3\", fontsize=size_label)\n plt.tick_params(labelsize=size_tick)\n plt.title(data_name, fontsize=size_title)\n plt.xlim([-15,10])\n plt.ylim([0,15])\n ax.set_zlim(-8,4)\n ##fig.savefig(\"figures/fig12pcapathway.pdf\", bbox_inches=\"tight\")",
"def plot_PCA():\n X, languages = prepare_data_matrix()\n #print(X)\n eigenvectors, eigenvalues=power_iteration_two_components(X)\n explain = explained_variance_ratio(X, eigenvectors, eigenvalues)\n X=project_to_eigenvectors(X,eigenvectors)\n\n #print(X)\n plt.title('Explained variance: %.3f' % explain)\n plt.scatter(X[:,0], X[:,1])\n for i in range(len(X)):\n plt.text(X[i,0], X[i,1], languages[i][:3])\n plt.show()",
"def pca_visual(X_data, Y_data, dict_CLnames, comp=False, clusters=None,):\n pca = PCA(2) # project from 72 to 2 dimensions\n X_pca = pca.fit_transform(X_data)\n\n #encode class labels into numeric values\n le = preprocessing.LabelEncoder()\n label_encoder = le.fit(Y_data)\n y = label_encoder.transform(Y_data)\n\n Xax=X_pca[:,0] #First Principal Component\n Yax=X_pca[:,1] #Second Principal Component\n labels= y\n cdict={0:'red',1:'green'} #dict with colors\n labl=dict_CLnames\n labl_cl = {0:'cluster 1',1:'cluster 2'}\n if comp == False:\n fig,ax=plt.subplots(figsize=(7,5))\n fig.patch.set_facecolor('white')\n for l in np.unique(labels):\n ix=np.where(labels==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl[l])\n # for loop ends\n plt.xlabel(\"First Principal Component\",fontsize=14)\n plt.ylabel(\"Second Principal Component\",fontsize=14)\n plt.legend()\n plt.show()\n \n if comp == True:\n fig,axs =plt.subplots(nrows=1, ncols=2, figsize=(15,5))\n fig.patch.set_facecolor('white')\n ax = axs[0]\n for l in np.unique(labels):\n ix=np.where(labels==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl[l])\n # for loop ends\n ax.set_xlabel(\"First Principal Component\",fontsize=14)\n ax.set_ylabel(\"Second Principal Component\",fontsize=14)\n ax.set_title('Original data')\n ax.legend()\n\n \n ax = axs[1]\n for l in np.unique(clusters):\n ix=np.where(clusters==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl_cl[l])\n # for loop ends\n ax.set_xlabel(\"First Principal Component\",fontsize=14)\n ax.set_ylabel(\"Second Principal Component\",fontsize=14)\n ax.set_title('Clustered data')\n ax.legend()\n plt.show()",
"def display(self, covariance=None, logalpha=None, name=None, extra=1,\n k=3, figures=(1,2)):\n pyplot.close('all')\n super(SplineModelPCAGaussianMixture, self).display(covariance,\n logalpha, name, figures[0])\n figurenum = figures[1]\n if PLOTTING_AVAILABLE:\n print \"Okay plotting\"\n pyplot.ioff()\n pyplot.figure(figurenum)\n pyplot.clf()\n rows = cols = np.ceil(np.sqrt(self._ncomponent()))\n if rows * cols == self._ncomponent():\n rows = rows + 1\n ncomp, ndim = self._means.shape\n perspline = (ndim - extra) / 2\n for clust in xrange(self._ncomponent()): \n t = internal_knots(coef2knots(perspline))\n t = np.concatenate((np.zeros(4),t,np.ones(4)))\n pyplot.subplot(rows, cols, clust+1)\n means = self._pc.reconstruct(self._means[clust, :]).squeeze()\n spline1, spline2 = unmix(means,k=k,extra=extra)\n plot_from_spline(spline1)\n plot_from_spline(spline2)\n pyplot.ylim(-0.2, 1.01)\n pyplot.show()\n pyplot.ion()",
"def pca():\n pca = PCA()\n\n data = pca.fit_transform([[22,23,24],[23,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54],[22,23,24],[22,84,12],[22,74,54]])\n\n print(data)",
"def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)",
"def plot_pca(trj: TrajaDataFrame, id_col: str=\"id\", bins: tuple = (8,8), three_dims: bool = False, ax = None):\n from sklearn.decomposition import PCA\n from sklearn.preprocessing import StandardScaler\n\n\n DIMS = 3 if three_dims else 2\n\n # Bin trajectories to trip grids\n grids = []\n ids = trj[id_col].unique()\n\n for id in ids:\n animal = trj[trj[id_col]==id].copy()\n animal.drop(columns=[id_col],inplace=True)\n grid = animal.traja.trip_grid(bins = bins, hist_only=True)[0]\n grids.append(grid.flatten())\n\n # Standardize the data\n gridsarr = np.array(grids)\n X = StandardScaler().fit_transform(gridsarr)\n\n # PCA projection\n pca = PCA(n_components=DIMS)\n X_r = pca.fit(X).transform(X)\n\n # Create plot axes\n if DIMS == 3:\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n if not ax:\n _, ax = plt.subplots()\n \n # Visualize 2D projection\n for idx, animal in enumerate(X_r):\n if DIMS == 2:\n ax.scatter(X_r[idx, 0], X_r[idx, 1], color=f'C{idx}', alpha=.8, lw=2, label=idx)\n elif DIMS == 3:\n ax.scatter(X_r[idx, 0], X_r[idx, 1], ax.scatter[idx,2], color=f'C{idx}', alpha=.8, lw=2, label=idx)\n\n plt.title(\"PCA\")\n plt.legend(title=id_col, loc='best', shadow=False, scatterpoints=1)\n plt.xlabel(\"Principal Component 1\")\n plt.ylabel(\"Principal Component 2\") \n\n return plt.gcf()",
"def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]",
"def cluster_plot(self):\r\n train = StandardScaler().fit_transform(self.X)\r\n pca = PCA(n_components=3)\r\n pca_component = pca.fit_transform(self.X)\r\n fig = plt.figure(figsize=(10,8))\r\n sns.set_palette(sns.color_palette(\"cubehelix\", 8))\r\n ax = Axes3D(fig)\r\n ax.scatter(pca_component[:,0].tolist(),pca_component[:,1].tolist(),pca_component[:,2].tolist(),c=self.labels,marker='v')\r\n ax.legend()\r\n plt.show()",
"def make_plots():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n k = 100\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with open(trained_model_file) as p:\n model = pickle.load(p)\n print \"loaded model\"\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n X_train, y_train = prep.subset(features)\n feature_importance(model, features)\n feature_subset_indices = [73, 13]\n gb_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_gb.pkl\"\n with open(gb_file) as p:\n gb = pickle.load(p)\n make_partial_dependence(gb, X_train, y_train, features, feature_subset_indices)",
"def plot(self):\n fx = self.fitness_functions(self.archive)\n n = len(fx[0])\n\n if n == 2:\n plt.xlabel(\"F1\")\n plt.ylabel(\"F2\")\n plt.suptitle(\"Pareto Front\")\n plt.scatter(fx[:,0], fx[:,1], label='Archive')\n plt.show()\n elif n == 3:\n plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter(fx[:, 0], fx[:, 1], fx[:, 2])\n ax.set_xlabel(\"F1\")\n ax.set_ylabel(\"F2\")\n ax.set_zlabel(\"F3\")\n plt.suptitle(\"Pareto Front of Archive\")\n plt.show()\n else:\n print(\"Cannot Print Multi-Dimensional Front greater than 3D\")",
"def generate_pca(X, y, cols, n_components, **kwargs):\n\n pca = PCA(n_components, **kwargs)\n pca_result = pca.fit_transform(X)\n pca_df = pd.DataFrame(pca_result, columns=cols, index=X.index)\n pca_df['label'] = y\n pca_plot = ggplot(pca_df, aes(x=\"PCA-1\", y=\"PCA-2\", color='label') ) + geom_point(size=100,alpha=0.8) + ggtitle(\"First and Second Principal Components colored by class\")\n return pca_plot",
"def performpca(df, nb_pc=5):\n # Remove uncomplete series\n print(df.shape)\n normalized=(df-df.mean())/df.std()\n # normalized.plot()\n # plt.show()\n pca = PCA(nb_pc)\n pca.fit(normalized)\n return pca, normalized",
"def run_pca(df, cols=None): \n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from sklearn.preprocessing import StandardScaler\n from sklearn.decomposition import PCA\n import mpld3\n\n # Define and markers to use for different categories\n groups_dict = {(u'D', 0):('Germany, unregulated', 'g', 'o'),\n (u'N', 0):('Norway, unregulated', 'b', 'o'),\n (u'D', 1):('Germany, regulated', 'g', '^'),\n (u'N', 1):('Norway, regulated', 'b', '^')}\n \n # Extract cols of interest\n cats = df[['country', 'regulated']]\n\n if cols:\n df = df[cols].astype(float)\n\n # Standardise the feature data\n feat_std = StandardScaler().fit_transform(df)\n\n # Setup PCA. Initially, choose to keep ALL components\n pca = PCA()\n\n # Fit model\n pca.fit(feat_std)\n\n # Get explained variances (in %)\n var_exp = 100*pca.explained_variance_ratio_\n cum_exp = np.cumsum(var_exp)\n\n # Get eigenvalues\n cov_mat = np.cov(feat_std.T)\n eig_vals, eig_vecs = np.linalg.eig(cov_mat)\n\n # Get number of EVs > 1 (Kaiser-Guttman criterion)\n # and print summary\n n_kgc = (eig_vals > 1).sum()\n print 'Variance explained by first %s PCs (%%):\\n' % n_kgc\n print var_exp[:n_kgc]\n print '\\nTotal: %.2f%%' % var_exp[:n_kgc].sum()\n \n # Plot\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 6))\n \n # Explained variance\n axes[0].bar(range(1, len(var_exp)+1), var_exp, \n align='center', label='Individual components')\n axes[0].plot(range(1, len(cum_exp)+1), cum_exp, \n 'r-o', label='Cumulative')\n axes[0].set_xlabel('Principal component')\n axes[0].set_ylabel('Variance explained (%)')\n axes[0].legend(loc='center right')\n \n # Eigenvalues\n axes[1].plot(range(1, len(eig_vals)+1), np.sort(eig_vals)[::-1], \n 'r-o', label='Eigenvalues')\n axes[1].axhline(1, c='k', ls='-', label='Kaiser-Guttman threshold')\n axes[1].set_xlabel('Principal component')\n axes[1].set_ylabel('Eigenvalue')\n axes[1].legend(loc='upper right') \n \n # PC loadings\n loads = pd.DataFrame(data=pca.components_, \n columns=df.columns,\n index=range(1, pca.components_.shape[0]+1)).T\n\n # Project into 2 and 3 components\n fig = plt.figure(figsize=(16, 6))\n \n # Plot 2 components\n ax = fig.add_subplot(1, 2, 1)\n \n # Refit the PCA, this time specifying 2 components\n # and transforming the result\n feat_reduced = PCA(n_components=2).fit_transform(feat_std)\n \n # Build df \n data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n 'PC2':feat_reduced[:, 1],\n 'country':cats['country'],\n 'regulated':cats['regulated']}) \n\n groups = data.groupby(['country', 'regulated'])\n \n # Plot\n for name, group in groups:\n ax.scatter(group['PC1'], group['PC2'], s=60,\n label=groups_dict[name][0],\n c=groups_dict[name][1],\n marker=groups_dict[name][2])\n \n ax.set_xlabel('First principal component')\n ax.set_ylabel('Second principal component')\n ax.set_title('First two PCA directions')\n \n # Plot 3 components\n ax = fig.add_subplot(1, 2, 2, projection='3d', \n elev=-150, azim=135)\n\n # Refit the PCA, this time specifying 3 components\n # and transforming the result\n feat_reduced = PCA(n_components=3).fit_transform(feat_std)\n\n # Build df with colours\n data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n 'PC2':feat_reduced[:, 1],\n 'PC3':feat_reduced[:, 2],\n 'country':cats['country'],\n 'regulated':cats['regulated']}) \n \n groups = data.groupby(['country', 'regulated'])\n \n # Plot\n for name, group in groups:\n ax.scatter(group['PC1'], group['PC2'], group['PC3'],\n label=groups_dict[name][0],\n 
c=groups_dict[name][1],\n marker=groups_dict[name][2],\n s=60)\n \n ax.set_title('First three PCA directions')\n ax.set_xlabel('First principal component')\n ax.set_ylabel('Second principal component')\n ax.set_zlabel('Third principal component')\n ax.legend(bbox_to_anchor=(0.15, -0.1), frameon=True)\n plt.show()\n\n return loads",
"def do_pca(X, y, components: int = 2, plot: bool = True):\n\n new_X = []\n for i in X:\n new_X.append(i.flatten())\n\n X = new_X\n\n # PCA Stuff?\n pca = PCA(n_components=components)\n pca.fit(X)\n\n # Transform input data based on eigenvectors\n X = pca.transform(X)\n\n # Get scatters\n x = [i[0] for i in X]\n w = [i[1] for i in X]\n\n # plot\n\n plt.scatter(x, w, c=y)\n plt.show()",
"def pca(data, components):\n\n\t_pca = PCA(n_components = components)\n\t_pca.fit(data)\n\tvar = _pca.explained_variance_ratio_\n\tcum_var = np.cumsum(np.round(var, decimals=4)*100)\n\tfig = plt.plot(cum_var)\n\trotation = pd.DataFrame(\n\t\t_pca.components_,\n\t\tcolumns = data.columns,\n\t\tindex = ['PC-1','PC-2','PC-3','PC-4','PC-5','PC-6','PC-7','PC-8','PC-9',]\n\t\t)\n\n\treturn (fig, rotation)",
"def make_plots(fig_title, \n t_csd_x, t_csd_y, t_csd_z, true_csd, \n ele_x, ele_y, ele_z, pots,\n k_csd_x, k_csd_y, k_csd_z, est_csd):\n fig = plt.figure(figsize=(10,16))\n #True CSD\n z_steps = 5\n height_ratios = [1 for i in range(z_steps)]\n height_ratios.append(0.1)\n gs = gridspec.GridSpec(z_steps+1, 3, height_ratios=height_ratios)\n t_max = np.max(np.abs(true_csd))\n levels = np.linspace(-1*t_max, t_max, 16)\n ind_interest = np.mgrid[0:t_csd_z.shape[2]:np.complex(0,z_steps+2)]\n ind_interest = np.array(ind_interest, dtype=np.int)[1:-1]\n for ii, idx in enumerate(ind_interest):\n ax = plt.subplot(gs[ii, 0])\n im = plt.contourf(t_csd_x[:,:,idx], t_csd_y[:,:,idx], true_csd[:,:,idx], \n levels=levels, cmap=cm.bwr_r)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n title = str(t_csd_z[:,:,idx][0][0])[:4]\n ax.set_title(label=title, fontdict={'x':0.8, 'y':0.8})\n ax.set_aspect('equal')\n cax = plt.subplot(gs[z_steps,0])\n cbar = plt.colorbar(im, cax=cax, orientation='horizontal')\n cbar.set_ticks(levels[::2])\n cbar.set_ticklabels(np.around(levels[::2], decimals=2))\n #Potentials\n v_max = np.max(np.abs(pots))\n levels_pot = np.linspace(-1*v_max, v_max, 16)\n ele_res = int(np.ceil(len(pots)**(3**-1))) \n ele_x = ele_x.reshape(ele_res, ele_res, ele_res)\n ele_y = ele_y.reshape(ele_res, ele_res, ele_res)\n ele_z = ele_z.reshape(ele_res, ele_res, ele_res)\n pots = pots.reshape(ele_res, ele_res, ele_res)\n for idx in range(min(5,ele_res)):\n X,Y,Z = grid(ele_x[:,:,idx], ele_y[:,:,idx], pots[:,:,idx])\n ax = plt.subplot(gs[idx, 1])\n im = plt.contourf(X, Y, Z, levels=levels_pot, cmap=cm.PRGn)\n ax.hold(True)\n plt.scatter(ele_x[:,:,idx], ele_y[:,:,idx], 5)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n title = str(ele_z[:,:,idx][0][0])[:4]\n ax.set_title(label=title, fontdict={'x':0.8, 'y':0.8})\n ax.set_aspect('equal')\n ax.set_xlim([0.,1.])\n ax.set_ylim([0.,1.])\n cax = plt.subplot(gs[z_steps,1])\n cbar2 = plt.colorbar(im, cax=cax, orientation='horizontal')\n cbar2.set_ticks(levels_pot[::2])\n cbar2.set_ticklabels(np.around(levels_pot[::2], decimals=2))\n # #KCSD\n t_max = np.max(np.abs(est_csd[:,:,:,0]))\n levels_kcsd = np.linspace(-1*t_max, t_max, 16)\n ind_interest = np.mgrid[0:k_csd_z.shape[2]:np.complex(0,z_steps+2)]\n ind_interest = np.array(ind_interest, dtype=np.int)[1:-1]\n for ii, idx in enumerate(ind_interest):\n ax = plt.subplot(gs[ii, 2])\n im = plt.contourf(k_csd_x[:,:,idx], k_csd_y[:,:,idx], est_csd[:,:,idx,0], \n levels=levels_kcsd, cmap=cm.bwr_r)\n #im = plt.contourf(k_csd_x[:,:,idx], k_csd_y[:,:,idx], est_csd[:,:,idx,0], \n # levels=levels, cmap=cm.bwr_r)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n title = str(k_csd_z[:,:,idx][0][0])[:4]\n ax.set_title(label=title, fontdict={'x':0.8, 'y':0.8})\n ax.set_aspect('equal')\n cax = plt.subplot(gs[z_steps,2])\n cbar3 = plt.colorbar(im, cax=cax, orientation='horizontal')\n cbar3.set_ticks(levels_kcsd[::2])\n #cbar3.set_ticks(levels[::2])\n cbar3.set_ticklabels(np.around(levels_kcsd[::2], decimals=2))\n #cbar3.set_ticklabels(np.around(levels[::2], decimals=2))\n fig.suptitle(\"Lambda,R,CV_Error,RMS_Error,Time = \"+fig_title)\n gs.tight_layout(fig, rect=[0, 0.03, 1, 0.95]) \n # #Showing\n #plt.tight_layout()\n plt.show()\n return",
"def plot_chem_profiles(self, fig=3):\n # Select the physical parameters\n chem_parms = self.chem_names\n \n # Plot these parameters\n self.plot_profiles(chem_parms, fig)",
"def pca_reduction(X, ncomp=20):\n print('Performing dimensionality reduction ...')\n\n # PCA fitting\n pca = PCA(n_components=ncomp)\n weights = pca.fit_transform(X)\n basis = pca.components_\n\n # # Plot cumsum(explained_variance) versus component\n # plt.semilogy(pca.explained_variance_ratio_*100, 's')\n # plt.ylabel('Explained Variance Ratio (%)', size=20)\n # plt.xticks(size=20)\n # plt.xlabel('Component', size=20)\n # plt.yticks(size=20)\n # plt.show()\n\n print('Explained variance ratio : '+str(round(np.cumsum(pca.explained_variance_ratio_)[-1]*100, 2))+' %.')\n\n # pickle.dump(pca, '/../Data/GPmodel/pca_'+str(ncomp))\n\n # Some plots on PCA\n # plot_pca(basis, weights)\n\n return pca, weights",
"def eigen_decomposition(X, features):\n # Center to average\n Xctr = X - X.mean(0)\n # covariance matrix\n Xcov = np.cov(Xctr.T)\n\n # Compute eigenvalues and eigenvectors\n eigen_values, eigen_vectors = sp.linalg.eigh(Xcov)\n\n # Sort the eigenvalues and the eigenvectors descending\n sortedindex = np.argsort(eigen_values)[::-1]\n eigen_values = eigen_values[sortedindex]\n eigen_vectors = eigen_vectors[:, sortedindex]\n\n ###########\n y_pos = np.arange(len(features))\n weight = eigen_vectors[0]\n\n figure, axis = plt.subplots(2, 1)\n\n axis[0].bar(features, eigen_vectors[0])\n plt.setp(axis[0], title=\"First and Second Component's Eigenvectors \", ylabel='Weight')\n axis[0].set_xticks(features, features)\n axis[1].bar(features, eigen_vectors[1])\n axis[1].set_xticks(features, features)\n plt.setp(axis[1], ylabel='Weight')\n # axis[0].bar(y_pos, weight, align='center', alpha=0.5)\n # axis[0].xticks(y_pos, features)\n # axis[0].ylabel('Weight')\n # axis[0].title('Features')\n #\n # axis[1].bar(y_pos, weight, align='center', alpha=0.5)\n # axis[1].xticks(y_pos, features)\n # axis[1].ylabel('Weight')\n # axis[1].title('Features')\n\n plt.show()\n # return eigen_values, eigen_vectors",
"def test_plot(arg):\n source_data = data.Biofile(arg)\n sample = source_data.get_header()\n feature = source_data.get_index()\n sample_size, feature_size = 106, 12042\n sample = sample[:sample_size]\n #xshape = (106 12042)\n print(sample, feature)\n X = source_data.get_matrix().T[:sample_size, :feature_size]\n mx = 100\n labs = ['rbf','poly','sigmoid']\n semi_r = util.kernel_non_negative_factorization(X.T,n_components=2, max_iter = mx, parameter = 100) #rbf 0.5\n semi_r_con = util.kernel_non_negative_factorization(X.T,n_components=2, max_iter = mx, kernel='poly', parameter= 0.5)#ploy 2\n semi_r_con1 = util.kernel_non_negative_factorization(X.T,n_components=2, max_iter=mx, kernel='sigmoid', parameter= 0.1) #sigmoid 0.5\n semi_r_con2 = util.convex_non_negative_factorization(X.T, max_iter=mx, n_components=2)\n\n #semi_r = util.semi_non_negative_factorization_with_straint(X.T, max_iter = mx,n_components=2 ,initialization= 'Kmeans',alpha = 0.01, beta = 0.01)\n #semi_r_con = util.semi_non_negative_factorization_with_straint(X.T, max_iter=mx,n_components=2 ,initialization= 'Kmeans',alpha= 10, beta = 10)\n #semi_r_con1 = util.semi_non_negative_factorization_with_straint(X.T, max_iter=mx,n_components=2, initialization= 'Kmeans',alpha= 0, beta = 10)\n #semi_r_con2 = util.semi_non_negative_factorization_with_straint(X.T, max_iter=mx,n_components=2, initialization= 'Kmeans',alpha= 10, beta = 0)\n #convex_r_con = util.convex_non_negative_factorization(X.T, n_components=2, max_iter=mx)\n\n G, G1, G2, G3 = semi_r[1], semi_r_con[1], semi_r_con1[1], semi_r_con2[1]\n result, result1, result2, result3 = semi_r[2], semi_r_con[2], semi_r_con1[2], semi_r_con2[2]\n x = [i for i in range(mx)]\n # plot the losses function\n plt.title(\"losses function of {}\".format(arg[:-4]))\n plt.xlabel(\"iteration times\")\n plt.ylabel(\"losses\")\n\n plt.plot(x, result[:mx], 'r', marker = '.', label = 'kNMF({})'.format(labs[0]))\n plt.plot(x, result1[:mx], 'b', marker ='.' , label = 'kNMF({})'.format(labs[1]))\n plt.plot(x, result2[:mx], 'c', marker ='.', label = 'kNMF({})'.format(labs[2]))\n plt.plot(x, result3[:mx], 'm', marker ='.', label = 'cvxnmf')\n \"\"\"\n plt.plot(x, result[:mx], 'r', marker = '.', label = 'sNMF')\n plt.plot(x, result1[:mx], 'b', marker ='.' 
, label = 'sNMF(0.5,0.5)')\n plt.plot(x, result2[:mx], 'c', marker ='.', label = 'sNMF(0,0.5)')\n plt.plot(x, result3[:mx], 'm', marker ='.', label = 'sNMF(0.5,1)')\n plt.plot(x, result4[:mx], 'k', marker = '.', label = 'cvx-NMF')\n \"\"\"\n plt.legend(bbox_to_anchor=[1,1])\n plt.grid()\n plt.show()\n\n #plot the clustering result\n plt1 = plt\n plt1.subplot(221)\n plt1.plot(G[:,0], G[:,1], 'ro')\n plt1.title(u'the distribution of items(knmf({}))'.format(labs[0]))\n #items = zip(sample, G)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.subplot(222)\n plt1.plot(G1[:,0], G1[:,1], 'bo')\n\n plt1.title(u'the distribution of items(knmf({}))'.format(labs[1]))\n\n #items = zip(sample, G1)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.subplot(223)\n plt1.plot(G2[:,0], G2[:,1], 'co')\n plt1.title(u'the distribution of items((knmf({}))'.format(labs[2]))\n #items = zip(sample, G4)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.subplot(224)\n plt1.plot(G3[:,0], G3[:,1], 'mo')\n plt1.title(u'the distribution of items(convex-nmf))')\n #items = zip(sample, G2)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.show()",
"def PCA_vis(select_PCA_features, player_attributes):\n x = player_attributes.loc[:, select_PCA_features].values\n\n # Standardizing the features\n x = StandardScaler().fit_transform(x)\n\n # perform 3 component PCA\n pca = PCA(n_components=3)\n principalComponents = pca.fit_transform(x)\n principalDf = pd.DataFrame(\n data=principalComponents,\n columns=[\n \"principal component 1\",\n \"principal component 2\",\n \"principal component 3\",\n ],\n )\n\n # plot players dataset projection on three principal components\n # %matplotlib notebook\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1, projection=\"3d\")\n ax.set_title(\"3 component PCA\", fontsize=30)\n\n # plot first k players' info along principal components\n k = 4000\n ax.scatter(\n principalDf.loc[:k, \"principal component 1\"],\n principalDf.loc[:k, \"principal component 2\"],\n principalDf.loc[:k, \"principal component 3\"],\n s=1,\n )\n\n ax.set_xlabel(\"Principal Component 1\", fontsize=15)\n ax.set_ylabel(\"Principal Component 2\", fontsize=15)\n ax.set_zlabel(\"Principal Component 3\", fontsize=15)\n plt.show()\n\n return principalDf",
"def plot_parameter_visualisation_1d_a_b(parameters_dict, nr_components, ab, colors, prec_wrt_L=False, plot_out=None):\n\n\n plot = {'data': [], 'layout': {}}\n\n\n # component weights\n weights_bg = [ v[0] for k,v in sorted(parameters_dict.iteritems()) if 'weight_bg_' in k]\n weights_contact = [ v[0] for k,v in sorted(parameters_dict.iteritems()) if 'weight_contact_' in k]\n\n #component mu\n means = [v[ab] for k,v in sorted(parameters_dict.iteritems()) if 'mu_' in k]\n\n #component sd\n sd = []\n for component in range(nr_components):\n try:\n if prec_wrt_L:\n sd.append(np.sqrt(1.0/(parameters_dict['prec_'+str(component)][ab] * 142) )) #in case precision is spec depending on L=142\n else:\n sd.append(np.sqrt(1.0/parameters_dict['prec_'+str(component)][ab]))\n except ZeroDivisionError as e:\n print(e)\n sd.append(0) #in case prec is zero bc optimizer tries strange values\n\n\n ### add components\n for component in range(nr_components):\n gaussian_component_density = get_coordinates_for_1d_gaussian(-1, 1, means[component], sd[component])\n plot['data'].append(\n go.Scatter(x=gaussian_component_density[0],\n y=gaussian_component_density[1],\n mode='lines',\n name='component ' + str(component) + ' for ' + AB[ab],\n line=dict(dash='dot',\n color=colors[component]),\n showlegend=False\n )\n )\n\n ### add mixture if there are more than one component\n if (nr_components > 1):\n gaussian_mixture_x_contact, gaussian_mixture_y_contact =get_coordinates_for_1d_gaussian_mixture(-1, 1,\n weights_contact,\n means,\n sd)\n plot['data'].append(go.Scatter(x=gaussian_mixture_x_contact,\n y=gaussian_mixture_y_contact,\n mode='lines',\n name='mixture (contact) for ' + AB[ab],\n line=dict(color='rgb(50,205,50)',\n width = 3),\n showlegend=False\n )\n )\n\n if (nr_components > 1):\n gaussian_mixture_x_bg, gaussian_mixture_y_bg = get_coordinates_for_1d_gaussian_mixture(-1, 1,\n weights_bg,\n means,\n sd)\n plot['data'].append(go.Scatter(x=gaussian_mixture_x_bg,\n y=gaussian_mixture_y_bg,\n mode='lines',\n name='mixture (bg) for ' + AB[ab],\n line=dict(color='rgb(50,50,205 )',\n width = 3),\n showlegend=False\n )\n )\n\n\n plot['layout'].update({'title': 'Coupling prior as a gaussian mixture'})\n plot['layout'].update({'xaxis1': {'title': \"coupling values\"}})\n plot['layout'].update({'yaxis1': {'title': \"density\"}})\n plot['layout']['font'] = {'size': 18}\n\n if plot_out is not None:\n plotly_plot(plot, filename=plot_out, auto_open=False)\n else:\n return plot",
"def showEntireDataset(wl_listG, wl_listV, tsvd_graphlet_vectors, kpca_graphlet_gram, tsvd_shortestpath_vectors,\n kpca_shortestpath_gram, classes):\n for i in range(1, 8):\n if (i == 6):\n data_tsvd = tsvd_graphlet_vectors\n data_kpca = kpca_graphlet_gram\n elif (i == 7):\n data_tsvd = tsvd_shortestpath_vectors\n data_kpca = kpca_shortestpath_gram\n else:\n data_tsvd = wl_listV[i - 1]\n data_kpca = wl_listG[i - 1]\n fig = plt.figure(figsize=(15, 15))\n if (i == 6):\n fig.suptitle('Graphlet', fontsize=25)\n elif (i == 7):\n fig.suptitle('Shortest Path', fontsize=25)\n else:\n fig.suptitle(f'Weisfeiler-Lehman {i}', fontsize=25)\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223, projection='3d')\n ax4 = fig.add_subplot(224, projection='3d')\n ax1.title.set_text('2D TruncatedSVD')\n ax2.title.set_text('2D KernelPCA')\n ax3.title.set_text('3D TruncatedSVD')\n ax4.title.set_text('3D KernelPCA')\n ax1.scatter(data_tsvd[:, 0], data_tsvd[:, 1], c=classes)\n ax2.scatter(data_kpca[:, 0], data_kpca[:, 1], c=classes)\n ax3.scatter3D(data_tsvd[:, 0], data_tsvd[:, 1], data_tsvd[:, 2], c=classes)\n ax4.scatter3D(data_kpca[:, 0], data_kpca[:, 1], data_kpca[:, 2], c=classes)\n plt.show()\n print(\"________________________________________________________________________________________\")\n print()",
"def plot_PCA(fig_name):\n dir = \"log/peps mini\"\n pattern = r'(internal|access|lock)\\\\\\d{1,2}.csv$'\n pattern_valid = r'(3|6|9|12).csv$'\n utils.construct_set(dir, pattern, pattern_valid)\n X, y = utils.load_all()\n utils.plot_PCA(X, y)\n plt.title(fig_name)\n if not os.path.exists(dir_fig):\n os.makedirs(dir_fig)\n plt.savefig(dir_fig + '/' + fig_name + '.png')",
"def plot_modelparametercollections(plotname, parametercollection_SF, parametercollection_AGN,\n stat_SF, stat_AGN, AGNcol='blue',SFcol='red', constraintsstr=None,\n fluxratiodictionarylist=None, verbose=True):\n\n Nobj = len(parametercollection_SF)\n if verbose: print(' - Will generate plots of NEOGAL \"PDFs\" for all '+str(Nobj)+' objects in parameter collections')\n for oo in np.arange(Nobj):\n objid = parametercollection_SF[oo]['id']\n if verbose:\n infostr = ' plotting info for '+str(objid)+' ('+str(\"%.5d\" % (oo+1))+' / '+str(\"%.5d\" % Nobj)+') '\n sys.stdout.write(\"%s\\r\" % infostr)\n sys.stdout.flush()\n plotname_obj = plotname.replace('.pdf','_id'+str(objid)+'.pdf')\n # if verbose: print(' - Generating the figure '+plotname_obj)\n figuresize_x = 6\n figuresize_y = 5\n fig = plt.figure(figsize=(figuresize_x,figuresize_y))\n Fsize = 9\n LW = 2\n plt.rc('text', usetex=True) # enabling LaTex rendering of text\n plt.rc('font', family='serif',size=Fsize) # setting text font\n plt.rc('xtick', labelsize=Fsize)\n plt.rc('ytick', labelsize=Fsize)\n plt.clf()\n plt.ioff()\n\n left = 0.10 # the left side of the subplots of the figure\n right = 0.95 # the right side of the subplots of the figure\n bottom = 0.10 # the bottom of the subplots of the figure\n top = 0.90 # the top of the subplots of the figure\n wspace = 1.50 # the amount of width reserved for blank space between subplots\n hspace = 0.50 # the amount of height reserved for white space between subplots\n plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)\n\n Nrows, Ncols = 3, 6\n ylabel = 'Number of NEOGAL SF ('+str(SFcol)+') and AGN ('+str(AGNcol)+') models'\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n Nmodels_SF = float(len(parametercollection_SF[oo]['Zgas']))\n Nmodels_AGN = float(len(parametercollection_AGN[oo]['Zgas']))\n\n titlestr = 'Models satisfying ID='+str(objid)+' cuts: SF='+str(Nmodels_SF)+'; AGN='+str(Nmodels_AGN)\n if (Nmodels_AGN > 0) & (Nmodels_SF > 0):\n Nmodels_ratio = Nmodels_SF/Nmodels_AGN\n titlestr_addition = '; SF/AGN='+str(\"%.4f\" % Nmodels_ratio)\n titlestr = titlestr+titlestr_addition\n\n if fluxratiodictionarylist is not None:\n constraints = fluxratiodictionarylist[oo]\n constraintslist = [key+':['+str(\"%.2f\" % constraints[key][0])+','+str(\"%.2f\" % constraints[key][1])+']'\n for key in constraints.keys() if key not in ['id']]\n\n if len(constraintslist) < 4:\n constraintsstr = '; '.join(constraintslist)\n elif (len(constraintslist) > 3) & (len(constraintslist) < 7):\n constraintsstr = '; '.join(constraintslist[:3])+'\\n'+'; '.join(constraintslist[3:6])\n elif (len(constraintslist) > 6) & (len(constraintslist) < 10):\n constraintsstr = '; '.join(constraintslist[:3])+'\\n'+'; '.join(constraintslist[3:6])+\\\n '\\n'+'; '.join(constraintslist[6:])\n else:\n constraintsstr = '; '.join(constraintslist[:3])+'\\n'+'; '.join(constraintslist[3:6])+\\\n '\\n'+'; '.join(constraintslist[6:9])+'\\n'+'; '.join(constraintslist[9:])\n\n constraintsstr = constraintsstr.replace('10000000000.00','1e10')\n\n titlestr = titlestr+'\\n'+constraintsstr\n # titlestr = r'{\\fontsize{'+str(Fsize)+'pt}{3em}\\selectfont{}{'+titlestr+'\\r}{\\fontsize{'+str((Fsize-2.))+'pt}{3em}\\selectfont{}('+constraintsstr+'}'\n\n # plt.text(x=0.5, y=0.94, s=titlestr, fontsize=Fsize, ha=\"center\", transform=fig.transFigure)\n # plt.text(x=0.5, y=0.88, s=constraintsstr, fontsize=Fsize-2, ha=\"center\", transform=fig.transFigure)\n # 
fig.title(titlestr,fontsize=Fsize)\n fig.suptitle(titlestr,fontsize=Fsize-2)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Zgas\n plt.subplot(Nrows, Ncols, (1,3))\n\n bindefs = np.array([0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.004, 0.006, 0.008, 0.014,\n 0.017, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07])-0.00001\n\n plotkey = 'Zgas'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xscale('log')\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([0.00001,0.1])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # logUs\n plt.subplot(Nrows, Ncols, (4,6))\n\n bindefs = np.arange(-4.75, -0.25, 0.5)\n\n plotkey = 'logUs'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-5,-0.5])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # xid\n plt.subplot(Nrows, Ncols, (7,9))\n\n bindefs = np.array([0.0, 0.2, 0.4, 0.6])\n\n plotkey = 'xid'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-0.05,0.65])\n plt.ylabel(ylabel)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # nh\n plt.subplot(Nrows, Ncols, (10,12))\n\n bindefs = 10**np.array([1.5, 2.5, 3.5, 4.5])\n\n plotkey = 'nh'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],parametercollection_AGN[oo][plotkey],\n stat_SF[oo][plotkey],stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n plt.xscale('log')\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([10,1e5])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # COCOsol\n plt.subplot(Nrows, Ncols, (13,14))\n\n #bindefs = np.array([0.10, 0.14, 0.20, 0.27, 0.38, 0.52, 0.72, 1.00, 1.40])\n bindefs = np.arange(0.05,1.5,0.06)\n\n plotkey = 'COCOsol'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],None,\n stat_SF[oo][plotkey],None,\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([0.00,1.55])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # mup\n plt.subplot(Nrows, Ncols, (15,16))\n\n bindefs = np.array([0,200,400])\n\n plotkey = 'mup'\n nm.plot_modelparametercollections_addhist(parametercollection_SF[oo][plotkey],None,\n stat_SF[oo][plotkey],None,\n SFcol,AGNcol,LW,bindefs=bindefs,Nbins=None)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-10,410])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # alpha\n plt.subplot(Nrows, Ncols, (17,18))\n\n bindefs = np.array([-2.15,-1.85,-1.55,-1.25,-0.95])\n\n plotkey = 'alpha'\n Nbins = 10\n nm.plot_modelparametercollections_addhist(None,parametercollection_AGN[oo][plotkey],\n None,stat_AGN[oo][plotkey],\n SFcol,AGNcol,LW,bindefs=None,Nbins=Nbins)\n\n plt.xlabel(nm.keylabels(plotkey))\n plt.xlim([-2.2,-0.9])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n plt.savefig(plotname_obj)\n plt.clf()\n plt.close('all')\n # if verbose: print(' - 
Successfully saved figure to file')\n if verbose: print('\\n done...')",
"def doPCA(df, grouping_variable, features_to_analyse, plot_save_dir=None, PCs_to_keep=10):\n \n data = df[features_to_analyse]\n \n # Normalise the data before PCA\n zscores = data.apply(zscore, axis=0)\n \n # Drop features with NaN values after normalising\n colnames_before = list(zscores.columns)\n zscores.dropna(axis=1, inplace=True)\n colnames_after = list(zscores.columns)\n nan_cols = [col for col in colnames_before if col not in colnames_after]\n if len(nan_cols) > 0:\n print(\"Dropped %d features with NaN values after normalization:\\n%s\" %\\\n (len(nan_cols), nan_cols))\n\n print(\"\\nPerforming Principal Components Analysis (PCA)...\")\n \n # Fit the PCA model with the normalised data\n pca = PCA()\n pca.fit(zscores)\n \n # Project data (zscores) onto PCs\n projected = pca.transform(zscores) # A matrix is produced\n # NB: Could also have used pca.fit_transform()\n\n # Plot summary data from PCA: explained variance (most important features)\n important_feats, fig = pcainfo(pca, zscores, PC=1, n_feats2print=10) \n \n if plot_save_dir:\n # Save plot of PCA explained variance\n PCAplotroot = Path(plot_save_dir) / 'PCA'\n PCAplotroot.mkdir(exist_ok=True, parents=True)\n PCAplotpath = PCAplotroot / ('control_variation_in_' + \n grouping_variable + \n '_PCA_explained.eps')\n savefig(PCAplotpath, tight_layout=True, tellme=True, saveFormat='eps')\n plt.pause(2); plt.close()\n else:\n PCAplotpath=None\n plt.show(); plt.pause(2); plt.close()\n \n # Store the results for first few PCs in dataframe\n projected_df = pd.DataFrame(projected[:,:PCs_to_keep],\n columns=['PC' + str(n+1) for n in range(PCs_to_keep)]) \n \n # Add concatenate projected PC results to metadata\n projected_df.set_index(df.index, inplace=True) # Do not lose video snippet index position\n \n df = pd.concat([df, projected_df], axis=1)\n\n # Plot PCA - Variation in control data with respect to a given variable (eg. date_recording_yyyymmdd)\n \n # 2-PC\n if plot_save_dir:\n PCAplotpath = Path(str(PCAplotpath).replace('_PCA_explained', \n '_PCA_2_components'))\n title = \"2-Component PCA: Control variation in\\n\\\n '{0}'\".format(grouping_variable) + \" (Top256 features)\"\n plotPCA(df, grouping_variable, var_subset=None, savepath=PCAplotpath, \n title=title, n_component_axes=2)\n plt.pause(2); plt.close()\n \n # 3-PC\n if plot_save_dir:\n PCAplotpath = Path(str(PCAplotpath).replace('_PCA_2_components', \n '_PCA_3_components'))\n title = \"3-Component PCA: Control variation in\\n\\\n '{0}'\".format(grouping_variable) + \" (Top256 features)\"\n plotPCA(df, grouping_variable, var_subset=None, savepath=PCAplotpath, \n title=title, n_component_axes=3, rotate=False)\n plt.pause(2)\n \n return df",
"def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])"
] | [
"0.61112005",
"0.6107796",
"0.605294",
"0.6041272",
"0.59383196",
"0.5867305",
"0.5829947",
"0.57980055",
"0.57948554",
"0.5778952",
"0.5778892",
"0.5736685",
"0.5724429",
"0.5701874",
"0.5699105",
"0.56431276",
"0.5636154",
"0.5602408",
"0.56010866",
"0.55729365",
"0.55693865",
"0.55679923",
"0.5561978",
"0.55515563",
"0.5539124",
"0.55370384",
"0.55328894",
"0.5501753",
"0.54895365",
"0.5488392"
] | 0.71734154 | 0 |
Compute the mean square error (mse) and the r squared error (r2) of the predicted set of images. | def mse_r2(true, predicted):
    # Reshaping set of images
    # n_imgs, nx, ny = true.shape
    # true = np.reshape(true, (n_imgs, nx*ny))
    # predicted = np.reshape(predicted, (n_imgs, nx*ny))
    nx = 33
    ny = 33
    # Compute MSE
    se = np.sum((true - predicted)**2, axis=1)
    mse = se*(nx*ny)**-1
    # Compute R squared
    mean = np.mean(true, axis=1)
    r2 = 1 - se*np.sum((true - np.expand_dims(mean, axis=1))**2, axis=1)**-1
    return mse, r2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rmse(actual: np.ndarray, predicted: np.ndarray):\n return np.sqrt(np.mean(np.square(_error(actual, predicted))))",
"def calculate_mse(img0, img1):\n mse = skm.mean_squared_error(img0, img1)\n return mse",
"def mse(img1, img2):\n err = (np.square(img1 - img2)).mean(axis=None)\n # return the MSE, the lower the error, the more \"similar\"\n # the two images are\n return err",
"def rmse(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return np.sqrt(F.mse_loss(y_pred, y_true).cpu().item())",
"def rmse(self):\n y_pred, y_true = self._finalize_labels_and_prediction()\n\n return np.sqrt(F.mse_loss(y_pred, y_true).cpu().item())",
"def get_rmse(self, y_true, y_pred):\r\n return np.sqrt(np.mean((np.array(y_true) - np.array(y_pred)) ** 2))",
"def getRMSE(image1, image2):\n im1 = readImage(image1, grayscale=False)\n im2 = readImage(image2, grayscale=False)\n return np.sqrt( ((im1 - im2)**2).mean() )",
"def rmse(true, predictions):\n true = np.array(true)\n predictions = np.array(predictions)\n return mean_squared_error(true, predictions) ** 0.5",
"def rmse(y_true, y_pred):\n return np.sqrt(metrics.mean_squared_error(y_true, y_pred))",
"def rmse(self):\n return (self.model_error()**2).mean()**.5",
"def rmse(actual, predicted):\n rms = (actual-predicted)**2\n\n # Returning the sqaure root of the root mean square\n return float(np.sqrt(rms.mean()))",
"def rmse(y_true, y_pred):\n\treturn backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))",
"def evaluate_rmse(y_true, y_pred):\n\n mse_eval = mean_squared_error(y_true, y_pred)\n\n rmse_eval = np.sqrt(mse_eval)\n\n return rmse_eval",
"def rmse(y_true, y_pred):\n return backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))",
"def run_rmse_check(self):\n\n # Shift the intensities to compare\n obs = self.obs.data - self.obs.data.mean()\n exp = self.exp.data - self.exp.data.mean()\n\n # Experimental RMSE2 in real space\n rmse_real = (obs - exp)\n rmse_real = rmse_real**2\n\n # Experimental RMSE2 in reciprocal space\n ftdiff = np.fft.fft2(obs.data,norm='ortho') - np.fft.fft2(exp.data,norm='ortho')\n rmse_fourier = np.real(np.abs(ftdiff)**2.)\n\n # Return\n rmse_ij = self.obs.deepcopy()\n rmse_ij.data = rmse_real\n\n rmse_kl = ModifiedImage(rmse_fourier)\n return rmse_ij, rmse_kl",
"def rmse(y_true: np.ndarray, y_pred: np.ndarray):\n return np.sqrt(np.mean(np.power(y_true - y_pred, 2)))",
"def calcRMSE(labelsAndPreds):\n meanOfSqErrors = labelsAndPreds.map(lambda (x,y): squaredError(x,y)).mean()\n \n return math.sqrt(meanOfSqErrors)",
"def MSE(actual, noisy):\n mean_squared_error(actual, noisy)",
"def mean_squared_error(y_true, y_pred):\n mse = np.mean((y_true - y_pred)**2)\n return mse",
"def eval_metrics(actual, pred):\r\n rmse = np.sqrt(mean_squared_error(actual, pred))\r\n mae = mean_absolute_error(actual, pred)\r\n r2 = r2_score(actual, pred)\r\n return rmse, mae, r2",
"def mean_squared_error(y_true, y_pred):\n\tmse = np.mean(np.power(y_true, y_pred, 2))\n\treturn mse",
"def mse(observed, predicted):\n return np.sqrt(np.mean((observed - predicted)**2))",
"def stderr(predicted, actual):\n return np.sqrt(mse(predicted, actual))",
"def mse(self):\n xs, ys = self.R.nonzero()\n predicted = self.full_matrix()\n error = 0\n for x, y in zip(xs, ys):\n # print(predicted[x, y], self.R[x, y] )\n error += pow(self.R[x, y] - predicted[x, y], 2)\n return np.sqrt(error)",
"def mse(image1, image2):\n err = np.sum((image1 - image2) ** 2)\n err /= float(image1.shape[0] * image1.shape[1])\n # return the MSE, the lower the error, the more \"similar\"\n # the two images are\n return err",
"def rmse(y_true, y_pred): # -> Any:\n ...",
"def _compute_rmse(self, data):\n actual = data.rating.values\n pred = self._predict_all(data)\n rmse = np.sqrt(np.sum((actual - pred) **2) /len(pred))\n return rmse",
"def mse(y_pred, y):\n return np.mean((y - y_pred)**2)",
"def mean_squared_error(y_true, y_pred):\n mse = np.mean(np.power(y_true - y_pred, 2))\n return mse",
"def rmse_metric(actual, predicted):\r\n sum_error = 0.0\r\n for i in range(len(actual)):\r\n prediction_error = predicted[i] - actual[i]\r\n sum_error += (prediction_error ** 2)\r\n mean_error = sum_error / float(len(actual))\r\n return sqrt(mean_error)"
] | [
"0.7332509",
"0.73320276",
"0.72431654",
"0.72006863",
"0.72006863",
"0.7062214",
"0.7058034",
"0.7046866",
"0.7036136",
"0.70209587",
"0.7015327",
"0.7009079",
"0.69375265",
"0.6931622",
"0.69304883",
"0.6926234",
"0.6923181",
"0.6911265",
"0.6903844",
"0.68933755",
"0.6873209",
"0.6864627",
"0.6858459",
"0.6848163",
"0.68444175",
"0.68435484",
"0.68334067",
"0.6814039",
"0.68126357",
"0.680239"
] | 0.8477247 | 0 |
When the select file button is pressed, a dialog of filenames is presented to the user. The entire text of the selected files is then added to the selected case. | def add_files_to_case(self):
    index_list = self.ui.tableWidget.selectionModel().selectedIndexes()
    rows = []
    for i in index_list:
        rows.append(i.row())
    rows = list(set(rows))  # duplicate rows due to multiple columns
    if len(rows) == 0:
        return
    selected_files = []
    for r in rows:
        selected_files.append(self.allfiles[r])
    msg = ""
    for file_ in selected_files:
        msg += self.add_file_to_case(file_)
    # Update messages and table widget
    self.get_files()
    self.show_or_hide_rows()
    Message(self.app, _("File added to case"), msg, "information").exec()
    self.parent_textEdit.append(msg)
    self.app.delete_backup = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def selectFiles(self):\n\n filenames = []\n self.fileIDs = \"\"\n self.caseIDs = \"\" # clears any case selections\n cur = self.settings['conn'].cursor()\n cur.execute(\"select id, name, status from source\")\n result = cur.fetchall()\n for row in result:\n filenames.append({'id': row[0], 'name': row[1], 'status': row[2]})\n self.fileIDs += \",\" + str(row[0])\n if len(self.fileIDs) > 0:\n self.fileIDs = self.fileIDs[1:]\n\n Dialog_selectfile = QtGui.QDialog()\n ui = Ui_Dialog_selectfile(filenames)\n ui.setupUi(Dialog_selectfile, \"Select file(s) to view\", \"many\")\n ok = Dialog_selectfile.exec_()\n if ok:\n tmp_IDs = \"\"\n selectedFiles = ui.getSelected() # list of dictionaries\n for row in selectedFiles:\n tmp_IDs += \",\" + str(row['id'])\n if len(tmp_IDs) > 0:\n self.fileIDs = tmp_IDs[1:]",
"def file_select(self):\r\n # select ui file and change file extension to .py\r\n self.lineEdit_Ui_file_selection.clear()\r\n self.lineEdit_Py_file_name.clear()\r\n options = QtWidgets.QFileDialog.Options()\r\n options |= QtWidgets.QFileDialog.DontUseNativeDialog\r\n self.fileName, _ = QtWidgets.QFileDialog.getOpenFileName(\r\n None,\r\n \"QFileDialog.getOpenFileName()\",\r\n \"\",\r\n \"UI Files (*.ui);;All Files (*)\",\r\n options=options)\r\n py_Filename = self.fileName[:-2]\r\n py_Filename = py_Filename + \"py\"\r\n self.lineEdit_Ui_file_selection.insert( self.fileName )\r\n if self.fileName:\r\n self.lineEdit_Py_file_name.insert( py_Filename )",
"def browse_files_out(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring_offers.delete(0,tk.END)\n self.docstring_offers.insert(0,path_to_data)\n #use chosen value as self.exchanged_offers_filepa\n self.exchanged_offers_filepath.set(path_to_data)",
"def browse( self ):\n Tk.Tk().withdraw()\n filename = askopenfilename( initialdir = self.initialdir,\n title = self.title ,\n filetypes = self.filetypes )\n\n if filename == \"\":\n return\n\n self.set_text( filename )\n #rint( f\"get_text = {self.get_text()}\", flush = True )",
"def askopenfilename():\n\n file_opt = options = {}\n options['defaultextension'] = '.*'\n options['initialdir'] = 'User\\\\'\n options['initialfile'] = ''\n options['parent'] = root\n options['title'] = 'choose file'\n options['multiple'] = 1\n\n # get filename\n filename = tk.filedialog.askopenfilename(**file_opt)\n\n if filename:\n self.sourcefile = filename\n if len(filename) is 1:\n file_path_var.set(filename)\n else:\n file_path_var.set(\n \"Multiple files, including {}\".format(filename[0]))",
"def browse_files_in(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring.delete(0,tk.END)\n self.docstring.insert(0,path_to_data)\n #use chosen value as self.data_file\n self.data_file.set(path_to_data)",
"def selectCases(self):\n\n casenames = []\n self.fileIDs = \"\"\n self.caseIDs = \"\" # default for all cases and allows the file selection search method to occur\n cur = self.settings['conn'].cursor()\n cur.execute(\"select id, name, status from cases\")\n result = cur.fetchall()\n for row in result:\n casenames.append({'id': row[0], 'name': row[1], 'status': row[2]})\n\n Dialog_selectcase = QtGui.QDialog()\n ui = Ui_Dialog_selectfile(casenames)\n ui.setupUi(Dialog_selectcase, \"Select case(s) to view\", \"many\")\n ok = Dialog_selectcase.exec_()\n if ok:\n tmp_IDs = \"\"\n selectedCases = ui.getSelected() # list of dictionaries\n for row in selectedCases:\n tmp_IDs += \",\" + str(row['id'])\n if len(tmp_IDs) > 0:\n self.caseIDs = tmp_IDs[1:]",
"def browse_1(self):\r\n file = QFileDialog()\r\n filter_name = \"Csv files (*.csv);;Text files (*.txt);;Xls files (*.xls);; Xlsx files (*.xlsx)\"\r\n file.setNameFilter(filter_name)\r\n if file.exec():\r\n filenames = file.selectedFiles()\r\n self.browseLine.setText(str(filenames[0]))",
"def callDialog(self):\n self.pathTuple = filedialog.askopenfilenames(filetypes=[(\"Excel files\", \".xlsx .xls .xlsm .xlsb\")])\n self.fileNames = [basename(path.abspath(name)) for name in self.pathTuple]",
"def choosefilenamedsm(self, e):\n filename = QFileDialog.getOpenFileName(self.dlg,\"Select TIFF file\",\n \"/home\", \"TIF files (*.tif);;All files (*.*)\")\n if filename:\n self.dlg.leDem.setText(filename)",
"def buttonClick(self):\n \n self.fpath=filedialog.askopenfilename()\n self.label_fpath.config(text=self.fpath)\n self.err_label.config(text='')\n pass",
"def select_files(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n files, _ = QFileDialog.getOpenFileNames(self.parent,\n \"File Export\",\n os.path.expanduser('~/'),\n \"Ensemble Files (*.ens, *.bin);;Binary Files (*.bin);;All Files (*)\",\n options=options)\n if files:\n # Store the list of results\n self.selected_files = files\n\n # Analyze the files\n self.analyze_files()",
"def send_file_name():\n if value.get() == \"----------------------\":\n messagebox.showinfo(\"Choose File\", \"Please choose a file to edit.\", parent=app_frame)\n return\n elif len(entries) != 0:\n messagebox.showinfo(\"Warning!\", \"You must first close the current file!\", parent=app_frame)\n return\n\n events = get_file(value.get())\n # Call display_lr_assignments() and send events file to be displayed in the application window\n display_lr_assignments(events)",
"def choose_file(self):\n pass",
"def open_file(self: object) -> None:\n self.file = filedialog.askopenfilename(\n initialdir= os.getcwd(),title=\"Select File\",filetypes=(\n (\"Text Files\", \"*.txt\"),(\"all files\",\"*.*\")))\n\n if self.file:\n messagebox.showinfo(\"Selected file\", \"You have selected %s\"%(\n self.file))",
"def getFileName(self, textEntry):\n textEntry.setText(QtGui.QFileDialog.getOpenFileName())\n textEntry.emit(QtCore.SIGNAL('FILE_SELECTED'))",
"def select_file() -> True:\n current_directory = os.getcwd()\n selected_file = eg.fileopenbox(title=EG_TITLE+': Open a file',\n default=os.path.join(current_directory, \"..\"),\n filetypes=\"*.txt,*.py\")\n print(f\"Selected file: {os.path.basename(selected_file)}\")\n print(f\"In directory: {os.path.dirname(selected_file)}\")\n return True",
"def choosefile():\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**options)\r\n #print filename, '*****'\r\n\r\n # open file on your own\r\n if filename:\r\n #return open(filename, 'r')\r\n tasks.upload_chosen = filename",
"def _open_files(self):\n file_names = filedialog.askopenfilenames(initialdir=self.current_directory, title = \"Select file\")\n if(file_names): self.current_directory = os.path.dirname(file_names[0])\n if(len(file_names) == 1):\n file_names = file_names[0]\n return file_names",
"def choosefile(self, diagtitle):\r\n root = Tk()\r\n root.withdraw()\r\n sfile = tkFileDialog.askopenfilename(\r\n parent=root,\r\n filetypes = [('.TXT files', '.txt')],\r\n title=diagtitle )\r\n return sfile",
"def tag_file_chooser(self):\n filename_list = tk.filedialog.askopenfilenames()\n self._tag_path_var.set(filename_list)",
"def planet_clicked(self, filename):\n self.chosen_filename = filename\n self.accept()",
"def on_File1_toolButton_clicked(self):\n my_file = QtWidgets.QFileDialog.getOpenFileName(self, u'打开文件', '/')\n if my_file[0]:\n self.File1_lineEdit.setText(my_file[0])\n else:\n QtWidgets.QMessageBox.warning(self, u'警告', u'请选择输入文件')",
"def action(self):\n self.filename = self.ui_SelectedName.text()\n if self.filename == \"\" or self.filename is None:\n return\n\n dirname = fs.path.forcedir(\".\")\n if self.wparm is not None:\n dirname = self.selected_dir\n if dirname.startswith(self.active_url):\n filename = \"{}{}\".format(fs.path.forcedir(self.active_url), self.filename)\n else:\n # We can't use fs.path.join and also not fs.path.abspath because of protocol url\n filename = \"{}{}{}\".format(\n fs.path.forcedir(self.active_url),\n fs.path.forcedir(dirname),\n self.filename,\n )\n filename = filename.replace(fs.path.forcedir(\".\"), \"\")\n if self.show_save_action and not self.show_dirs_only:\n self.save_settings()\n self.filename = self.ui_SelectedName.text()\n if self.filename == \"\":\n return\n info = self.get_info(fs.path.split(filename)[1], namespaces=None)\n if info is not None and info.is_dir:\n sel = QtWidgets.QMessageBox.warning(\n self,\n \"Warning\",\n \"You can't create a file with this name: {0}\".format(self.filename),\n QtWidgets.QMessageBox.No,\n )\n elif info is not None and info.is_file:\n sel = QtWidgets.QMessageBox.question(\n self,\n \"Replace Filename\",\n \"This will replace the filename: {0}. Continue?\".format(\n self.filename\n ),\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n )\n if sel == QtWidgets.QMessageBox.Yes:\n self.filename = filename\n self.close()\n else:\n pass\n else:\n self.filename = filename\n self.close()\n else:\n self.filename = filename\n self.close()",
"def selection_name(self):\n if not self.show_save_action and not self.show_dirs_only:\n self.ui_Action.setEnabled(False)\n self.filename = self.ui_SelectedName.text()\n if self.ui_DirList.currentText() == self.active_url:\n dirname = \"\"\n else:\n dirname = self.ui_DirList.currentText()\n if self.wparm is not None:\n _dirname = fs.path.forcedir(\".\")\n if self.wparm.value == \"{}{}\".format(_dirname, self.wparm.text):\n dirname = fs.path.dirname(\"{}{}\".format(_dirname, self.wparm.text))\n else:\n dirname = self.selected_dir\n if not self.show_dirs_only:\n _filename = fs.path.combine(dirname, self.filename)\n _file_names = [list(name)[0] for name in self.file_list_items]\n if dirname == fs.path.forcedir(\".\"):\n _file_names = [\n fs.path.combine(dirname, list(name)[0])\n for name in self.file_list_items\n ]\n if _filename in _file_names:\n self.ui_Action.setEnabled(True)\n index = _file_names.index(_filename) + len(self.dir_list_items)\n self.ui_FileList.selectRow(index)\n else:\n if not self.show_save_action:\n self.ui_Action.setEnabled(False)\n self.ui_FileList.clearSelection()",
"def on_open_file(self):\n return tkFileDialog.askopenfilename(\n filetypes=[('default', '*.txt'), ('All files', '*.*')])",
"def selection_file_type(self):\n self.selection_directory()\n self.ui_FileList.clearSelection()\n if not self.show_save_action:\n self.ui_SelectedName.setText(None)\n if self.show_save_action:\n text = self.ui_SelectedName.text()\n new_text = text.split(\".\")[0]\n self.ui_SelectedName.setText(new_text)",
"def on_open(self):\n\n ftypes = [('CSV', '.csv'), ('JSON', '.json'), ('All files', '*')]\n dlg = filedialog.Open(self, filetypes=ftypes)\n\n absolute_file_path = dlg.show()\n \n if absolute_file_path:\n # extract the file name from the absolute path\n file_name = absolute_file_path.split('/')[len(absolute_file_path.split('/')) - 1]\n \n # update the label text\n self.selected_file_name.configure(text=file_name)\n\n self.__set_full_path_of_file(absolute_file_path)\n else:\n # update the label text\n self.selected_file_name.configure(text=\"<Selected file name>\")\n\n self.__set_full_path_of_file(None)",
"def select_files(self):\n pass",
"def invoke (self, context, event):\n context.window_manager.fileselect_add (self)\n return {'RUNNING_MODAL'}"
] | [
"0.7599809",
"0.7312092",
"0.71427107",
"0.7118748",
"0.70763516",
"0.7027887",
"0.70081896",
"0.6910631",
"0.6909173",
"0.6898093",
"0.6897799",
"0.6894666",
"0.6867969",
"0.6848958",
"0.684623",
"0.68427694",
"0.67669785",
"0.67478716",
"0.674199",
"0.6724243",
"0.6709323",
"0.668337",
"0.6678219",
"0.6655522",
"0.66514313",
"0.6623832",
"0.6615977",
"0.6610526",
"0.65743077",
"0.6570398"
] | 0.74605596 | 1 |
The entire text of the selected file is added to the selected case. Also, a non-text file is linked to the case here. The text positions will be 0 and 0. | def add_file_to_case(self, file_):
cur = self.app.conn.cursor()
text_len = 0
if file_[2] is not None:
text_len = len(file_[2]) - 1
link = {'caseid': self.case['caseid'], 'fid': file_[0], 'pos0': 0,
'pos1': text_len, 'owner': self.app.settings['codername'],
'date': datetime.datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S"), 'memo': ""}
# Check for an existing duplicated linked file first
cur.execute("select * from case_text where caseid = ? and fid=? and pos0=? and pos1=?",
(link['caseid'], link['fid'], link['pos0'], link['pos1']))
result = cur.fetchall()
if len(result) > 0:
msg = _("This file has already been linked to this case ") + file_[1] + "\n"
return msg
# Even non-text files can be assigned to the case here
sql = "insert into case_text (caseid, fid, pos0, pos1, owner, date, memo) values(?,?,?,?,?,?,?)"
cur.execute(sql, (link['caseid'], link['fid'], link['pos0'], link['pos1'],
link['owner'], link['date'], link['memo']))
self.app.conn.commit()
msg = file_[1] + _(" added to case.") + "\n"
# Update table entry assigned to Yes
rows = self.ui.tableWidget.rowCount()
for row in range(0, rows):
fid = int(self.ui.tableWidget.item(row, 0).text())
if fid == file_[0]: # file_[0] is fid
item = QtWidgets.QTableWidgetItem(_("Yes"))
item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
self.ui.tableWidget.setItem(row, 2, item)
return msg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mark(self):\n\n if self.selected_text_file is None:\n return\n # selectedText = self.textBrowser.textCursor().selectedText()\n pos0 = self.ui.textBrowser.textCursor().selectionStart()\n pos1 = self.ui.textBrowser.textCursor().selectionEnd()\n if pos0 == pos1:\n return\n # add new item to case_text list and database and update GUI\n item = {'caseid': self.case['caseid'],\n 'fid': self.selected_text_file[ID],\n 'pos0': pos0, 'pos1': pos1,\n 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().astimezone().strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'memo': \"\"}\n self.case_text.append(item)\n self.highlight()\n\n cur = self.app.conn.cursor()\n # Check for an existing duplicated linkage first\n cur.execute(\"select * from case_text where caseid=? and fid=? and pos0<=? and pos1>=?\",\n (item['caseid'], item['fid'], item['pos0'], item['pos1']))\n result = cur.fetchall()\n if len(result) > 0:\n Message(self.app, _(\"Already Linked\"),\n _(\"This segment has already been linked to this case\"), \"warning\").exec()\n return\n cur.execute(\"insert into case_text (caseid,fid, pos0, pos1, owner, date, memo) values(?,?,?,?,?,?,?)\",\n (\n item['caseid'], item['fid'], item['pos0'], item['pos1'], item['owner'], item['date'], item['memo']))\n self.app.conn.commit()\n # File may not be assigned in the table widget as Yes\n self.get_files()\n self.fill_table()\n self.app.delete_backup = False",
"def add_files_to_case(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n rows = []\n for i in index_list:\n rows.append(i.row())\n rows = list(set(rows)) # duplicate rows due to multiple columns\n if len(rows) == 0:\n return\n selected_files = []\n for r in rows:\n selected_files.append(self.allfiles[r])\n msg = \"\"\n for file_ in selected_files:\n msg += self.add_file_to_case(file_)\n # Update messages and table widget\n self.get_files()\n self.show_or_hide_rows()\n Message(self.app, _(\"File added to case\"), msg, \"information\").exec()\n self.parent_textEdit.append(msg)\n self.app.delete_backup = False",
"def load_case_text(self):\n\n self.case_text = []\n if self.selected_text_file is None:\n return\n cur = self.app.conn.cursor()\n cur.execute(\"select caseid, fid, pos0, pos1, owner, date, memo from case_text where fid = ? and caseid = ?\",\n [self.selected_text_file[ID], self.case['caseid']])\n result = cur.fetchall()\n for row in result:\n self.case_text.append({'caseid': row[0], 'fid': row[1], 'pos0': row[2],\n 'pos1': row[3], 'owner': row[4], 'date': row[5], 'memo': row[6]})",
"def automark(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n rows = []\n for i in index_list:\n rows.append(i.row())\n rows = list(set(rows)) # duplicate rows due to multiple columns\n if len(rows) == 0:\n return\n selected_files = []\n filenames = \"\"\n for r in rows:\n if self.allfiles[r][2] is not None and self.allfiles[r][2] != \"\":\n selected_files.append(self.allfiles[r])\n filenames += self.allfiles[r][1] + \" \"\n ui_se = DialogGetStartAndEndMarks(self.case['name'], filenames)\n ok = ui_se.exec()\n if not ok:\n return\n start_mark = ui_se.get_start_mark()\n end_mark = ui_se.get_end_mark()\n if start_mark == \"\" or end_mark == \"\":\n Message(self.app, _(\"Warning\"), _('Cannot have blank text marks'), \"warning\").exec()\n return\n msg = _(\"Auto assign text to case: \") + self.case['name']\n msg += _(\"\\nUsing \") + start_mark + _(\" and \") + end_mark + _(\"\\nIn files:\\n\")\n msg += filenames\n warning_msg = \"\"\n already_assigned = \"\"\n entries = 0\n cur = self.app.conn.cursor()\n for f in selected_files:\n cur.execute(\"select name, id, fulltext, memo, owner, date from source where id=?\",\n [f[0]])\n currentfile = cur.fetchone()\n text = currentfile[2]\n text_starts = [match.start() for match in re.finditer(re.escape(start_mark), text)]\n text_ends = [match.start() for match in re.finditer(re.escape(end_mark), text)]\n # Add new code linkage items to database\n already_assigned = \"\"\n for start_pos in text_starts:\n text_end_iterator = 0\n try:\n while start_pos >= text_ends[text_end_iterator]:\n text_end_iterator += 1\n except IndexError:\n text_end_iterator = -1\n warning_msg += _(\"Auto assign. Could not find an end mark: \") + f[1] + \" \" + end_mark + \"\\n\"\n if text_end_iterator >= 0:\n pos1 = text_ends[text_end_iterator]\n item = {'caseid': self.case['caseid'], 'fid': f[0],\n 'pos0': start_pos, 'pos1': pos1,\n 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().astimezone().strftime(\"%Y-%m-%d %H:%M:%S\"), 'memo': \"\"}\n # Check if already assigned to case_text\n sql = \"select id from case_text where caseid=? and fid=? and pos0=? and pos1=?\"\n cur.execute(sql, [item['caseid'], item['fid'], item['pos0'], item['pos1']])\n res = cur.fetchone()\n if res is None:\n sql = \"insert into case_text (caseid,fid,pos0,pos1,owner,date,memo) values(?,?,?,?,?,?,?)\"\n cur.execute(sql, (item['caseid'], item['fid'], item['pos0'], item['pos1'],\n item['owner'], item['date'], item['memo']))\n entries += 1\n self.app.conn.commit()\n else:\n already_assigned = _(\"\\nAlready assigned.\")\n # Update messages and table widget\n self.get_files()\n self.fill_table()\n # Text file is loaded in browser then update the highlights\n self.load_case_text()\n self.highlight()\n msg += \"\\n\" + str(entries) + _(\" sections found.\")\n Message(self.app, _(\"File added to case\"), msg + \"\\n\" + warning_msg + \"\\n\" + already_assigned).exec()\n self.parent_textEdit.append(msg)\n self.parent_textEdit.append(warning_msg)\n self.app.delete_backup = False",
"def addContent(text):",
"def text_reader(file_path,text_edit):\n\n parent.ui.textEdit_design_image.clear()\n path = os.getcwd()+'\\media\\docs' + file_path\n f = open(path,'r');\n for x in f:\n text_edit.insertPlainText(x)",
"def highlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n format_ = QtGui.QTextCharFormat()\n cursor = self.ui.textBrowser.textCursor()\n for item in self.case_text:\n try:\n cursor.setPosition(int(item['pos0']), QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(int(item['pos1']), QtGui.QTextCursor.MoveMode.KeepAnchor)\n format_.setFontUnderline(True)\n format_.setUnderlineColor(QtCore.Qt.GlobalColor.red)\n cursor.setCharFormat(format_)\n except Exception as err:\n msg = \"highlight, text length \" + str(len(self.ui.textBrowser.toPlainText()))\n msg += \"\\npos0:\" + str(item['pos0']) + \", pos1:\" + str(item['pos1'])\n msg += \"\\n\" + str(err)\n logger.debug(msg)",
"def highlightingTextInFile():\n savingFilePDF = re.sub('\\t', '', item_text[0] + \".pdf\")\n doc = fitz.open(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n page = doc[0]\n\n with open(newTextFile, \"r\") as file2:\n time.sleep(0.5)\n text1 = file2.read()\n\n # Search for the text in the PDF in order to highlight it\n text_instances = page.searchFor(text1, hit_max=200)\n\n # Loop though the text and add highlight to the text in the HighlightedText.txt file\n for inst in text_instances:\n print(inst, type(inst))\n page.addHighlightAnnot(inst)\n\n try:\n doc.save(gradedFilesFolder + \"\\\\\" + \"Corrected - \" + savingFilePDF,\n garbage=4, deflate=True, clean=True)\n doc.close()\n os.remove(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n\n except RuntimeError as error:\n print(\"PDF file may be open\" + str(error))",
"def browse_files_in(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring.delete(0,tk.END)\n self.docstring.insert(0,path_to_data)\n #use chosen value as self.data_file\n self.data_file.set(path_to_data)",
"def set_text(self):\n\n if not self.text and len(self.get_files()) > 0:\n self.text = self.files[0].get_title()\n # if \"_\" in str(self.text):\n if re.match(\"[0-9]_[0-9]\", self.text) is not None:\n self.text = self.files[0].get_parent()[\"title\"]\n else:\n try: \n int(self.text)\n # is a simple int\n if int(self.text) > 20:\n self.text = self.files[0].get_parent()[\"title\"]\n except Exception as e:\n # not a simple int\n # do nothing cause probably set already\n pass\n self.text = self.text.replace(\"_\", \" \")\n self.set_keywords()",
"def file_open(self):\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File')\n\n with open(filename, 'r', encoding=\"utf8\") as file:\n self.file_cont = file.readlines()\n self.textToAnalize.setText(''.join(self.file_cont))",
"def open_file(self):\n files = [('Text Document', '*.txt'), ('PDF Document', '*.pdf'), ('Word Document', '*.docx')]\n text_file = askopenfile(mode='r', title=\"Open your file\", filetypes=files,\n defaultextension=files)\n if text_file is not None:\n self.file_path = text_file.name\n text_inside = self.file.load_file(text_file.name)\n text_file.close()\n self.textbox.delete(\"1.0\", tk.END)\n self.textbox.insert(\"1.0\", text_inside)\n self.text = self.textbox",
"def find_file(self):\n selected_file = tk.filedialog.askopenfilename(initialdir='/', title='Select File',\n filetypes=(('txt Files', '*.txt'), ('All Files', '*.*')))\n self.markov_chain.add_file(selected_file)",
"def browse_files_out(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring_offers.delete(0,tk.END)\n self.docstring_offers.insert(0,path_to_data)\n #use chosen value as self.exchanged_offers_filepa\n self.exchanged_offers_filepath.set(path_to_data)",
"def saveTexts(self):\n if self.currentItem is not None:\n # Get name of selected file in the List\n currentItempath = path.join(self.workDir, self.currentItem)\n # Pure-text annotation\n filepath_cor = currentItempath + TEXT_ANNO_EXT\n cor_text = self.TextCorr.GetValue().strip()\n self.editFile(filepath_cor, cor_text, self.PlayList.setTextAnno)\n # XML annotation\n filepath_xcor = currentItempath + XML_ANNO_EXT\n xcor_text = self.XMLCorr.GetValue().strip()\n self.editFile(filepath_xcor, xcor_text, self.PlayList.setXMLAnno)\n # Command annotation\n filepath_cmd = currentItempath + CMD_ANNO_EXT\n cmd_text = self.CorrCommand.GetValue().strip()\n self.editFile(filepath_cmd, cmd_text, self.PlayList.setCommandAnno)\n # Annotator comments\n filepath_nfo = currentItempath + COMMENT_EXT\n nfo_text = self.Comments.GetValue().strip()\n self.editFile(filepath_nfo, nfo_text, None)",
"def WriteText( self, text ) :\n # Always adjust the insertion point BEFORE the insertion.\n self.folderTxtCtl.SetInsertionPointEnd()\n self.folderTxtCtl.WriteText( text )",
"def save_text(self):\n content = self.get_content()\n if content != '':\n self.text.append((content, self.context, self.ancestor))",
"def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n text = f.read()\n f.close()\n self.add_string(text)",
"def row_selection_changed(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n rows = []\n for i in index_list:\n rows.append(i.row())\n rows = list(set(rows)) # duplicate rows due to multiple columns\n if len(rows) == 0:\n return\n self.ui.textBrowser.setText(\"\")\n self.selected_text_file = None\n index = rows[0]\n # A fulltext source is displayed if fulltext is present\n # If the mediapath is None, this represents an A/V transcribed file\n self.ui.label_file.setText(_(\"Displayed file: \") + self.allfiles[index][NAME])\n if self.allfiles[index][FULLTEXT] != \"\" and self.allfiles[index][FULLTEXT] is not None:\n self.selected_text_file = self.allfiles[index]\n self.ui.textBrowser.setText(self.allfiles[index][FULLTEXT])\n self.load_case_text()\n self.unlight()\n self.highlight()\n return",
"def add_transcription_textgrid(self):\n filename, _ = QFileDialog.getOpenFileName(None, 'Open file', '', \"(*.TextGrid)\")\n if os.path.isfile(filename):\n add_transcription_data(\n nwbfile=self.model.nwb,\n path_transcription=filename,\n tr_type='textgrid'\n )\n self.action_vis_transcription.setEnabled(True)\n self.transcriptionadd_tools_menu.setEnabled(False)\n # Write changes to NWB file\n self.model.io.write(self.model.nwb)\n print('Transcription data added successfully!')",
"def get_sample_text(self, sample_file):\n text = get_text_pdf(sample_file)\n self.ui.plainTextEdit.appendPlainText(text)",
"def text(self):\n for mt in Config.mimes_rtf:\n if mt in self.sub_type:\n self.add_file_string('Rich Text file')\n # TODO: need a way to convert it to plain text\n self.force_ext('.txt')\n return\n for mt in Config.mimes_ooxml:\n if mt in self.sub_type:\n self.add_file_string('OOXML File')\n self._ooxml()\n return\n self.add_file_string('Text file')\n self.force_ext('.txt')",
"def open_file():\n filepath = askopenfilename(\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")]\n )\n if not filepath:\n return\n txt_edit.delete(1.0, tk.END)\n with open(filepath, \"r\") as input_file:\n text = input_file.read()\n txt_edit.insert(tk.END, text)\n window.title(f\"Simple Text Editor - {filepath}\")",
"def loadText(self,filePath):\n ins = file(filePath,'r')\n reComment = re.compile(r\"#.*\")\n reSection = re.compile(r'@ +(srcmod|replace)',re.M)\n reReplace = re.compile(r\"(\\w[-\\w ']+)\\s*:\\s*(.+)\")\n reNewIds = re.compile(r\",\\s*\")\n mode = None\n for line in ins:\n line = reComment.sub('',line.strip())\n maSection = reSection.match(line)\n if maSection:\n mode = maSection.group(1)\n elif not line: #--Empty/comment line\n pass\n elif mode == 'srcmod':\n self.srcModName = line\n elif mode == 'replace':\n maReplace = reReplace.match(line)\n if not maReplace: continue\n oldId = maReplace.group(1)\n self.newIds[oldId.lower()] = reNewIds.split(maReplace.group(2))\n ins.close()",
"def PutText(self, text):\n if not self.HasSelection():\n cpos = self.GetCurrentPos()\n lepos = self.GetLineEndPosition(self.GetCurrentLine())\n if self.GetOvertype() and cpos != lepos:\n self.CharRight()\n self.DeleteBack()\n self.AddText(text)\n else:\n self.ReplaceSelection(text)",
"def open_dialog1(self):\n file_name = QtGui.QFileDialog.getOpenFileName()\n self.change_task(\"Computing recency vectors of text 1\")\n txt = self.controller.process_raw_text(file_name, LEFT_TEXT)\n self.end_task()\n self._window.column1.align_disp.set_text(txt)",
"def os_open_txt_file( cls, txt_file ):\n cls.file_text_editor.os_call( txt_file )",
"def add_case(self, name):\n mod = self._mod\n std = mod.give_aster_study()\n prefs = aster_s_gui.AsterPreferences()\n \n case = std.add_case(self.find_new_name(std, name))\n case.use(aster_s.CommFile(self.get_str(\"command-file\")))\n case.use(aster_s.SMeshEntry(self.give_field(\"mesh\").node.entry))\n if prefs.get(aster_s_gui.InteractiveFollowUp):\n case.use(aster_s.InteractivFollowUp())\n if prefs.get(aster_s_gui.SaveBaseResult):\n case.use(aster_s.HasBaseResult())\n mod.update()\n #salome.sg.updateObjBrowser(0)",
"def new_file():\n text.delete('1.0', tk.END)",
"def newTestTxt(self):\n self.newTab( extension = TestTxt.TYPE, repoDest=UCI.REPO_UNDEFINED )"
] | [
"0.67745167",
"0.66048443",
"0.64684236",
"0.64509314",
"0.6278228",
"0.6071602",
"0.60579133",
"0.5877363",
"0.5813676",
"0.57943785",
"0.5788398",
"0.57818663",
"0.5736257",
"0.5703492",
"0.5703116",
"0.5663151",
"0.56324875",
"0.5624252",
"0.55971783",
"0.55862516",
"0.55659765",
"0.5564395",
"0.5536917",
"0.5513646",
"0.55035365",
"0.54935265",
"0.5480674",
"0.5477426",
"0.5464977",
"0.54432726"
] | 0.6665885 | 1 |
Remove selected files from case. | def remove_files_from_case(self):
index_list = self.ui.tableWidget.selectionModel().selectedIndexes()
rows = []
for i in index_list:
rows.append(i.row())
rows = list(set(rows)) # duplicate rows due to multiple columns
if len(rows) == 0:
return
selected_files = []
remove_msg = ""
for r in rows:
selected_files.append(self.allfiles[r])
remove_msg += "\n" + self.allfiles[r][1]
del_ui = DialogConfirmDelete(self.app, remove_msg)
ok = del_ui.exec()
if not ok:
return
cur = self.app.conn.cursor()
sql = "delete from case_text where caseid=? and fid=?"
for f in selected_files:
try:
cur.execute(sql, [self.case['caseid'], f[0]])
self.app.conn.commit()
self.parent_textEdit.append(f[1] + " removed from case " + self.case['name'])
except Exception as e:
print(e)
logger.debug(str(e))
# Update assigned files and table widget
self.get_files()
self.fill_table()
self.app.delete_backup = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_files(self, files: Set[str]) -> None:\n for f in files:\n src = os.path.join(self.get_directory(), f)\n os.remove(src)",
"def clean_files(self):\n self.filenames.clear()",
"def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()",
"def handleRemoveFile(self):\n for w in self.filesList.selectedItems():\n self.filesList.removeFile(w.text(2))\n self.metadataList.clear()\n self.metadataList.setRowCount(0)\n self.metadataList.setHorizontalHeaderLabels([\"Metadata Header\", \"Value\"])\n self.personalDataList.clear()",
"def clean():\n for f in [f for f in os.listdir() if f.endswith(\".part\")]:\n os.remove(f)",
"def remove_files(files):\n for file_name in files:\n os.remove(file_name)",
"def delete_file(self):\n os.remove(self.id+\"-input.txt\")\n if(self.lang == \"PYTHON\"):\n os.remove(self.id+\".py\")\n elif(self.lang == \"C\"):\n os.remove(self.id+\".c\")\n if(self.status == 1):\n os.remove(self.id+\"_c\")\n elif(self.lang == 'CPP'):\n os.remove(self.id+\".cpp\")\n if(self.status == 1):\n os.remove(self.id+\"_cpp\")\n elif(self.lang == 'JAVA'):\n os.remove(self.id+\".java\")\n if(self.status == 1):\n os.remove(self.id+\"_java\") \n elif(self.lang == \"JS\"):\n os.remove(self.id+\".js\")\n # if(self.status == 1):\n # os.remove(self.id+\"_js\")s",
"def cleanup(options=None):\n if options is None:\n for f in glob.glob(\"*.grmpy.*\"):\n os.remove(f)\n elif options == 'regression':\n for f in glob.glob(\"*.grmpy.*\"):\n if f.startswith('regression'):\n pass\n else:\n os.remove(f)",
"def remove_files(filename=None):\n os.remove(filename)\n print(\"The file %s has been removed\" % filename)",
"def decide_files_to_delete(files: list) -> Set:\n files_to_keep = decide_files_to_keep(files)\n file_set = set(files)\n # using set theory: files_to_delete = files - files_to_keep\n return file_set.difference(files_to_keep)",
"def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)",
"def remove_file(self):\n selected_column = self.files_treeview.selection()\n\n if not selected_column:\n return\n self.files_treeview.delete(selected_column)\n treeview_items = self.files_treeview.get_children()\n if treeview_items:\n self.files_treeview.selection_set(treeview_items[-1])",
"def delete_files(src_files):\n for i, src_file in enumerate(src_files):\n sys.stdout.write(str(i + 1) + ': ' + src_file + '\\n')\n subprocess.call(['rm', src_file])",
"def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass",
"def clean():\n clean_files()",
"def clean_files(ftype, remove=False):\n import os\n files = os.listdir()\n found_files = [f for f in files if ftype in f]\n if remove:\n for ff in found_files:\n os.remove(ff)\n print(\"Removed {}\".format(ff))\n else:\n return found_files",
"def remove(self,filelist):\n\n self.ws.execute('svn remove %s' % (' '.join(filelist)))",
"def withdraw(self):\n files = self._file_list\n for f in files:\n remove(str(f))\n self._file_list = []\n self._filename = \"\"",
"def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)",
"def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))",
"def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)",
"def delete(self, filename):\n pass",
"def clean(vendor):\n remove_all(\n path\n for path in vendor.glob('*')\n if path.basename() != 'vendored.txt'\n )",
"def RemoveCase(dirc):\n if os.path.exists(dirc):\n shutil.rmtree(dirc)\n #subprocess.call(['rm', '-r', dirc])",
"def clean_files_for(file):\n for f in [file, f\"{file}.json\", f\"{file}.lock\"]:\n if os.path.isfile(f):\n os.remove(f)",
"def remove(self):\n self.remove_file()",
"def cleanup() -> None:\n\n for fname in glob(os.path.join(tdir, 'alexandria.*')):\n if os.path.splitext(fname)[1] not in {'.c', '.h'}:\n os.unlink(fname)",
"def rmGt(self):\n gtfiles = [self.outselect, self.outmktime, self.outltcube,\n self.outbincub, self.outbinmap, self.outbinexp, \n self.outexpmap, self.outsrcmap, \n os.path.join(self.workpath, 'SrcList_cntspec'+self.suffix+'.fits'),\n os.path.join(self.workpath, 'SrcList_cntspec'+self.suffix+'.log')]\n for f in gtfiles:\n if os.path.isfile(f):\n os.remove(f)\n return",
"def remove_extra_files(self):\n\n for f in self._extra_files:\n if os.path.isfile(f):\n os.remove(f)",
"def remove(self): \n self.doRoot(self.removeDir)\n settings.getChanged('mosh.resourceReplacer.applied').remove(self.file)"
] | [
"0.69670033",
"0.6920205",
"0.65350205",
"0.65289146",
"0.6523229",
"0.6520089",
"0.6501256",
"0.6375232",
"0.6366755",
"0.63380545",
"0.6300556",
"0.6276868",
"0.62680596",
"0.62603486",
"0.62171566",
"0.6187643",
"0.6185476",
"0.616471",
"0.6157825",
"0.61527884",
"0.6143036",
"0.6142887",
"0.6127514",
"0.6118649",
"0.61183727",
"0.6112356",
"0.60983485",
"0.60808635",
"0.6057768",
"0.6053771"
] | 0.7693116 | 0 |
Show or hide table rows depending on whether the hide checkbox is checked. | def show_or_hide_rows(self):
rows = self.ui.tableWidget.rowCount()
if self.ui.checkBox_hide.isChecked():
for r in range(0, rows):
# Text present so hide
if len(self.ui.tableWidget.item(r, 2).text()) > 0:
self.ui.tableWidget.hideRow(r)
return
for r in range(0, rows):
self.ui.tableWidget.showRow(r) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hideallstate(self):\n if self.hideallcheck.isChecked() == True:\n self.field.setOwnRobotsVisibility(False, self.index)\n self.field.setPathVisibility(False, self.index)\n self.field.setBallVisibility(False, self.index)\n self.field.setTeammateVisibility(False, self.index)\n #self.field.setPathVisibility(False, self.index)\n self.field.setOpponentVisibility(False, self.index)\n self.field.setUndefVisibility(False, self.index)\n self.ballcheck.setChecked(False)\n self.teammatecheck.setChecked(False)\n self.opponentcheck.setChecked(False)\n self.undefcheck.setChecked(False)\n self.targetcheck.setChecked(False)\n else:\n self.field.setOwnRobotsVisibility(True, self.index)\n self.field.setPathVisibility(True, self.index)\n self.field.setBallVisibility(True, self.index)\n self.field.setTeammateVisibility(True, self.index)\n #self.field.setPathVisibility(True, self.index)\n self.field.setOpponentVisibility(True, self.index)\n self.field.setUndefVisibility(True, self.index)\n self.ballcheck.setChecked(True)\n self.teammatecheck.setChecked(True)\n self.opponentcheck.setChecked(True)\n self.undefcheck.setChecked(True)\n self.targetcheck.setChecked(True)",
"def toggleTableVisibility(id, isVisible):\n table = SavedSearch.objects(id=id).first()\n if not table:\n return {'success': False,\n 'message': \"Error finding table. Please refresh and try again\"}\n message = table.name+ \" is now \"\n if isVisible:\n message += \"visible\"\n else:\n message += \"hidden\"\n table.isPinned = isVisible\n table.save()\n return {'success': True,'message': message}",
"def set_display_columns(self, set_true=[], set_false=[]):\n for i in range(len(self.fields)):\n if self.fields[i].name in set_true:\n self.fields[i].display = True\n elif self.fields[i].name in set_false:\n self.fields[i].display = False",
"def __grid_visibility_checkbox(self, c):\n self.grid_visibility(c.checked)\n self.__grid_visibility = c.checked",
"def __grid_visibility_checkbox(self, c):\n self.grid_visibility(c.checked)\n self.__grid_visibility = c.checked",
"def hide(self):\n hide_us = []\n for rd in self.row_detail_list:\n if rd.cb_var.get():\n hide_us.append(rd)\n if hide_us:\n # Make sure the category/time values are in the JSON file before we remove them\n self.save()\n\n for rd in hide_us:\n self.row_detail_list.remove(rd)\n self.refresh_display()\n else:\n tk.messagebox.showinfo(\"Missing\", \"Click the checkbox next to the category you want to hide.\")",
"def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()",
"def show_data_table(self, show_data_table):\n\n self.container['show_data_table'] = show_data_table",
"def switch_rawhide(self, key, rows):\n self.controller.set_context('rawhide')",
"def toggle_hidden(self):\n self.show_hidden = not self.show_hidden\n self.reload('.')",
"def action_togglevisible(self, ids):\n # Load all populations by the set of IDs\n target_populations = self.get_query().filter(self.model.id.in_(ids)).all()\n\n # Build a list of all the results\n results = []\n\n if len(target_populations) > 0:\n\n for population in target_populations:\n # Build a helpful message string to use for messages.\n population_str = 'population #' + str(population.id) + ' (' + population.name + ')'\n visible_status = ''\n try:\n if not population.visible:\n population.visible = True\n visible_status = ' as visible'\n else:\n population.visible = False\n visible_status = ' as not visible'\n except Exception as ex:\n results.append('Error changing ' + population_str + ': ' + str(ex))\n else:\n results.append('Marked ' + population_str + visible_status + '.')\n\n # Save our changes.\n self.session.commit()\n\n else:\n results.append('No populations were selected.')\n\n # Flash the results of everything\n flash(\"\\n\".join(msg for msg in results))",
"def gridDisplay(self):\n\n if self.griddButton.isCheckable():\n self.photo_grid.setVisible(False)\n self.griddButton.setCheckable(False)\n self.griddButton.setDown(False)\n self.statustext.setText(\"Hide Grid\")\n else:\n self.griddButton.setCheckable(True)\n self.photo_grid.setVisible(True)\n self.griddButton.setDown(True)\n self.statustext.setText(\"Display Grid - Rule of thirds\")",
"def kinnectTB1Checked(self, state):\n if state == QtCore.Qt.Checked:\n print('Show TB1 Kinnect Selected')\n # # release video capture\n # self.cap = cv2.VideoCapture(0)\n # # read image in BGR format\n # ret, img = self.cap.read()\n # image = QtGui.QImage(img, img.shape[1], img.shape[0],\n # img.shape[1] * img.shape[2],\n # QtGui.QImage.Format_RGB888)\n # pixmap = QtGui.QPixmap()\n # pixmap.convertFromImage(image.rgbSwapped())\n # self.simulationWidget.setPixmap(pixmap)\n else:\n print('Hide Kinnect TB1 Unchecked')\n # self.cap.release()",
"def toggle_show_labels(self, checked):\n logger.debug(\"Set show labels to %s.\", checked)\n self.do_show_labels = checked\n self.text_visual.toggle()\n self.canvas.update()",
"def hide_invisible_headers(self):\n # Hide all the non selected columns\n col_index = 0\n for header in self.column_headers_all:\n if header in self.column_headers:\n self.csv_data_table.setColumnHidden(col_index, False)\n self.file_changed = True\n self.set_save_enabled(True)\n else:\n self.csv_data_table.setColumnHidden(col_index, True)\n col_index = col_index + 1",
"def visible(self, show):",
"def show_table(table, has_customer_id=True):\n titles = [\"ID\", \"Title\", \"Price\", \"Date\"]\n if has_customer_id:\n titles.append(\"Customer ID\")\n output_table = [[row[ID], row[TITLE], row[PRICE],\n '/'.join((str(row[YEAR]), str(row[MONTH]), str(row[DAY]))), row[CUSTOMER_ID]] for row in table]\n else:\n output_table = [[row[ID], row[TITLE], row[PRICE],\n '/'.join((str(row[YEAR]), str(row[MONTH]), str(row[DAY])))] for row in table]\n\n ui.clear_scr()\n ui.print_table(output_table, titles, TITLE)",
"def ToggleVisible(self, event):\n pass",
"def checked(self, tbl, big = True, array = ''):\r\n selectall = self.BIG[tbl]\r\n counter = self.catcounts[tbl]\r\n rtag = self.rcols[tbl]\r\n \r\n if big:\r\n array = self.boxes[tbl]\r\n all_bools = [i.get() for i in array.values()]\r\n if sum(all_bools) in [0, len(all_bools)] or selectall.get():\r\n for bvar in array.values():\r\n bvar.set(selectall.get())\r\n else:\r\n all_bools = [array[k].get() for k in array]\r\n if selectall.get():\r\n selectall.set(False)\r\n elif not selectall.get() and sum(all_bools) == len(all_bools):\r\n selectall.set(True)\r\n \r\n if selectall.get():\r\n num = len(kit.SQL_pull('*', tbl))\r\n else:\r\n to_count = [key for key in array if array[key].get()]\r\n in_str = '(\"' + '\", \"'.join(to_count) + '\")'\r\n code = '{} IN {}'.format(rtag, in_str)\r\n num = len(kit.SQL_pull('*', tbl, code))\r\n \r\n if tbl == 'tvshows':\r\n label = 'TV Shows'\r\n else:\r\n label = tbl.capitalize()\r\n \r\n counter.configure(text = '{} {}'.format(num, label))",
"def on_chkenable_change(self):\n logger.debug(\"Enabled checkbox changed\")\n if self.vars[\"enabled\"].get():\n self.subnotebook_show()\n else:\n self.subnotebook_hide()\n self.set_info_text()",
"def toggle_visibility(self):\n if self.is_visible():\n self.hide()\n else:\n self.show()",
"def update_visibility(self, state):\n # The problem is that the following loop triggers __on_item_changed() which would cause the\n # data container to update its visibility in each iteration. It is better to do this once at the\n # end of this function. That's why the following two lines:\n update_data_container_visibility = self.__update_data_container_visibility # save the current state\n self.__update_data_container_visibility = False\n \n # Update all QList items but not the data container\n for item in self.__ordered_items:\n if item.is_hidden:\n continue\n \n if state == 1: item.set_checked()\n elif state == 0: item.set_unchecked()\n elif state == -1: item.toggle_check_state()\n\n # Now, update the data container visibility\n self.__update_data_container_visibility = update_data_container_visibility\n self.__data_container.update_visibility()",
"def fill_table(self):\n\n rows = self.ui.tableWidget.rowCount()\n for r in range(0, rows):\n self.ui.tableWidget.removeRow(0)\n self.ui.tableWidget.setColumnCount(len(self.header_labels))\n self.ui.tableWidget.setHorizontalHeaderLabels(self.header_labels)\n\n for row, f in enumerate(self.allfiles):\n self.ui.tableWidget.insertRow(row)\n item = QtWidgets.QTableWidgetItem(str(f[0]))\n item.setFlags(QtCore.Qt.ItemFlag.ItemIsEnabled)\n self.ui.tableWidget.setItem(row, 0, item)\n item = QtWidgets.QTableWidgetItem(f[1])\n item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)\n self.ui.tableWidget.setItem(row, 1, item)\n # Mark Yes if assigned\n assigned = \"\"\n for i in self.casefiles:\n if f[0] == i[0]:\n assigned = _(\"Yes\")\n item = QtWidgets.QTableWidgetItem(assigned)\n item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)\n self.ui.tableWidget.setItem(row, 2, item)\n for a in self.attributes:\n for col, header in enumerate(self.header_labels):\n if f[0] == a[2] and a[0] == header:\n string_value = ''\n if a[1] is not None:\n string_value = str(a[1])\n if header == \"Ref_Authors\":\n string_value = string_value.replace(\";\", \"\\n\")\n item = QtWidgets.QTableWidgetItem(string_value)\n if header in (\"Ref_Authors\", \"Ref_Title\", \"Ref_Type\", \"Ref_Year\"):\n item.setFlags(QtCore.Qt.ItemFlag.ItemIsEnabled)\n self.ui.tableWidget.setItem(row, col, item)\n\n self.ui.tableWidget.hideColumn(0)\n if self.app.settings['showids']:\n self.ui.tableWidget.showColumn(0)\n self.ui.tableWidget.resizeColumnsToContents()",
"def displayGrid(self, toggled):\n self.scene.setGridVisible(visible=toggled)",
"def _table_selected(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = self._tables[selection_index][0]\n\n #update table column selection\n columns_indexes = [tup[0] for tup in self._datafile.query(sciplot.database.Query(\"SELECT VariableID FROM TableColumn WHERE TableID = (?);\", [table_id], 1))[0]]\n new_checked_items = []\n column_ids = [tup[0] for tup in self._columns]\n\n for variable_id in columns_indexes:\n new_checked_items.append(column_ids.index(variable_id))\n\n self._ckl_columns.SetCheckedItems(new_checked_items)\n\n #update displayed table data\n self.refresh_table()",
"def _checkbutton_toggle(self):\n new_value = self.value_checkbutton.var.get()\n if self.master.change_field_value(self.field_name, new_value):\n self.value_checkbutton.config(fg=\"#3F3\" if new_value else \"#F33\", text=\"ON\" if new_value else \"OFF\")\n else:\n self.value_checkbutton.var.set(not new_value)",
"def show_df_ui(df,transpose=False,default=\"Hide\",message=\"Show dataframe: \"):\n def make_btn(val):\n btn_widget=widgets.Button(\n value=False,\n description=val,\n disabled=False,\n button_style='',\n layout=widgets.Layout(width=\"80px\"),\n )\n return btn_widget\n \n def show_head():\n if not transpose:\n display(df.head(10))\n else:\n display(df.head(10).transpose())\n def show_tail():\n if not transpose:\n display(df.tail(10))\n else:\n display(df.tail(10).transpose())\n def show_full():\n if not transpose:\n display(df)\n else:\n display(df.transpose())\n def show_random():\n if not transpose:\n display(Frames.smart_sample(df,10))\n else:\n display(Frames.smart_sample(df,10).transpose())\n def hide_output():\n pass\n \n def refresh():\n Notebook.clear()\n Widgets.show_df_ui(df,transpose=transpose,message=message)\n \n def show_head_refresh(b):\n refresh()\n show_head()\n def show_tail_refresh(b):\n refresh()\n show_tail()\n def show_full_refresh(b):\n refresh()\n show_full()\n def show_random_refresh(b):\n refresh()\n show_random()\n def hide_output_refresh(b):\n refresh()\n \n behaviors={\n \"Hide\": hide_output,\n \"Head\": show_head,\n \"Tail\": show_tail,\n \"Random\": show_random,\n \"Full\": show_full\n }\n \n btn_head=make_btn(\"Head\")\n btn_random=make_btn(\"Random\")\n btn_tail=make_btn(\"Tail\")\n btn_full=make_btn(\"Full\")\n btn_hide=make_btn(\"Hide\")\n \n btn_head.on_click(show_head_refresh)\n btn_tail.on_click(show_tail_refresh)\n btn_full.on_click(show_full_refresh)\n btn_random.on_click(show_random_refresh)\n btn_hide.on_click(hide_output_refresh)\n \n ui_group=widgets.HBox([\n widgets.Label(value=message),\n btn_head,\n btn_random,\n btn_tail,\n btn_full,\n btn_hide,\n ])\n display(ui_group)\n if default in behaviors:\n behaviors[default]()",
"def add_header_visible_options(self, header_list, visible_list):\n # TODO: On hidding the columns, the bottom info bar should reflect the changes\n # It doesnot work because it uses columnCount() which ignores the state of columns\n\n layout = QVBoxLayout()\n\n for header in header_list:\n print(header)\n check_box = QCheckBox(header)\n if self.visible_headers_list:\n if header in self.visible_headers_list:\n check_box.setChecked(True)\n else:\n check_box.setChecked(False)\n else:\n check_box.setChecked(True)\n layout.addWidget(check_box)\n\n self.column_layout_list_scroll_area.setLayout(layout)\n self.visible_headers_list = visible_list",
"def fullLatticeCheckChanged(self, val):\n if val == QtCore.Qt.Unchecked:\n self.writeFullLattice = False\n else:\n self.writeFullLattice = True",
"def toggle_highlighted_spikes(self, checked):\n self.show_all_spikes = checked\n self.set_interval()"
] | [
"0.5464415",
"0.5450269",
"0.544036",
"0.5378214",
"0.5378214",
"0.51764005",
"0.5109101",
"0.5095365",
"0.50392693",
"0.5015401",
"0.4977343",
"0.49333766",
"0.49240357",
"0.4893646",
"0.48924047",
"0.48620972",
"0.481087",
"0.48104262",
"0.48057994",
"0.48027998",
"0.47643366",
"0.46743566",
"0.46712518",
"0.46434143",
"0.46380964",
"0.46207008",
"0.46182284",
"0.46166146",
"0.45888418",
"0.45878586"
] | 0.7700553 | 0 |
Row selection changed. If the first selected row is a text file, show its text in the textEdit. | def row_selection_changed(self):
index_list = self.ui.tableWidget.selectionModel().selectedIndexes()
rows = []
for i in index_list:
rows.append(i.row())
rows = list(set(rows)) # duplicate rows due to multiple columns
if len(rows) == 0:
return
self.ui.textBrowser.setText("")
self.selected_text_file = None
index = rows[0]
# A fulltext source is displayed if fulltext is present
# If the mediapath is None, this represents an A/V transcribed file
self.ui.label_file.setText(_("Displayed file: ") + self.allfiles[index][NAME])
if self.allfiles[index][FULLTEXT] != "" and self.allfiles[index][FULLTEXT] is not None:
self.selected_text_file = self.allfiles[index]
self.ui.textBrowser.setText(self.allfiles[index][FULLTEXT])
self.load_case_text()
self.unlight()
self.highlight()
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handleTableSelectionChange(self):\n self.selectEntireRow()\n self.showSelectedDataset()",
"def user_selection(self, selected):\n\n source_index = self.proxy_model.mapToSource(selected)\n self.row = self.table_model.selectedRow(source_index.row())\n\n self.curr_selection()\n self.upd_preview()",
"def cell_entered(self, row: int, _):\n self.selectRow(row)",
"def exec_selected_text(self):\r\n editortabwidget = self.get_current_editortabwidget()\r\n editortabwidget.exec_selected_text()",
"def set_row_input_text(self, row_idx, data):\n input_box = self.row_items[row_idx][1]\n input_box.delete(0, 'end')\n input_box.insert(Tkinter.END, data)",
"def selection_changed(self):\n self.emit('selection_changed')",
"def get_selected_row(event):\n global selected_tuple\n index = listing.curselection()[0]\n selected_tuple = listing.get(index)\n\n entry1.delete(0, END)\n entry1.insert(END, selected_tuple[1])\n\n entry2.delete(0, END)\n entry2.insert(END, selected_tuple[2])\n\n entry3.delete(0, END)\n entry3.insert(END, selected_tuple[3])\n\n entry4.delete(0, END)\n entry4.insert(END, selected_tuple[4])",
"def _do_change_row(self, treeview):\n _return = False\n\n self.treeview.handler_block(self._lst_handler_id[0])\n\n (_model, _row) = treeview.get_selection().get_selected()\n try:\n _level = _model.get_value(_row, 11)\n except TypeError:\n _level = None\n\n _columns = treeview.get_columns()\n\n # Change the column headings depending on what is being selected.\n if _level == 'mission':\n _headings = [\n _(u\"Mission ID\"),\n _(u\"Description\"),\n _(u\"Units\"),\n _(u\"Start Time\"),\n _(u\"End Time\"),\n _(u\"\"),\n _(u\"\"),\n _(u\"\")\n ]\n elif _level == 'phase':\n _headings = [\n _(u\"Phase ID\"),\n _(u\" Code\\t\\tDescription\"),\n _(u\"Units\"),\n _(u\"Start Time\"),\n _(u\"End Time\"),\n _(u\"\"),\n _(u\"\"),\n _(u\"\")\n ]\n elif _level == 'environment':\n _headings = [\n _(u\"Environment ID\"),\n _(u\"Condition\"),\n _(u\"Units\"),\n _(u\"Minimum Value\"),\n _(u\"Maximum Value\"),\n _(u\"Mean Value\"),\n _(u\"Variance\"),\n _(u\"\")\n ]\n else:\n _headings = []\n\n i = 0\n for _heading in _headings:\n _label = gtk.Label()\n _label.set_line_wrap(True)\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_markup(\"<span weight='bold'>\" + _heading + \"</span>\")\n _label.set_use_markup(True)\n _label.show_all()\n _columns[i].set_widget(_label)\n\n i += 1\n\n self.treeview.handler_unblock(self._lst_handler_id[0])\n\n return _return",
"def _ui_cell_double_click(self, row, column):\n self.selected_name = self._table.item(row, 0).text()\n self.accept()",
"def cellSelected(self, row, column):\n self.selected_row = row\n self.selected_column = column\n mode = self.getMode()\n if mode == \"Modification\":\n self.askToOverwrite = QtGui.QMessageBox.question(self, 'Overwrite data?', \"\"\"Do you want to overwrite\nthe existing data in the form with the data in the cell and modify that cell?\"\"\",\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) \n if self.askToOverwrite == QtGui.QMessageBox.Yes:\n self.fetchDataToForm(self.selected_row, self.selected_column,\"All\")\n\n #if the audit form isn't already cleared,\n #ask for a confirmation about clearing it, clear it and then call this function once again.",
"def dataGridView_SelectionChanged(self, sender, eventArgs):\r\n # Clear previous selection only if new rows have been selected.\r\n if self.wf.dataGridView.SelectedRows.Count > 0:\r\n Application.SelectObj(\"\", \"\", True)\r\n selectedNames = \"\"\r\n for row in self.wf.dataGridView.SelectedRows:\r\n name = row.Cells[0].Value\r\n selectedNames += ( name + \",\" )\r\n if selectedNames:\r\n Application.SelectObj(selectedNames, \"\", True)",
"def exec_selected_text(self):\r\n editor = self.currentWidget()\r\n ls = editor.get_line_separator()\r\n \r\n line_from, _index_from, line_to, index_to = editor.getSelection()\r\n if line_from != line_to:\r\n # Multiline selection -> first line must be entirely selected\r\n editor.setSelection(line_from, 0, line_to, index_to)\r\n lines = unicode( editor.selectedText() )\r\n \r\n # If there is a common indent to all lines, remove it\r\n min_indent = 999\r\n for line in lines.split(ls):\r\n if line.strip():\r\n min_indent = min(len(line)-len(line.lstrip()), min_indent)\r\n if min_indent:\r\n lines = [line[min_indent:] for line in lines.split(ls)]\r\n lines = ls.join(lines)\r\n\r\n last_line = lines.split(ls)[-1]\r\n if last_line.strip() == unicode(editor.text(line_to)).strip():\r\n # If last line is complete, add an EOL character\r\n lines += ls\r\n \r\n self.interactive_console.shell.execute_lines(lines)\r\n self.interactive_console.shell.setFocus()",
"def _pressed(self, evt):\n x, y, widget = evt.x, evt.y, evt.widget\n item = widget.identify_row(y)\n column = widget.identify_column(x)\n\n if not column or not item in self._items:\n # clicked in the weekdays row or just outside the columns\n return\n\n item_values = widget.item(item)['values']\n if not len(item_values): # row is empty for this month\n return\n\n text = item_values[int(column[1]) - 1]\n if not text: # date is empty\n return\n\n bbox = widget.bbox(item, column)\n if not bbox: # calendar not visible yet\n return\n\n # update and then show selection\n text = '%02d' % text\n self._selection = (text, item, column)\n self._show_selection(text, bbox)",
"def cellSelected(self):\n\n x = self.tableWidget.currentRow()\n y = self.tableWidget.currentColumn()\n if y != self.CAT_COLUMN:\n return\n catText = str(self.tableWidget.item(x, y).text())\n #print(x,y, catText)\n self.tableWidget.item(x, y).setSelected(False)\n for row, code in enumerate(self.codes):\n if code['category'] == catText:\n self.tableWidget.item(row, self.CODE_COLUMN).setSelected(True)",
"def watchSelection(self, sel):\n sel.observers.append(self.selectionLabel.set_text)",
"def __list_item_selection_changed_event(self):\n obj = self.selected_row_object\n if obj:\n # if the user has edit permission then\n if self.has_view_permissions(obj):\n self.object_pk = obj.pk\n self._list.selected_row_id = None\n self.show_edit_form(obj.pk)\n else:\n raise Exception('You do not have permissions to visualize this record.')",
"def curr_selection(self):\n\n self.domain = self.row[0]\n abstract = self.row[5]\n self.data_type = self.row[1]\n self.object_id = self.row[3]\n self.service = self.row[2]\n self.layer_title = self.row[4]\n crs_options = self.row[6]\n self.dlg.uCRSCombo.clear()\n if self.data_type != \"table\":\n self.dlg.uCRSCombo.addItems(crs_options)\n curr_crs = self.map_crs()\n if curr_crs in crs_options:\n idx = self.dlg.uCRSCombo.findText(curr_crs)\n self.dlg.uCRSCombo.setCurrentIndex(idx)\n self.dlg.uTextDescription.setText(abstract)",
"def _selection_changed(self, event):\n if self.typeCombo.get() in Constants.TASKS[0:3]:\n if self.subjectAdded == False:\n self._placeWidgets(self.subjectLabel, self.subjectCombo)\n self.subjectAdded = True\n else:\n if self.subjectAdded:\n # Hide subject label and combobox\n self.subjectLabel.place_forget()\n self.subjectCombo.place_forget()\n self.subjectAdded = False\n self.row -= 1",
"def selected(self):\r\n EditableComboBox.selected(self)\r\n self.emit(SIGNAL(\"open(QString)\"), self.currentText())",
"def current_selection(self):\n row_ids = set(item.row() for item in self.tableView.selectedIndexes())\n sel = []\n for row_id in row_ids:\n row = self.table[row_id]\n sel.append('\\t'.join(row))\n return '\\n'.join(sel)",
"def start_edit(self):\n txt = self.model.get_current_line()\n self._line.original_widget = self._line_edit\n self._line_edit.set_edit_text(txt)\n self._line_edit.set_edit_pos(len(txt))\n self._top.set_focus(2)",
"def handle_left_click(self,event):\n\n c = self.seqframe\n if 'textlabel' in c.gettags(CURRENT):\n self.show_item(event)\n elif 'comparison_seq' in c.gettags(CURRENT):\n self.show_sequence_label(event)\n else:\n self.start_selection(event)\n return",
"def current_selection(self):\n row_ids = set(item.row() for item in self.tableView.selectedIndexes())\n sel = []\n for row_id in row_ids:\n row = self.table[row_id]\n sel.append('\\t'.join(row[self.table.ordinal:]))\n return '\\n'.join(sel)",
"def __refreshContent(self):\n self._window.clear()\n self.drawBorder()\n for i in range(self.__firstShownLine,\n self.__firstShownLine + self.height - 2):\n if self._focused and i == self.__selectedRow:\n self._window.attron(curses.A_BOLD)\n self.__printRow(i)\n self._window.attroff(curses.A_BOLD)",
"def select_documents_grid_row_checkbox(self, row_identifier_text):\n self.select_grid_row_checkbox(self.documents_grid_div_id, row_identifier_text, self.documents_grid_checkbox_column_number)\n self.wait_for_ajax_spinner_load()",
"def update_line_text(self, index, text):\n if index is not None:\n index = self.index(index)\n if index < self.index(tk.END):\n cfg = _get_pared_cfg(self.itemconfig(index))\n self.insert(index, text)\n self.itemconfig(index, **cfg)\n self.delete(index + 1)\n return True\n return False",
"def _on_item_selection_changed(self, event):\n item = event.GetItem()\n if item is not None:\n self._model.change_value(event.GetColumn(), item)",
"def insert_before(self):\n selected_rows = self.tabentry.grid.GetSelectedRows()\n if not selected_rows:\n wx.MessageBox(\n _('Please select a row first (click to the left of the row)'))\n return False, None, None\n pos = selected_rows[0]\n if pos == 0: ## for table config only\n wx.MessageBox(_('The %s must always come first') % mg.SOFA_ID)\n return False, None, None\n bolinserted, row_data = self.tabentry.insert_row_above(pos)\n return bolinserted, pos, row_data",
"def cell_selection_changed(self):\n # Enable Edit Cell menu if a single cell is selection else disable it\n self.cells_selected = self.csv_data_table.selectionModel().selectedIndexes()\n if len(self.cells_selected) == 1:\n self.action_edit_data.setEnabled(True)\n else:\n self.action_edit_data.setEnabled(False)\n\n # Enable delete options iff 1 or more cells are selected\n if len(self.cells_selected) >= 1:\n self.action_delete_selected.setEnabled(True)\n self.action_toolbar_delete_selected.setEnabled(True)\n else:\n self.action_delete_selected.setEnabled(False)\n self.action_toolbar_delete_selected.setEnabled(False)\n\n # Add a way to identify all the currently selected columns\n cols = self.csv_data_table.selectionModel().selectedColumns()\n self.selected_columns = []\n for index in sorted(cols):\n col = index.column()\n self.selected_columns.append(col)\n\n rows = self.csv_data_table.selectionModel().selectedRows()\n self.selected_rows = []\n for index in sorted(rows):\n row = index.row()\n self.selected_rows.append(row)\n\n self.set_bottom_toolbar_info()\n\n # Enable plot toolbars iff exactly 2 columns are selected\n if len(self.selected_columns) == 2:\n self.set_plot_options(True)\n else:\n self.set_plot_options(False)",
"def on_idEdit_textChanged(self, txt):\n self.__generateDefaultCommitMessage()\n self.__updateOK()"
] | [
"0.7136895",
"0.672994",
"0.6510278",
"0.64237314",
"0.64045364",
"0.61852086",
"0.60918605",
"0.5974532",
"0.59213084",
"0.58744264",
"0.5853039",
"0.5832434",
"0.58085626",
"0.5803898",
"0.5799842",
"0.57951725",
"0.5766497",
"0.5761151",
"0.57519233",
"0.5741872",
"0.57338876",
"0.5681765",
"0.5665129",
"0.56651187",
"0.5663019",
"0.56395566",
"0.56146514",
"0.56068146",
"0.55875885",
"0.55863255"
] | 0.7985545 | 0 |
Open image or media file to view. Check that the media file link still works, as the media may have moved. Text files are displayed via row_selection_changed. | def view_file(self):
index_list = self.ui.tableWidget.selectionModel().selectedIndexes()
index = None
if len(index_list) > 0:
index = index_list[0].row()
if index is None:
return
# Need the data as a dictionary to view images and audio/video
dictionary = {'name': self.allfiles[index][NAME], 'mediapath': self.allfiles[index][MEDIAPATH],
'owner': self.allfiles[index][OWNER], 'id': self.allfiles[index][0],
'date': self.allfiles[index][DATE],
'memo': self.allfiles[index][MEMO], 'fulltext': self.allfiles[index][FULLTEXT],
'av_text_id': self.allfiles[index][AV_TEXT_ID]}
# Mediapath will be None for a .transcribed empty text media entry, and 'docs:' for a linked text document
if self.allfiles[index][MEDIAPATH] is None or self.allfiles[index][MEDIAPATH][0:5] == 'docs:':
return
# Added checks to test for media presence
if self.allfiles[index][MEDIAPATH][:6] in ("/video", "video:"):
if self.allfiles[index][MEDIAPATH][:6] == "video:":
abs_path = self.allfiles[index][MEDIAPATH].split(':')[1]
if not os.path.exists(abs_path):
return
if self.allfiles[index][MEDIAPATH][:6] == "/video":
abs_path = self.app.project_path + self.allfiles[index][MEDIAPATH]
if not os.path.exists(abs_path):
return
ui_av = DialogViewAV(self.app, dictionary)
ui_av.exec()
if self.allfiles[index][MEDIAPATH][:6] in ("/audio", "audio:"):
if self.allfiles[index][MEDIAPATH][0:6] == "audio:":
abs_path = self.allfiles[index][MEDIAPATH].split(':')[1]
if not os.path.exists(abs_path):
return
if self.allfiles[index][MEDIAPATH][0:6] == "/audio":
abs_path = self.app.project_path + self.allfiles[index][MEDIAPATH]
if not os.path.exists(abs_path):
return
ui_av = DialogViewAV(self.app, dictionary)
ui_av.exec()
if self.allfiles[index][MEDIAPATH][:7] in ("/images", "images:"):
if self.allfiles[index][MEDIAPATH][0:7] == "images:":
abs_path = self.allfiles[index][MEDIAPATH].split(':')[1]
if not os.path.exists(abs_path):
return
if self.allfiles[index][MEDIAPATH][0:7] == "/images":
abs_path = self.app.project_path + self.allfiles[index][MEDIAPATH]
if not os.path.exists(abs_path):
return
# Requires {name, mediapath, owner, id, date, memo, fulltext}
ui_img = DialogViewImage(self.app, dictionary)
ui_img.exec() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view_media(self, obj):\n for handle in self.selected_handles():\n ref_obj = self.dbstate.db.get_object_from_handle(handle)\n mpath = media_path_full(self.dbstate.db, ref_obj.get_path())\n open_file_with_default_application(mpath)",
"def __openFile(self):\n itm = self.findList.selectedItems()[0]\n self.on_findList_itemDoubleClicked(itm, 0)",
"def _openButton(self):\n #get the specified file\n selected_file = self.view.list.getSelected()\n\n if selected_file:\n self.model.open(selected_file)\n return\n\n #prompt if they really want to open maya\n dialogs = Dialogs(self.view)\n\n msg = 'No file selected!'\n msg += '\\n\\nAre you sure you want to open maya without a file?'\n dialogs.confirmPrompt(msg)\n\n self.model.open()",
"def open_file(self, event=None):\n file = fd.askopenfile(title=\"Choose file to open\",\n filetypes=[(\"Python(default)\", \"*.py\"), (\"Text\", \"*.txt\"),\n (\"Java\", \"*.java\"), (\"JavaScript\", \"*.js\"),\n (\"HTML\", \"*.html\"), (\"CSS\", \"*.css\"),\n (\"All files\", \"*.*\")])\n if file is None:\n return\n else:\n if imghdr.what(\n file.name): # if file is image return image type otherwise return None if file is not an image type\n from project_explorer import ProjectExplorer\n ProjectExplorer().open_image(file.name)\n else:\n self.add_tab(file=file.name, open_file=1)\n from syntax_highlight import Highlighting\n Highlighting().highlight2()",
"def quick_open_preview(self, window):\n if not self.current_history_entry:\n return\n\n view = self.current_view\n other_view = self.get_view_from_another_group(window, view.file_name())\n\n # Only try to open and position the file if it is transient\n if self.is_transient_view(window, view):\n if not self.REOPEN_IN_CURRENT_GROUP and other_view:\n # Focus the other view instead of opening a clone\n self.debug(\"Focussing existing view in group %d\" % window.get_view_index(other_view)[0])\n self.__close_preview(window)\n window.focus_view(other_view)\n # Changing focus to another group requires reopening the panel, unfortunately\n return True\n else:\n (group, index) = self.__calculate_view_index(window, self.current_history_entry)\n view = window.open_file(self.current_history_entry['filename'])\n window.set_view_index(view, group, index)\n\n # Refocus on the newly opened file rather than the original one\n self.__clear_context()\n self.__track_calling_view(window)",
"def openFile(self):\r\n from SXM import FileIO,Data\r\n fname = str(QFileDialog.getOpenFileName(self.widget,self.tr(\"Open File\"), \\\r\n \".\",FileIO.getFilterString(types=(Data.Image,))))\r\n if len(fname) > 0:\r\n root, ext = os.path.splitext(fname)\r\n self.statusBar().showMessage(self.tr(\"Loading data: %1\").arg(fname),2000)\r\n image = FileIO.fromFile(fname)\r\n image.load()\r\n imwin = ImageWindow(self,image)\r\n self.Images.append(imwin)\r\n self.updateImageList()\r\n imwin.windowModality = False\r\n imwin.show()",
"def open_slot(self):\n caption = 'Open files'\n directory = './'\n filter_mask = \"JPEG File Interchange Format (*.jpg *.jpeg *jfif)|\" + \"*.jpg;*.jpeg;*.jfif\"\n files = QFileDialog.getOpenFileNames(None, caption, directory, filter_mask)[0]\n self._model.set_filenames(files)\n if len(files) > 1:\n self._ui.bt_next.setEnabled(True)\n self._ui.bt_prev.setEnabled(True)\n self._ui.bt_right.setEnabled(True)\n self._ui.bt_left.setEnabled(True)\n elif len(files) == 1:\n self._ui.bt_left.setEnabled(True)\n self._ui.bt_right.setEnabled(True)\n self._ui.bt_next.setEnabled(False)\n self._ui.bt_prev.setEnabled(False)\n else:\n self._ui.bt_left.setEnabled(False)\n self._ui.bt_right.setEnabled(False)\n self._ui.bt_next.setEnabled(False)\n self._ui.bt_prev.setEnabled(False)\n\n self.refresh_images()",
"def show_file(filename, row=None):\r\n # Check if file exists if being referred to file system\r\n if os.path.exists(filename):\r\n # Get active window\r\n window = sublime.active_window()\r\n window.focus_group(0)\r\n # Check if file is already open\r\n found = False\r\n view = window.find_open_file(filename)\r\n if not view is None:\r\n found = True\r\n window.focus_view(view)\r\n # Set focus to row (line number)\r\n show_at_row(view, row)\r\n # Open file if not open\r\n if not found:\r\n view = window.open_file(filename)\r\n window.focus_view(view)\r\n # Set focus to row (line number) when file is loaded\r\n S.SHOW_ROW_ONLOAD[filename] = row",
"def on_activated(self, widget, row, col):\n model = widget.get_model()\n item = model[row][1]\n if os.path.isdir(item) and opt_cmd == 'xdg-open':\n open_file(item)\n elif opt_force:\n open_file(item)\n else:\n md = gtk.MessageDialog(None, \n gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, \n gtk.BUTTONS_YES_NO, \"Run with %s?\" % opt_cmd)\n res = md.run()\n if res == gtk.RESPONSE_YES:\n open_file(item)\n md.destroy()",
"def showOpenImageDialog(self, event):\r\n openImageDialog = wx.FileDialog(self, \"Open\",\r\n style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\r\n if openImageDialog.ShowModal() == wx.ID_CANCEL:\r\n return\r\n self.setImage(openImageDialog.GetPath())",
"def select_file() -> True:\n current_directory = os.getcwd()\n selected_file = eg.fileopenbox(title=EG_TITLE+': Open a file',\n default=os.path.join(current_directory, \"..\"),\n filetypes=\"*.txt,*.py\")\n print(f\"Selected file: {os.path.basename(selected_file)}\")\n print(f\"In directory: {os.path.dirname(selected_file)}\")\n return True",
"def open_file(self):\n try:\n filename = tkFileDialog.askopenfilename()\n file = open(filename)\n self.image_window.status.config(text='Opened: ' + filename)\n return file\n except:\n self.status.config(text='You fool!')\n tkMessageBox.showwarning(\"Open file\",\n \"Cannot open file \" + filename)\n return None",
"def open(self):\n file = askopenfilename(\n initialdir=self.initial_directory,\n filetypes=(\n (\"Audio Video Interleave\", \"*.avi\"),\n (\"Matroska\", \"*.mkv\"),(\"MPEG-4 AVC\",\"*.mp4\"),\n )\n )\n if isinstance(file, tuple):\n return\n if os.path.isfile(file):\n self.play_film(file)",
"def browse_source(self, event=None):\n if self.app.children:\n fileName=self.app.childActive.source.getWordFileName(whole=1)\n if fileName and fileName[0]!='\"':\n self.openList(fileName)\n else:\n if not fileName: fileName=''\n self.SetActiveStatusText('Sorry, can not locate file %s'%fileName)",
"def _open_files(view, sel):\n schema, word = get_names(view, sel)\n file_name = word + '.sql'\n path = [schema, None, file_name]\n files = find_file(view.window().folders(), path)\n if len(files) > 5:\n print('something is wrong; too many files; aborting')\n return\n for f in files:\n view.window().open_file(f)",
"def open_video(self):\n\n \n self.filename_temp, _ = QFileDialog.getOpenFileName(self, \"Open Video\")\n\n if self.filename_temp != '':\n if self.filename_temp[-3:] == \"mp4\" or self.filename_temp[-3:] == \"wav\" or self.filename_temp[-3:] == \"wmv\" or self.filename_temp[-3:] == \"mov\":\n self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile(self.filename_temp)))\n self.playBtn.setEnabled(True)\n self.saveBtn.setEnabled(True)\n self.videoOpened = True\n self.clear_annotation()\n self.filename = self.filename_temp\n\n\n else:\n message = QMessageBox()\n message.setWindowTitle(\"Fail\")\n message.setText(\"Please choose a file with one of the following extensions:\\nmp4, wav, mov or wmv.\")\n x = message.exec_() # this will show our messagebox¨¨\n\n\n elif self.filename_temp == '' and self.videoOpened:\n self.filename = self.filename\n elif self.filename_temp == '' and not self.videoOpened:\n self.filename = None",
"def on_lookpushButton_clicked(self):\n # TODO: not implemented yet\n self.openFile()",
"def open_viewer(self):\r\n choice = self.thoughts_lst.get(tk.ACTIVE)\r\n subject = self.refference[choice]\r\n tbl = self.home_table[subject]\r\n view = kit.SQL_pull('*', tbl, 'subject_id = \"{}\"'.format(subject))\r\n obj = kit.class_fill(tbl, view[0])\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jv.Viewer(self.session, obj)",
"def openFile(self, index):\n page_name = index.data().toString()\n file_name = self.file_names[str(page_name)]\n self.main_help_window.setHtml(open(file_name, 'r').read())",
"def on_open(self):\n\n ftypes = [('CSV', '.csv'), ('JSON', '.json'), ('All files', '*')]\n dlg = filedialog.Open(self, filetypes=ftypes)\n\n absolute_file_path = dlg.show()\n \n if absolute_file_path:\n # extract the file name from the absolute path\n file_name = absolute_file_path.split('/')[len(absolute_file_path.split('/')) - 1]\n \n # update the label text\n self.selected_file_name.configure(text=file_name)\n\n self.__set_full_path_of_file(absolute_file_path)\n else:\n # update the label text\n self.selected_file_name.configure(text=\"<Selected file name>\")\n\n self.__set_full_path_of_file(None)",
"def menu_open_files(self, event=None):\n self.parentPanel.open(event)",
"def file_menu_open_activate(self, widget, data=None):\n self.open_chooser.show()",
"def open_file_browser(path: str):\n call(file_browser + [path])",
"def on_openFilesButton_clicked(self):\n self.__enableFindButton()",
"def openFile(self, path=None):\n if not path:\n dialog = OpenDialog()\n dialog.set_folders_only(False)\n path = dialog.getOpenFileName(\n self,\n \"Open File\",\n '',\n \"ReStructuredText Files (*.rst *.txt)\"\n )\n\n if path:\n file_path = Path(path[0])\n filename = file_path.name\n tree_dir = file_path.parent.absolute()\n self.handleFileChanged(tree_dir, filename)",
"def open_file(self): # need to fix this to open in a new window\n\t\tself.file_path = filedialog.askopenfilename()\n\t\tf = open(self.file_path)\n\t\tfreader = f.read()\n\t\tself.textBox.insert(END, freader)",
"def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)",
"def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)",
"def display_media_cli():\r\n\r\n global config_var # Using the global variable that reads and modifies the configuration file\r\n\r\n cursor = connection.cursor()\r\n count = 0 # The total amount of items displayed\r\n\r\n # Parsing the entire database and displaying every record that matches the current media folder\r\n cursor.execute(\"SELECT COUNT(*) FROM media\")\r\n\r\n number_of_entries = cursor.fetchone() # This variable will store the amount of records in the database\r\n\r\n for i in range(number_of_entries[0]):\r\n cursor.execute(\"SELECT full_path FROM media WHERE id = \" + str(i + 1))\r\n entry_path = cursor.fetchone() # This variable will store the path of the currently selected item\r\n\r\n # Checking if the currently selected item from the database is located in the media folder\r\n if (os.path.dirname(entry_path[0])) == config_var['MEDIA FOLDER']['folder']:\r\n print(\"\\n\" + os.path.basename(entry_path[0]) + \" || ID: \" + str(i + 1))\r\n count += 1\r\n\r\n if not count: # No items could be found\r\n print(\"\\nThere are no media files in the media folder.\")\r\n\r\n cursor.close()",
"def open_file(self, widget, data=None):\n\n #Displays a fiel chooser dialog\n dialog = gtk.FileChooserDialog(\"Open..\",None,\n gtk.FILE_CHOOSER_ACTION_OPEN,\n (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,\n gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n dialog.set_default_response(gtk.RESPONSE_OK)\n\n response = dialog.run()\n \n self.drawing.set_initial_values()\n self.drawing.cr.move_to(20,20)\n\n if response == gtk.RESPONSE_OK:\n self.filename = dialog.get_filename() \n self.window.set_title(\"Python Viewer - \" + self.filename )\n\n try: \n ifile = open(self.filename, 'r')\n self.drawing.text = ifile.read().split('\\n')\n #self.drawing.text = ifile.read()\n ifile.close()\n dialog.destroy()\n \n self.drawing.line_count = len(self.drawing.text)\n \n self.drawing.parse_text()\n\n self.drawing.redraw_canvas(0) \n except IOError:\n pass\n \n elif response == gtk.RESPONSE_CANCEL:\n self.window.set_title(\"Python Viewer\")\n dialog.destroy()"
] | [
"0.70973283",
"0.6602566",
"0.64204323",
"0.61612755",
"0.61230564",
"0.61121106",
"0.60914075",
"0.6049491",
"0.6022753",
"0.60044396",
"0.5955318",
"0.5933987",
"0.59316117",
"0.58974123",
"0.58972645",
"0.5885088",
"0.5837336",
"0.582319",
"0.57997763",
"0.5786882",
"0.57730806",
"0.5751144",
"0.57195204",
"0.5712727",
"0.5660116",
"0.565519",
"0.5650857",
"0.5650857",
"0.5636415",
"0.56161606"
] | 0.74040633 | 0 |
Load case text for selected_text_file. | def load_case_text(self):
self.case_text = []
if self.selected_text_file is None:
return
cur = self.app.conn.cursor()
cur.execute("select caseid, fid, pos0, pos1, owner, date, memo from case_text where fid = ? and caseid = ?",
[self.selected_text_file[ID], self.case['caseid']])
result = cur.fetchall()
for row in result:
self.case_text.append({'caseid': row[0], 'fid': row[1], 'pos0': row[2],
'pos1': row[3], 'owner': row[4], 'date': row[5], 'memo': row[6]}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(file):\n\n try:\n with open(file) as in_file:\n loaded_text = in_file.read().strip().split(\"\\n\")\n loaded_text = [x.lower() for x in loaded_text]\n return loaded_text\n except IOError as e:\n print(\"{}\\n Error opening {}. Terminationg program.\".format(e,file), file = sys.stderr)\n sys.exit()",
"def load_file(self):\n return tkinter.filedialog.askopenfilename(defaultextension=\".txt\")",
"def load_file(self, file_path):\n self.disabled = True\n if not EVENTS['IS_OBJ']:\n self.disabled = False\n with open(file_path, 'r') as file:\n data = file.read()\n file.close()\n self.text = data\n EVENTS['EDITOR_SAVED'] = True",
"def loadText(self,textFileName):\n textFile = file(textFileName,'rb')\n reHeader = re.compile('^# ([a-zA-Z_0-9]+)')\n id,lines,changed = None,[],[]\n id_records = dict((record.id.lower(),record) for record in self.scripts)\n def unBuffer():\n record = id and id_records.get(id.lower())\n if record:\n code = (''.join(lines)).strip()\n if code.lower() != record.sctx.data.strip().lower():\n record.setCode(code)\n changed.append(id)\n for line in textFile:\n maHeader = reHeader.match(line)\n if maHeader:\n unBuffer()\n id,lines = maHeader.group(1),[]\n elif id: \n lines.append(line)\n textFile.close()\n unBuffer()\n return sorted(changed,key=string.lower)",
"def readTextFromFile(self, filename):\r\n f = open(filename)\r\n self.text = f.read()\r\n f.close()",
"def load_txt(filename, **kwargs):\n with sys_open(filename, 'r', **kwargs) as f:\n return f.readlines()",
"def loadText(self,filePath):\n ins = file(filePath,'r')\n reComment = re.compile(r\"#.*\")\n reSection = re.compile(r'@ +(srcmod|replace)',re.M)\n reReplace = re.compile(r\"(\\w[-\\w ']+)\\s*:\\s*(.+)\")\n reNewIds = re.compile(r\",\\s*\")\n mode = None\n for line in ins:\n line = reComment.sub('',line.strip())\n maSection = reSection.match(line)\n if maSection:\n mode = maSection.group(1)\n elif not line: #--Empty/comment line\n pass\n elif mode == 'srcmod':\n self.srcModName = line\n elif mode == 'replace':\n maReplace = reReplace.match(line)\n if not maReplace: continue\n oldId = maReplace.group(1)\n self.newIds[oldId.lower()] = reNewIds.split(maReplace.group(2))\n ins.close()",
"def from_text_file(cls, filename):\n raise NotImplementedError()",
"def loadTextFile(self):\n if self.tempFilePath is None or not MyFile.checkFileExists(self.tempFilePath):\n raise Exception(\"Temporary text file does not exist!\")\n\n io = Ioread()\n self.sentencesList = io.readFileContentList(self.tempFilePath)",
"def open_file(self):\n files = [('Text Document', '*.txt'), ('PDF Document', '*.pdf'), ('Word Document', '*.docx')]\n text_file = askopenfile(mode='r', title=\"Open your file\", filetypes=files,\n defaultextension=files)\n if text_file is not None:\n self.file_path = text_file.name\n text_inside = self.file.load_file(text_file.name)\n text_file.close()\n self.textbox.delete(\"1.0\", tk.END)\n self.textbox.insert(\"1.0\", text_inside)\n self.text = self.textbox",
"def read_text(filepath):\n\n text = open(filepath, encoding = \"utf8\").read()\n \n if text_lower:\n return text.lower()\n\n return text",
"def load(filename):\n try:\n with open(filename) as in_file:\n loaded_txt = in_file.read().strip().split(\"\\n\")\n loaded_txt = [x.lower() for x in loaded_txt]\n return loaded_txt\n except IOError as e:\n print(\"{}\\nError opening {}. Terminating program.\".format(e, filename))\n # sys.exit(1)",
"def file_open(self):\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File')\n\n with open(filename, 'r', encoding=\"utf8\") as file:\n self.file_cont = file.readlines()\n self.textToAnalize.setText(''.join(self.file_cont))",
"def load_simplified_conversation_text(filename, conv_number):\n pass",
"def open_file_dialog(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_name, _ = QFileDialog.getOpenFileName(self,\n \"Select text file\",\n \"\",\n \"Text Files(*);;\",\n options=options)\n if file_name:\n try:\n content = read_file(file_name)\n self.ui.plainTextEdit.setPlainText(\"\".join(content))\n except:\n QMessageBox.question(self, 'Error', \"Chosen file is not text\",\n QMessageBox.Ok | QMessageBox.NoButton)",
"def get_text(path):\n with io.open(path, 'r', encoding='utf8') as f:\n return f.read().lower()",
"def load_text_file(file_path: str):\n with open(file_path) as f:\n content = f.readlines()\n return content",
"def load_text(filename):\n\n return \" \".join(list(\n map(\n lambda word: word.strip(), open(filename))))",
"def read_filename(self, filename):\r\n self.text_lines = task3.read_text_file(filename)",
"def load_input(self, path):\n f = codecs.open(path, 'r', 'utf-8')\n raw_text = f.read()\n return raw_text",
"def test_load_text():\n # Create lexer without value\n lexer = lex._lexer(None, None)._load_text(\"TEST\")\n\n # Check if the loaded text\n assert lexer._original_text == \"TEST\" and lexer._text_to_process == \"TEST\"",
"def read_file(self):\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n self.filename = askopenfilename(title='Select Hospital Text File') # show an \"Open\" dialog box and return the path to the selected file",
"def __init__(self, file_path):\n\n\t\tsuper(Text, self).__init__()\n\n\t\tself.open_file_path(file_path)\n\t\tself.preprocess_raw_text()\n\t\tself.concatenate_processed_text()\n\t\tself.generate_list_of_words()\n\n\t\tself.name = split(file_path)[-1]",
"def loadText(self,textFileName):\n #--Text File\n infoKey = None\n text = None\n texts = {}\n reHeader = re.compile('^#')\n reInfo = re.compile('@ +(\\d) +\"(.+?)\" +(\\d+)')\n reSingleQuote = re.compile('[\\x91\\x92]')\n reDoubleQuote = re.compile('[\\x93\\x94]')\n reEllipsis = re.compile('\\x85')\n reEolSpaces = re.compile(r' +\\r\\n')\n reExtraSpaces = re.compile(r' +')\n reIllegalChars = re.compile(r'[@#]')\n #--Read file\n textFile = file(textFileName,'rb')\n for line in textFile:\n if reHeader.match(line): continue\n maInfo = reInfo.match(line)\n if maInfo:\n infoKey = (int(maInfo.group(1)),maInfo.group(2),maInfo.group(3))\n texts[infoKey] = text = []\n else:\n text.append(line)\n textFile.close()\n #--Strip and clean texts\n updated = []\n unmatched = []\n trimmed = {}\n for infoKey in texts.keys():\n if infoKey not in self.infos:\n unmatched.append(infoKey)\n continue\n text = ''.join(texts[infoKey])\n #--Required Subs\n text = text.strip(' \\r\\n')\n text = reSingleQuote.sub('\\'',text)\n text = reDoubleQuote.sub('\"',text)\n text = reEllipsis.sub('...',text)\n text = reIllegalChars.sub('',text)\n #--Optional subs\n text = reEolSpaces.sub('\\r\\n',text)\n text = reExtraSpaces.sub(' ',text)\n #--Trim?\n if len(text) > 511:\n trimmed[infoKey] = (text[:511],text[511:])\n text = text[:511]\n info = self.infos[infoKey]\n if text != info.text:\n info.text = text\n info.setChanged()\n updated.append(infoKey)\n #--Report\n buff = cStringIO.StringIO()\n for header,infoKeys in ((_('Updated'),updated),(_('Unmatched'),unmatched)):\n if infoKeys:\n buff.write('=== %s\\n' % (header,))\n for infoKey in infoKeys:\n buff.write('* %s\\n' % (infoKey,))\n if trimmed:\n buff.write('=== %s\\n' % (_('Trimmed'),))\n for infoKey,(preTrim,postTrim) in trimmed.items():\n buff.write(`infoKey`+'\\n'+preTrim+'<<<'+postTrim+'\\n\\n')\n return buff.getvalue()",
"def load_text_file(file_name: str) -> str:\r\n try:\r\n with open(file_name, encoding='windows-1251') as file_object:\r\n return file_object.read()\r\n except FileNotFoundError as err:\r\n print(f\"{err}\\n\"\r\n f\"Please make sure the file you are trying to open exists!\")\r\n quit()",
"def os_open_txt_file( cls, txt_file ):\n cls.file_text_editor.os_call( txt_file )",
"def open_text(name):\n\t# Load data for each from from a file (will be part of your data processing script)\n\tinput_file = open(name+ '.pickle','r')\n\ttext = pickle.load(input_file)\n\treturn text",
"def loadtxt(fname, **kwargs):\n\n return call_origin(numpy.loadtxt, fname, **kwargs)",
"def loadCodeFromFile():\n global notes_text\n\n notes_text.delete(\"1.0\", END)\n load_interface = Tk()\n load_interface.filename = filedialog.askopenfilename( initialdir = (\"../Templates\") ,title = \"Select file\",filetypes = ((\"Bit Tune File\",\"*.btu\"),(\"All Files\",\"*.*\")))\n load_interface.destroy()\n\n with open (load_interface.filename, 'r') as f:\n code = f.read()\n notes_text.insert(END, str(code))",
"def load_text(self, encoding='utf8', encoding_errors='ignore'):\n log.error('Cannot load: %s', self.file_name)"
] | [
"0.6466261",
"0.6265181",
"0.6237055",
"0.62311083",
"0.61719495",
"0.61592454",
"0.6108069",
"0.60587436",
"0.6046481",
"0.5990657",
"0.59851503",
"0.588781",
"0.58720994",
"0.58479667",
"0.58400893",
"0.5826861",
"0.5807017",
"0.57860124",
"0.57841766",
"0.57835245",
"0.5767114",
"0.57588714",
"0.57382435",
"0.57158923",
"0.57080936",
"0.56910944",
"0.5667034",
"0.5665633",
"0.5662631",
"0.5644149"
] | 0.7759995 | 0 |
Context menu for textBrowser. Mark, unmark, copy, select all. | def text_browser_menu(self, position):
if self.ui.textBrowser.toPlainText() == "":
return
cursor = self.ui.textBrowser.cursorForPosition(position)
selected_text = self.ui.textBrowser.textCursor().selectedText()
menu = QtWidgets.QMenu()
menu.setStyleSheet("QMenu {font-size:" + str(self.app.settings['fontsize']) + "pt} ")
action_select_all = None
action_mark = None
action_unmark = None
action_copy = None
if selected_text == "":
action_select_all = menu.addAction(_("Select all"))
if selected_text != "" and not self.is_marked():
action_mark = menu.addAction(_("Mark"))
if selected_text != "":
action_copy = menu.addAction(_("Copy"))
for item in self.case_text:
if item['pos0'] <= cursor.position() <= item['pos1']:
action_unmark = menu.addAction(_("Unmark"))
break
action = menu.exec(self.ui.textBrowser.mapToGlobal(position))
if action is None:
return
if action == action_mark:
self.mark()
if action == action_unmark:
self.unmark(position)
if action == action_copy:
self.copy_selected_text_to_clipboard()
if action == action_select_all:
self.ui.textBrowser.selectAll() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def text_select_context_menu_click (selectedtext) :\n\tsettings = Composition.CompositionManager.Get[Interfaces.Settings.IApplicationSettingsProvider]()\n\tsettings.GlobalExclude.Add(selectedtext)",
"def on_context_menu(self, event):\n self.declaration.context_menu_event()",
"def select_editor_contextual(menuName, onselected=None, *args, **kwargs):\n\n process_all_events()\n windows = Gtk.Window.list_toplevels()\n click_in_text(GPS.EditorBuffer.get().current_view().cursor(), button=3)\n\n def internal_onselected(windows):\n close_contextual(windows)\n process_all_events()\n if onselected:\n onselected(*args, **kwargs)\n\n GLib.idle_add(internal_onselected, windows)\n activate_contextual(windows, menuName)",
"def contextMenuEvent(self, event):\n menu = self.createStandardContextMenu()\n menu.removeAction(menu.actions()[0])\n menu.removeAction(menu.actions()[0])\n menu.insertSeparator(menu.actions()[0])\n menu.insertAction(menu.actions()[0], self.treeSelectAction)\n self.treeSelectAction.setEnabled(self.isChildView and\n len(self.toPlainText().strip()) > 0)\n menu.exec_(event.globalPos())",
"def request_context_menu(self, pos):\n pass",
"def about_right_click(event):\n popup_menu = Menu(tearoff=0)\n popup_menu.add_command(label='Copy')\n\n popup_menu.post(event.x_root, event.y_root)",
"def contextMenuEvent(self, event):\r\n self.selected = self.selectionModel().selection().indexes()\r\n #define context menu items\r\n menu = QtGui.QMenu()\r\n opis = QtGui.QAction('opis', self)\r\n menu.addAction(opis)\r\n #connect context menu items\r\n opis.triggered.connect(self.emit_izabrani_fault)\r\n #display context menu\r\n menu.exec_(self.mapToGlobal(event.pos()))",
"def on_menuitem_select(self, id):\n\t\tif id == _('Copy'):\n\t\t\tself.clipboard.clear()\n\t\t\tself.clipboard.set_with_data( [('x-special/gnome-copied-files', 0, 0),('text/uri-list',0,0)],self.clipboardGet, self._clipboardClearFuncCb, self.cp)\n\t\t\t\n\t\telif id == _('Delete'):\n\t\t\tif screenlets.show_question(self,_(\"Delete \") + self.cp + ' ?'):\n\t\t\t\tos.system('rm -rf ' + chr(34) + self.cp.replace('file://','') + chr(34))\n\t\telif id == _('Paste'):\n\t\t\tfiles = self.clipboard.wait_for_text().split('\\n')\n\t\t\tfor f in files:\n\t\t\t\tif os.path.exists(str(f).replace('file://','')):\n\t\t\t\t\tos.system('cp ' + chr(34) + str(f).replace('file://','') + chr(34)+ ' ' + chr(34) + self.folder_path_current+ chr(34))\n\t\t\t\t\t\n\t\telif id == \"export\":\n\t\t\tself.show_edit_dialog()\n\n\t\telif id == \"lock_align_all\":\n\t\t\tfor fv in folderview_list:\n\t\t\t\tfv.lock_position = True\n\t\t\t\tfv.window_auto_align = True\n\n\t\telif id == \"unlock_all\":\n\t\t\tfor fv in folderview_list:\n\t\t\t\tfv.lock_position = False\n\t\t\t\tfv.window_auto_align = False\n\n\t\tif id in self.apps_list:\n\t\t\tos.system(self.apps_execs[id]+ ' &')\n\t\tself.clicked = False\n\t\tself.on_mouse_leave(0)",
"def contextMenuEvent(self, event):\n menu = QtWidgets.QMenu(self)\n\n menu.addAction(cuegui.Action.create(self,\n \"Select matching jobs (Enter)\",\n \"Select matching jobs\",\n self._actionSelect))\n\n menu.addAction(cuegui.Action.create(self,\n \"Clear\",\n \"Clear text\",\n self.actionClear))\n\n menu.exec_(QtCore.QPoint(event.globalX(), event.globalY()))",
"def exec_selected_text(self):\r\n editortabwidget = self.get_current_editortabwidget()\r\n editortabwidget.exec_selected_text()",
"def __contextMenuRequested(self, pos):\n menu = QMenu(self)\n \n menu.addAction(self.tr(\"Open\"), self.__openFile)\n menu.addAction(self.tr(\"Copy Path to Clipboard\"),\n self.__copyToClipboard)\n \n menu.exec_(QCursor.pos())",
"def aboutToShowContextMenuEvent(self):\n\t\tpass",
"def contextMenuEvent(self,event):\n\t\tmenu=self.createStandardContextMenu ()\n\t\tmenu.addAction(self.actionLaunchCharWidgetTable)\n\t\tmenu.exec_(event.globalPos())",
"def addContextMenuItems(*args):",
"def set_mouse_selection(self, item, mpos):\r\n if item.is_mouse_selection(mpos):\r\n item.set_font_color(RED)\r\n item.set_italic(True)\r\n else:\r\n item.set_font_color(WHITE)\r\n item.set_italic(False)",
"def widget_ctx_menu(self):\n def toggle_step():\n self.showStepExponent = not self.showStepExponent\n\n def toggle_write():\n self.writeOnPress = not self.writeOnPress\n\n menu = self.lineEdit().createStandardContextMenu()\n menu.addSeparator()\n ac = menu.addAction('Toggle Show Step Size')\n ac.triggered.connect(toggle_step)\n\n ac_write = menu.addAction('Toggle Write On Press')\n ac_write.triggered.connect(toggle_write)\n\n return menu",
"def set_mouse_selection(self, item, mpos):\r\n\t\tif item.is_mouse_selection(mpos):\r\n\t\t\titem.set_font_color(YELLOW)\r\n\t\t\titem.set_italic(True)\r\n\t\telse:\r\n\t\t\titem.set_font_color(WHITE)\r\n\t\t\titem.set_italic(False)",
"def __toggleBookmark(self):\n self.activeWindow().menuToggleBookmark()",
"def onContextMenu(self, event):\n # Skipping the save state functionality for release 0.9.0\n # return\n pos = event.GetPosition()\n pos = self.ScreenToClient(pos)\n self.PopupMenu(self.popUpMenu, pos)",
"def _(event):\n # Take the current cursor position as the start of this selection.\n buff = event.current_buffer\n if buff.text:\n buff.start_selection(selection_type=SelectionType.CHARACTERS)",
"def selChgCmd(self, *args):\n self.tDisp.selId = self.tDisp.selection()\n self.tDisp.selIdx = self.tDisp.index(self.tDisp.selId)\n self.event_generate('<<SelItem>>', x=self.tDisp.selIdx)",
"def request_context_menu(self, pos):\n super(ItemListView, self).request_context_menu(pos)\n self.get_selected()\n self.manage_actions()\n self.display_context_menu(pos)",
"def menu_keyboard_shortcuts(self, event=None):\n self.link('http://pythonide.stani.be/manual/html/manual12.html')",
"def handle_right_click(self, event):\n c=self.seqframe\n if 'textlabel' in c.gettags(CURRENT):\n self.currobjs = c.find_withtag(CURRENT)\n return",
"def _select_and_cut_text(wordCount):\n clipboard = Clipboard()\n clipboard.set_system_text('')\n Key('cs-left/3:%s/10, c-x/10' % wordCount).execute()\n return clipboard.get_system_text()",
"def _event(self, event):\n self._event_select_text(event)",
"def context_menu(self, treeview, position):\n\n all_item = get_current_item(self,treeview,single=False)\n\n if len(all_item) == 1:\n\n item = all_item[0]\n\n list_operations = ['Print attrs','-','Plot Hist', 'Plot 2D']\n action,actions = get_actions(treeview,position,list_operations)\n\n if action == actions['Print attrs']:\n send_dict_to_console(self,item,treeview)\n #print_attributes(self,item,treeview)\n\n if action == actions['Plot Hist']:\n plot_histogram(self,item,treeview)\n\n if action == actions['Plot 2D']:\n plot2d(self,item,treeview)\n\n elif len(all_item) == 2:\n\n item0,item1 = all_item\n\n list_operations = ['Plot Scatter','Plot Line']\n action,actions = get_actions(treeview,position,list_operations)\n\n if action == actions['Plot Scatter']:\n plot1D(self,item0,item1,treeview,plot='scatter')\n\n if action == actions['Plot Line']:\n plot1D(self,item0,item1,treeview,plot='line')",
"def showContextMenu(self, event):\r\n menu = wx.Menu()\r\n menu.Append(wx.ID_OPEN, \"Open...\\tCtrl+O\", \"Open an image...\", )\r\n menu.Append(wx.ID_SAVE, \"Save\\tCtrl+S\", \"Save the cropped image...\")\r\n menu.AppendSeparator()\r\n menu.Append(wx.ID_ABOUT, \"About\\tCtrl+I\", \"About this program...\")\r\n\r\n menu.Bind(wx.EVT_MENU, self.showOpenImageDialog, id=wx.ID_OPEN)\r\n menu.Bind(wx.EVT_MENU, self.saveImage, id=wx.ID_SAVE)\r\n menu.Bind(wx.EVT_MENU, self.showAboutDialog, id=wx.ID_ABOUT)\r\n\r\n self.PopupMenu(menu, event.GetPosition())\r\n menu.Destroy()",
"def txt_area_popup_menu(self, event=None):\n try:\n text_area = self.get_current()\n state1 = 'disabled'\n state2 = 'disabled'\n if text_area.modified:\n state1 = 'normal'\n if text_area.count('sel.first', 'sel.last', 'chars')[0] != 'None':\n state2 = 'normal'\n self.popup_menu.entryconfigure(0, state=state1)\n self.popup_menu.entryconfigure(1, state=state1)\n self.popup_menu.entryconfigure(2, state=state2)\n self.popup_menu.entryconfigure(3, state=state2)\n self.popup_menu.post(event.x_root, event.y_root)\n finally:\n self.popup_menu.grab_release()",
"def _context_menu_make(self, pos):\n format = self._control.cursorForPosition(pos).charFormat()\n name = format.stringProperty(QtGui.QTextFormat.ImageName)\n if name:\n menu = QtGui.QMenu()\n\n menu.addAction('Copy Image', lambda: self._copy_image(name))\n menu.addAction('Save Image As...', lambda: self._save_image(name))\n menu.addSeparator()\n\n svg = self._name_to_svg_map.get(name, None)\n if svg is not None:\n menu.addSeparator()\n menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg))\n menu.addAction('Save SVG As...',\n lambda: save_svg(svg, self._control))\n else:\n menu = super(RichJupyterWidget, self)._context_menu_make(pos)\n return menu"
] | [
"0.69107985",
"0.6675353",
"0.6581614",
"0.6338795",
"0.6309858",
"0.62430674",
"0.6161751",
"0.61062753",
"0.6101523",
"0.60749346",
"0.60675216",
"0.6040953",
"0.59743303",
"0.5966907",
"0.592536",
"0.5858884",
"0.5840029",
"0.583531",
"0.57941383",
"0.57554394",
"0.57290405",
"0.5713741",
"0.5674589",
"0.56739795",
"0.56711346",
"0.5647526",
"0.56403446",
"0.56317633",
"0.56158686",
"0.56106097"
] | 0.75562125 | 0 |
Check current text selection and return False if not marked and True if marked. | def is_marked(self):
pos0 = self.ui.textBrowser.textCursor().selectionStart()
pos1 = self.ui.textBrowser.textCursor().selectionEnd()
for c in self.case_text:
if c['pos0'] <= pos0 <= c['pos1']:
return True
if c['pos0'] <= pos1 <= c['pos1']:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hasSelectedText(self):\n return self.textCursor().hasSelection()",
"def HasSelection(self):\n sel = super(EditraBaseStc, self).GetSelection()\n return sel[0] != sel[1]",
"def IsSelected(self):\r\n\r\n return self._hasHilight != 0",
"def _can_add_text(self):\n return self.textCursor().selectionStart() >= self._prev_input_indexes[-1]",
"def containsCursor(self, textCursor):\n return self.cursor.selectionStart() <= textCursor.position() < \\\n self.cursor.selectionEnd()",
"def HasMultilineSelection(self):\n bMulti = False\n sel = super(EditraBaseStc, self).GetSelection()\n if sel[0] != sel[1]:\n sline = self.LineFromPosition(sel[0])\n eline = self.LineFromPosition(sel[1])\n bMulti = sline != eline\n return bMulti",
"def text_editor():\n return True",
"def is_selected(self) -> bool:\r\n return self.selected",
"def requires_selection(self) -> bool:\n return True",
"def is_selected(self) -> bool:\n return self.proto.is_selected",
"def is_on(self):\n return self._cur != -1",
"def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False",
"def is_selected(self):\n return self._element_call(lambda: self.el.is_selected)",
"def is_selected(self):\n return self._selected",
"def is_selected(self):\n return self._selected",
"def is_text( self ):\n return self.get_main_type() == 'text'",
"def is_selected(self):\n return self.container['is_selected']",
"def getMarked(self):\n if not self.selection.isSelection():\n return u\"\"\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx1 = sm1[1]\n cx2 = sm2[1]\n if (w1 == w2):\n return w1.string[cx1:cx2]\n # Get the word fragments at the beginning and end of the selection\n snip1 = w1.string[cx1:]\n snip2 = w2.string[:cx2]\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n # Start the text string with the format of the first line\n text = tl1.para.getFormat() + snip1\n # then get all intervening words\n if (tl1 == tl2): # only 1 line is involved\n # get words from wx1+1 to wx2-1 (incl.)\n for w in tl1.twords[wx1+1:wx2]:\n text += u\" \" + w.string\n ch = u\" \"\n\n else: # deletion block covers >1 line\n # get words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n text += u\" \" + w.string\n # get all the intervening lines\n while True:\n para = tl1.para\n tl1 = self.rsubject.nextLine(tl1)\n if (tl1.para == para):\n text += u\" \"\n else:\n text += u\"\\n\" + tl1.para.getFormat()\n if (tl1 == tl2): break\n text += tl1.getText()\n\n ch = u\"\"\n # Add the remaining words in tl2 up to w2-1\n for w in tl2.twords[:wx2]:\n text += ch + w.string\n ch = u\" \"\n\n # Add the fragment of the last marked word\n return text + ch + snip2",
"def test__markMarkdown_textOnly2(self):\n self._degrotesque._restoreDefaultElementsToSkip()\n assert(self._degrotesque._markMarkdown(\"a\")==\"0\")",
"def autoselect(self):\n # type: () -> bool\n return self._autoselect",
"def CanCopy(self):\n return self.HasSelection()",
"def keyboard_on_key_down(self, window, keycode, text, modifiers):\n if super(SelectableLayout, self).keyboard_on_key_down(\n window,\n keycode,\n text,\n modifiers,\n ):\n return True\n if self.select_with_key_down(window, keycode, text, modifiers):\n return True\n return False",
"def selected(self):\n\n return self.element().is_selected() if self.exists() else False",
"def isSetText(self):\n return _libsbml.TextGlyph_isSetText(self)",
"def matches(self, text):\n return text == self.command",
"def is_selected_main(self, xpos, ypos):\n\n if self.x < xpos < self.x + self.width and self.y < ypos < self.y + self.height:\n return True\n else:\n return False",
"def selectable(cls):\n return True",
"def check(self, text):\n\n try:\n console.print(self.parser.parse(text)[\"result\"][1:], style=\"green\")\n return True\n\n except:\n console.print(\"An error has occurred while trying to parse the typo!\", style=\"red\")\n return False",
"def is_selected(self, selector):\n el = self.locate_element(selector)\n return el.is_selected()",
"def check_marked_cell(table, row, col):\n\n\tif table.cell(row, col).paragraphs[0].runs[0].text == \"<>\": # если в указанной ячейке только маркер\n\t\treturn True\n\telse: \n\t\treturn False"
] | [
"0.7856161",
"0.69251645",
"0.67446756",
"0.66529745",
"0.63673097",
"0.63556296",
"0.62504935",
"0.62277675",
"0.6220633",
"0.61411035",
"0.6019499",
"0.59944403",
"0.59551316",
"0.59294784",
"0.59294784",
"0.5870629",
"0.5837775",
"0.5812784",
"0.5779514",
"0.57756186",
"0.5762915",
"0.57511663",
"0.57477045",
"0.5743805",
"0.5740094",
"0.5735407",
"0.5719685",
"0.56986016",
"0.56978744",
"0.5691852"
] | 0.7682713 | 1 |
Remove all text highlighting from current file. | def unlight(self):
if self.selected_text_file is None:
return
if self.selected_text_file[FULLTEXT] is None:
return
cursor = self.ui.textBrowser.textCursor()
try:
cursor.setPosition(0, QtGui.QTextCursor.MoveMode.MoveAnchor)
cursor.setPosition(len(self.selected_text_file[FULLTEXT]) - 1, QtGui.QTextCursor.MoveMode.KeepAnchor)
cursor.setCharFormat(QtGui.QTextCharFormat())
except Exception as e:
logger.debug((str(e) + "\n unlight, text length" + str(len(self.ui.textBrowser.toPlainText())))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_highlighting(self):\n for match in vim.eval('getmatches()'):\n if match['group'] == 'PSearchMatches':\n vim.command(\"call matchdelete({0})\".format(match['id']))",
"def __unhighlight(self):\n self.unhighlight()",
"def clear_highlight(self):\n core = cutter.core()\n highlighter = core.getBBHighlighter()\n for bblock in self.config['bb_hits']:\n highlighter.clear(bblock)",
"def remove_highlights(bv=None):\n if no_coverage_warn():\n return\n if bv is None:\n bv = gbv\n clear_highlights(covdb.total_coverage, bv)\n log.log_info(\"Highlights cleared.\")",
"def unhighlight(self, current=False):\n if current:\n if self.currentEditor is not None:\n self.currentEditor.highlight()\n else:\n for editor in self.editors:\n editor.highlight()",
"def no_highlight(): #py:no_highlight\n RUR._no_highlight_()",
"def restore_default_highlights(bv=None):\n highlight_set(covdb.total_coverage)\n log.log_info(\"Default highlight colors restored\")",
"def erase(self):\n self.view.erase_status('00_git_gutter')",
"def smart_highlight_off(self, buf):\n start, end = buf.get_bounds()\n if (self.update_colors or\n buf.get_tag_table().lookup('smart_highlight') == None):\n self.fill_tag_table(buf)\n buf.remove_tag_by_name('smart_highlight', start, end)",
"def _reset(self):\n\t\tself._style = TextStyle()",
"def clear_highlights(addr_set, bv):\n for addr in addr_set:\n blocks = bv.get_basic_blocks_at(addr)\n for block in blocks:\n block.set_user_highlight(HighlightStandardColor.NoHighlightColor)",
"def exit(self):\n if self.currentEditor is not None:\n self.currentEditor.highlight()\n self.currentEditor = None\n \n for editor in self.editors:\n editor.refreshCoverageAnnotations()\n \n self.__setSbFile()",
"def clear(self):\n self._editor.clear()",
"def clear_all(cls):\n del cls.text_labels[:]",
"def unmark(self, position):\n\n if self.selected_text_file is None:\n return\n if len(self.case_text) == 0:\n return\n cursor = self.ui.textBrowser.cursorForPosition(position)\n self.ui.textBrowser.setTextCursor(cursor)\n\n location = self.ui.textBrowser.textCursor().selectionStart()\n unmarked = None\n for item in self.case_text:\n if item['pos0'] <= location <= item['pos1']:\n unmarked = item\n if unmarked is None:\n return\n\n # Delete from database, remove from case_text and update gui\n cur = self.app.conn.cursor()\n cur.execute(\"delete from case_text where fid=? and caseid=? and pos0=? and pos1=?\",\n (unmarked['fid'], unmarked['caseid'], unmarked['pos0'], unmarked['pos1']))\n self.app.conn.commit()\n if unmarked in self.case_text:\n self.case_text.remove(unmarked)\n self.unlight()\n self.highlight()\n # The file may be assigned Yes in the table widget but should be empty\n self.get_files()\n self.fill_table()\n self.app.delete_backup = False",
"def clear(self):\n lines = self._lines\n image, bkg_image = self.image, self._image\n for line in lines: line.clear(image, bkg_image) #prej bkg_img\n self._cursor = 0",
"def __clearAllSyntaxErrors(self):\n for editor in self.editors:\n editor.clearSyntaxError()",
"def clearMouseSelection(self):\n pass",
"def withdraw(self):\n files = self._file_list\n for f in files:\n remove(str(f))\n self._file_list = []\n self._filename = \"\"",
"def clear_text(self):\n self.textBrowser.clear()\n #self.ser.flushInput()\n #self.clean_graph()",
"def deselectall(self):\n if self.selection:\n for node in self.selection[:]: node.deselect()",
"def _remove_background_colors(text) -> StyledStr:\n return _remove_regex(BACKGROUND_COLORS_REGEX, text)",
"def _remove_text_colors(text) -> StyledStr:\n return _remove_regex(FOREGROUND_COLORS_REGEX, text)",
"def removeOwnPunctuation(self):\n\t\tself.textFile = self.removePunctuation(self.open(self.filePath)).split()",
"def highlightCode(self, _event=None):\n count = 0\n if self.text.tag_ranges('sel'):\n self.text.tag_add('color' + str(count), tk.SEL_FIRST, tk.SEL_LAST)\n self.text.tag_configure('color' + str(count), foreground='black', background='yellow')\n count += 1\n else:\n # Do this if you want to overwrite all selection colors when you change color without selection\n # for tag in text.tag_names():\n # text.tag_delete(tag)\n self.text.config(foreground='yellow')\n\n fileContainingText = open(newTextFile, \"a\")\n\n hText = self.text.get(tk.SEL_FIRST, tk.SEL_LAST)\n fileContainingText.write(hText)",
"def highlightingTextInFile():\n savingFilePDF = re.sub('\\t', '', item_text[0] + \".pdf\")\n doc = fitz.open(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n page = doc[0]\n\n with open(newTextFile, \"r\") as file2:\n time.sleep(0.5)\n text1 = file2.read()\n\n # Search for the text in the PDF in order to highlight it\n text_instances = page.searchFor(text1, hit_max=200)\n\n # Loop though the text and add highlight to the text in the HighlightedText.txt file\n for inst in text_instances:\n print(inst, type(inst))\n page.addHighlightAnnot(inst)\n\n try:\n doc.save(gradedFilesFolder + \"\\\\\" + \"Corrected - \" + savingFilePDF,\n garbage=4, deflate=True, clean=True)\n doc.close()\n os.remove(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n\n except RuntimeError as error:\n print(\"PDF file may be open\" + str(error))",
"def highlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n format_ = QtGui.QTextCharFormat()\n cursor = self.ui.textBrowser.textCursor()\n for item in self.case_text:\n try:\n cursor.setPosition(int(item['pos0']), QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(int(item['pos1']), QtGui.QTextCursor.MoveMode.KeepAnchor)\n format_.setFontUnderline(True)\n format_.setUnderlineColor(QtCore.Qt.GlobalColor.red)\n cursor.setCharFormat(format_)\n except Exception as err:\n msg = \"highlight, text length \" + str(len(self.ui.textBrowser.toPlainText()))\n msg += \"\\npos0:\" + str(item['pos0']) + \", pos1:\" + str(item['pos1'])\n msg += \"\\n\" + str(err)\n logger.debug(msg)",
"def clear_specific(self):\n self.specific_file = None\n self.specific_parser = None\n\n self.specific_box.delete(0, END)",
"def cleanup(self):\r\n\r\n # Remove strip from window.\r",
"def remove_color(text):\n color_reg = re.compile(r\"\\033\\[\\d*(;\\d*)*m\")\n return re.sub(color_reg, \"\", text)"
] | [
"0.7206986",
"0.7137766",
"0.7049057",
"0.6944913",
"0.6892949",
"0.6847911",
"0.6067839",
"0.6041372",
"0.58566576",
"0.57954323",
"0.5773145",
"0.5745158",
"0.57420564",
"0.57248193",
"0.57221854",
"0.5707038",
"0.5656613",
"0.5656423",
"0.5652253",
"0.56450975",
"0.56089586",
"0.5608737",
"0.56011784",
"0.55996144",
"0.55850965",
"0.5575229",
"0.55571187",
"0.5551019",
"0.5546072",
"0.5544858"
] | 0.723039 | 0 |
Apply text highlighting to current file. Highlight text of selected case with red underlining. format_.setForeground(QtGui.QColor("990000")) | def highlight(self):
if self.selected_text_file is None:
return
if self.selected_text_file[FULLTEXT] is None:
return
format_ = QtGui.QTextCharFormat()
cursor = self.ui.textBrowser.textCursor()
for item in self.case_text:
try:
cursor.setPosition(int(item['pos0']), QtGui.QTextCursor.MoveMode.MoveAnchor)
cursor.setPosition(int(item['pos1']), QtGui.QTextCursor.MoveMode.KeepAnchor)
format_.setFontUnderline(True)
format_.setUnderlineColor(QtCore.Qt.GlobalColor.red)
cursor.setCharFormat(format_)
except Exception as err:
msg = "highlight, text length " + str(len(self.ui.textBrowser.toPlainText()))
msg += "\npos0:" + str(item['pos0']) + ", pos1:" + str(item['pos1'])
msg += "\n" + str(err)
logger.debug(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def highlightCode(self, _event=None):\n count = 0\n if self.text.tag_ranges('sel'):\n self.text.tag_add('color' + str(count), tk.SEL_FIRST, tk.SEL_LAST)\n self.text.tag_configure('color' + str(count), foreground='black', background='yellow')\n count += 1\n else:\n # Do this if you want to overwrite all selection colors when you change color without selection\n # for tag in text.tag_names():\n # text.tag_delete(tag)\n self.text.config(foreground='yellow')\n\n fileContainingText = open(newTextFile, \"a\")\n\n hText = self.text.get(tk.SEL_FIRST, tk.SEL_LAST)\n fileContainingText.write(hText)",
"def _conf_highlight(self):\n textbuffer = self.ref_object.get_buffer()\n tag_table = textbuffer.get_tag_table()\n c_tag = tag_table.lookup(\"colored\")\n if not c_tag:\n c_tag = textbuffer.create_tag(\"colored\", foreground=\"#000000\", background=\"#FFFF00\")\n text = textbuffer.get_text(textbuffer.get_bounds()[0], textbuffer.get_bounds()[1])\n textbuffer.delete(textbuffer.get_bounds()[0], textbuffer.get_bounds()[1])\n for line in re.split(r'\\r\\n|\\r|\\n', text):\n for e in re.compile(\"(\" + self.entry.get_text().lower() + \")\", re.I).split(line):\n if re.search(self.entry.get_text().lower(), e, re.I):\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), e, c_tag)\n else:\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), e)\n textbuffer.insert_with_tags(textbuffer.get_end_iter(), '\\n')",
"def _render_highlighted(\n document_text: str,\n begin: int,\n end: int,\n context_size: int = 0,\n highlight_color: str = \"On_Green\",\n) -> str:\n black_color = _get_text_color_from_list(\"Color_off\")\n return (\n document_text[begin - context_size : begin]\n + _get_text_color_from_list(highlight_color)\n + document_text[begin:end]\n + black_color\n + document_text[end : end + context_size]\n )",
"def paint(self):\n if self.config['colorize']:\n self.highlight()\n else:\n self.clear_highlight()",
"def _define_highlights(self):\n for ansi_code in dict.fromkeys([*self._colors.values(),\n *self._colors_special.values()]):\n code_safe = ansi_code.replace(';', '_')\n fg, bg, special = ansi_to_vim_color(ansi_code)\n args = ''\n if fg is not None:\n args += 'ctermfg=' + fg\n if bg is not None:\n args += ' ctermbg=' + bg\n if special: # special is never None\n args += ' cterm=' + special\n if args:\n cmd = f'hi color{code_safe} {args}'\n logger.debug(cmd)\n self._vim.command(cmd)",
"def _highlight(self, source):\n if not self.hasmarkup:\n return source\n try:\n from pygments.formatters.terminal import TerminalFormatter\n from pygments.lexers.python import PythonLexer\n from pygments import highlight\n except ImportError:\n return source\n else:\n return highlight(source, PythonLexer(), TerminalFormatter(bg=\"dark\"))",
"def open_highlight(self, pad, lang='c++'):\n pad.tag_configure('default', foreground='#e0115f')\n pad.tag_configure('loops', foreground='green')\n pad.tag_configure('P_datatypes', foreground='aqua')\n pad.tag_configure('quotes', foreground='gold')\n pad.tag_configure('A_datatypes', foreground='orange')\n for i in keywords[lang]:\n for j in keywords[lang][i]:\n self.highlight_pattern(pad, j, i)\n\n pattern = '\"([A-Za-z0-9_\\./\\\\-]*)\"'\n self.highlight_pattern(pad, pattern, 'quotes', '1.0', 'end', True)\n pattern = \"'([A-Za-z0-9_\\./\\\\-]*)'\"\n self.highlight_pattern(pad, pattern, 'quotes', '1.0', 'end', True)",
"def highlightBlock(self, text):\n for format_, expression in self.rules:\n # get first match\n index = expression.indexIn(text)\n while index >= 0:\n length = expression.matchedLength()\n self.setFormat(index, length, format_)\n # jump to next match\n index = expression.indexIn(text, index + length)\n self.setCurrentBlockState(0)",
"def format_text(self):\n for line, _ in enumerate(self.readlines()[:-1]):\n self.root.colour_line(line + 1)",
"def highlight_source(source):\n return highlight(source, PythonLexer(), HtmlFormatter())",
"def highlightingTextInFile():\n savingFilePDF = re.sub('\\t', '', item_text[0] + \".pdf\")\n doc = fitz.open(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n page = doc[0]\n\n with open(newTextFile, \"r\") as file2:\n time.sleep(0.5)\n text1 = file2.read()\n\n # Search for the text in the PDF in order to highlight it\n text_instances = page.searchFor(text1, hit_max=200)\n\n # Loop though the text and add highlight to the text in the HighlightedText.txt file\n for inst in text_instances:\n print(inst, type(inst))\n page.addHighlightAnnot(inst)\n\n try:\n doc.save(gradedFilesFolder + \"\\\\\" + \"Corrected - \" + savingFilePDF,\n garbage=4, deflate=True, clean=True)\n doc.close()\n os.remove(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n\n except RuntimeError as error:\n print(\"PDF file may be open\" + str(error))",
"def set_text_f(self, format, *args):\n self._text.set(format % args)\n self.change_bg(\"green\")\n self._label.update_idletasks()",
"def highlightBlock(self, text):\n # Do other syntax formatting\n for expression, nth, format in self.rules:\n index = expression.indexIn(text, 0)\n\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = len(expression.cap(nth))\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)",
"def highlightBlock(self, text):\n # Do other syntax formatting\n for expression, nth, format in self.rules:\n index = expression.indexIn(text, 0)\n format = self.styles[format]\n\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = len(expression.cap(nth))\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)\n\n # Do multi-line strings\n in_multiline = self.match_multiline(text, *self.tri_single)\n if not in_multiline:\n in_multiline = self.match_multiline(text, *self.tri_double)",
"def highlightBlock(self, text: str) -> None:\n # Do other syntax formatting\n for expression, nth, fmt in self.rules:\n index = expression.indexIn(text, 0)\n\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = len(expression.cap(nth))\n self.setFormat(index, length, fmt)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)",
"def highlight(string: str) -> str:\n return text_color(string, \"cyan\")",
"def highlight(self):\n \n try:\n code = pygments.highlight(self.code, self.lexer, self.formatter)\n highlighted = '\\n'.join(['<div class=\\'highlighted\\'>\\n',\n code,\n '\\n</div>',\n ])\n except Exception as ex:\n _log.error('wp_highlighter.highlight() error:\\n{}'.format(ex))\n highlighted = ''\n return highlighted",
"def highlight(self, on, **kw):\n tag = 'strong'\n if on:\n return self._open(tag, attr={'class': 'highlight'}, allowed_attrs=[], **kw)\n return self._close(tag)",
"def show(self):\n print highlight(self.current_content, self.lexer(), Formatter())",
"def highlightBlock(self, text):\n\n for expression, nth, format in self.rules:\n index = expression.indexIn(text, 0)\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = expression.cap(nth).length()\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n self.setCurrentBlockState(0)",
"def highlight(self, **highlight):\n self._evaluated = False\n self._highlight = highlight\n return self",
"def write_highlighted(self, data):\n for token, text in data:\n for c in text:\n self.write_char(c, token=token)",
"def _change_text_color(text, color_code) -> StyledStr:\n uncolored_fg = _remove_text_colors(text)\n return _apply_ansi_code(color_code, uncolored_fg)",
"def highlight_code(code, lexer=None):\n# See this page for help with colouring: http://pygments.org/docs/tokens/\n#\n#from pygments.styles.default import DefaultStyle\n#from pygments.style import Style\n#from pygments.styles import get_style_by_name\n#from pygments.token import Comment, Keyword, Name, String, Operator, Number\n#from pygments import formatters\n#class SciPyStyle(Style):\n #default_style = \"\"\n #styles = {\n ##Comment: '#888',\n ##Keyword: 'bold #080',\n ##Name: '#080',\n ##Name.Function: '#00F',\n ##Name.Class: 'bold #00F',\n ##String: '#BA2121',\n #Comment: '#008000',\n #Keyword: 'bold #000080',\n #Name: '#000',\n #Name.Builtin: '#407090',\n #Name.Function: 'bold #008080',\n #Name.Class: 'bold #00F',\n #Name.Namespace: '#000000',\n #Number: '#008080',\n #String: '#800080',\n #String.Doc: '#800000',\n #Operator: '#000000',\n #Operator.Word: 'bold #AA22FF',\n #}\n\n#formatter = formatters.HtmlFormatter(style=SciPyStyle)\n#print(formatter.get_style_defs('.highlight'))\n\n if code is None:\n return None\n else:\n lexer_class = lexers.get_lexer_for_mimetype(lexer or 'text/x-python')\n return highlight(code, lexer_class,\n formatters.HtmlFormatter(linenos=True,\n linenostep=1,))",
"def highlightSearch(self, wordList=None, regExpList=None):\n backColor = self.palette().brush(QPalette.Active,\n QPalette.Highlight)\n foreColor = self.palette().brush(QPalette.Active,\n QPalette.HighlightedText)\n if wordList is None:\n wordList = []\n if regExpList is None:\n regExpList = []\n for regExp in regExpList:\n for match in regExp.finditer(self.toPlainText()):\n matchText = match.group()\n if matchText not in wordList:\n wordList.append(matchText)\n selections = []\n for word in wordList:\n while self.find(word):\n extraSel = QTextEdit.ExtraSelection()\n extraSel.cursor = self.textCursor()\n extraSel.format.setBackground(backColor)\n extraSel.format.setForeground(foreColor)\n selections.append(extraSel)\n cursor = QTextCursor(self.document())\n self.setTextCursor(cursor) # reset main cursor/selection\n self.setExtraSelections(selections)",
"def setSpellchecking(self, color=QtCore.Qt.blue):\n self.format.setUnderlineStyle(\n QtGui.QTextCharFormat.SpellCheckUnderline)\n self.format.setUnderlineColor(color)",
"def highlight(self):\n core = cutter.core()\n highlighter = core.getBBHighlighter()\n for bblock in self.config['bb_hits']:\n highlighter.highlight(bblock, self.config['color'])",
"def _colored(self, text, *color_args):\n if self.allow_colors and color_args:\n return termcolor.colored(text, *color_args)\n return text",
"def color_file(syntax_dictionary, theme_dictionary, source_file):\n # Open and read file\n with open(source_file) as file:\n text = file.read()\n\n # looping through the keys in the dictionary\n for key in syntax_dictionary.keys():\n\n # Find the matches in the text\n replacement = re.search(syntax_dictionary[key], text)\n\n # Color matches if there are any\n if replacement is not None:\n color = color_format(replacement.group(), theme_dictionary[key])\n text = re.sub(syntax_dictionary[key], color, text)\n # Print out the results\n print(text)",
"def highlight_code(code, language, style, output_format='html'):\n\n lexer = get_lexer_by_name(language, stripall=True)\n formatter = get_formatter_by_name(output_format, style=style,\n linenos=True, cssclass=\"source\")\n highlighted_code = highlight(code, lexer, formatter)\n\n css_code = formatter.get_style_defs('.highlight')\n\n return css_code, highlighted_code"
] | [
"0.73625904",
"0.7081406",
"0.67791414",
"0.66204315",
"0.65363246",
"0.6494588",
"0.6297588",
"0.62256855",
"0.61767644",
"0.61758226",
"0.61407155",
"0.606125",
"0.6051572",
"0.6048676",
"0.60164833",
"0.5956007",
"0.58919746",
"0.58887446",
"0.5838698",
"0.5821508",
"0.579637",
"0.57857686",
"0.5768383",
"0.57579523",
"0.5757774",
"0.57499963",
"0.5738835",
"0.5719821",
"0.5712651",
"0.5707616"
] | 0.7963435 | 0 |
Mark selected text in file with this case. | def mark(self):
if self.selected_text_file is None:
return
# selectedText = self.textBrowser.textCursor().selectedText()
pos0 = self.ui.textBrowser.textCursor().selectionStart()
pos1 = self.ui.textBrowser.textCursor().selectionEnd()
if pos0 == pos1:
return
# add new item to case_text list and database and update GUI
item = {'caseid': self.case['caseid'],
'fid': self.selected_text_file[ID],
'pos0': pos0, 'pos1': pos1,
'owner': self.app.settings['codername'],
'date': datetime.datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S"),
'memo': ""}
self.case_text.append(item)
self.highlight()
cur = self.app.conn.cursor()
# Check for an existing duplicated linkage first
cur.execute("select * from case_text where caseid=? and fid=? and pos0<=? and pos1>=?",
(item['caseid'], item['fid'], item['pos0'], item['pos1']))
result = cur.fetchall()
if len(result) > 0:
Message(self.app, _("Already Linked"),
_("This segment has already been linked to this case"), "warning").exec()
return
cur.execute("insert into case_text (caseid,fid, pos0, pos1, owner, date, memo) values(?,?,?,?,?,?,?)",
(
item['caseid'], item['fid'], item['pos0'], item['pos1'], item['owner'], item['date'], item['memo']))
self.app.conn.commit()
# File may not be assigned in the table widget as Yes
self.get_files()
self.fill_table()
self.app.delete_backup = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def highlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n format_ = QtGui.QTextCharFormat()\n cursor = self.ui.textBrowser.textCursor()\n for item in self.case_text:\n try:\n cursor.setPosition(int(item['pos0']), QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(int(item['pos1']), QtGui.QTextCursor.MoveMode.KeepAnchor)\n format_.setFontUnderline(True)\n format_.setUnderlineColor(QtCore.Qt.GlobalColor.red)\n cursor.setCharFormat(format_)\n except Exception as err:\n msg = \"highlight, text length \" + str(len(self.ui.textBrowser.toPlainText()))\n msg += \"\\npos0:\" + str(item['pos0']) + \", pos1:\" + str(item['pos1'])\n msg += \"\\n\" + str(err)\n logger.debug(msg)",
"def automark(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n rows = []\n for i in index_list:\n rows.append(i.row())\n rows = list(set(rows)) # duplicate rows due to multiple columns\n if len(rows) == 0:\n return\n selected_files = []\n filenames = \"\"\n for r in rows:\n if self.allfiles[r][2] is not None and self.allfiles[r][2] != \"\":\n selected_files.append(self.allfiles[r])\n filenames += self.allfiles[r][1] + \" \"\n ui_se = DialogGetStartAndEndMarks(self.case['name'], filenames)\n ok = ui_se.exec()\n if not ok:\n return\n start_mark = ui_se.get_start_mark()\n end_mark = ui_se.get_end_mark()\n if start_mark == \"\" or end_mark == \"\":\n Message(self.app, _(\"Warning\"), _('Cannot have blank text marks'), \"warning\").exec()\n return\n msg = _(\"Auto assign text to case: \") + self.case['name']\n msg += _(\"\\nUsing \") + start_mark + _(\" and \") + end_mark + _(\"\\nIn files:\\n\")\n msg += filenames\n warning_msg = \"\"\n already_assigned = \"\"\n entries = 0\n cur = self.app.conn.cursor()\n for f in selected_files:\n cur.execute(\"select name, id, fulltext, memo, owner, date from source where id=?\",\n [f[0]])\n currentfile = cur.fetchone()\n text = currentfile[2]\n text_starts = [match.start() for match in re.finditer(re.escape(start_mark), text)]\n text_ends = [match.start() for match in re.finditer(re.escape(end_mark), text)]\n # Add new code linkage items to database\n already_assigned = \"\"\n for start_pos in text_starts:\n text_end_iterator = 0\n try:\n while start_pos >= text_ends[text_end_iterator]:\n text_end_iterator += 1\n except IndexError:\n text_end_iterator = -1\n warning_msg += _(\"Auto assign. Could not find an end mark: \") + f[1] + \" \" + end_mark + \"\\n\"\n if text_end_iterator >= 0:\n pos1 = text_ends[text_end_iterator]\n item = {'caseid': self.case['caseid'], 'fid': f[0],\n 'pos0': start_pos, 'pos1': pos1,\n 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().astimezone().strftime(\"%Y-%m-%d %H:%M:%S\"), 'memo': \"\"}\n # Check if already assigned to case_text\n sql = \"select id from case_text where caseid=? and fid=? and pos0=? and pos1=?\"\n cur.execute(sql, [item['caseid'], item['fid'], item['pos0'], item['pos1']])\n res = cur.fetchone()\n if res is None:\n sql = \"insert into case_text (caseid,fid,pos0,pos1,owner,date,memo) values(?,?,?,?,?,?,?)\"\n cur.execute(sql, (item['caseid'], item['fid'], item['pos0'], item['pos1'],\n item['owner'], item['date'], item['memo']))\n entries += 1\n self.app.conn.commit()\n else:\n already_assigned = _(\"\\nAlready assigned.\")\n # Update messages and table widget\n self.get_files()\n self.fill_table()\n # Text file is loaded in browser then update the highlights\n self.load_case_text()\n self.highlight()\n msg += \"\\n\" + str(entries) + _(\" sections found.\")\n Message(self.app, _(\"File added to case\"), msg + \"\\n\" + warning_msg + \"\\n\" + already_assigned).exec()\n self.parent_textEdit.append(msg)\n self.parent_textEdit.append(warning_msg)\n self.app.delete_backup = False",
"def mark_selected():\n (buffer, start, end) = get_selection_or_word()\n selection = buffer.get_chars(start, end)\n\n if selection != \"\":\n for m in buffer.file().search(selection, regexp=False):\n GPS.Locations.add(\"Local occurrences\",\n m.file(), m.line(), m.column(),\n selection,\n highlight=\"dynamic occurrences\",\n length=len(selection))",
"def unmark(self, position):\n\n if self.selected_text_file is None:\n return\n if len(self.case_text) == 0:\n return\n cursor = self.ui.textBrowser.cursorForPosition(position)\n self.ui.textBrowser.setTextCursor(cursor)\n\n location = self.ui.textBrowser.textCursor().selectionStart()\n unmarked = None\n for item in self.case_text:\n if item['pos0'] <= location <= item['pos1']:\n unmarked = item\n if unmarked is None:\n return\n\n # Delete from database, remove from case_text and update gui\n cur = self.app.conn.cursor()\n cur.execute(\"delete from case_text where fid=? and caseid=? and pos0=? and pos1=?\",\n (unmarked['fid'], unmarked['caseid'], unmarked['pos0'], unmarked['pos1']))\n self.app.conn.commit()\n if unmarked in self.case_text:\n self.case_text.remove(unmarked)\n self.unlight()\n self.highlight()\n # The file may be assigned Yes in the table widget but should be empty\n self.get_files()\n self.fill_table()\n self.app.delete_backup = False",
"def highlightingTextInFile():\n savingFilePDF = re.sub('\\t', '', item_text[0] + \".pdf\")\n doc = fitz.open(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n page = doc[0]\n\n with open(newTextFile, \"r\") as file2:\n time.sleep(0.5)\n text1 = file2.read()\n\n # Search for the text in the PDF in order to highlight it\n text_instances = page.searchFor(text1, hit_max=200)\n\n # Loop though the text and add highlight to the text in the HighlightedText.txt file\n for inst in text_instances:\n print(inst, type(inst))\n page.addHighlightAnnot(inst)\n\n try:\n doc.save(gradedFilesFolder + \"\\\\\" + \"Corrected - \" + savingFilePDF,\n garbage=4, deflate=True, clean=True)\n doc.close()\n os.remove(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n\n except RuntimeError as error:\n print(\"PDF file may be open\" + str(error))",
"def highlightCode(self, _event=None):\n count = 0\n if self.text.tag_ranges('sel'):\n self.text.tag_add('color' + str(count), tk.SEL_FIRST, tk.SEL_LAST)\n self.text.tag_configure('color' + str(count), foreground='black', background='yellow')\n count += 1\n else:\n # Do this if you want to overwrite all selection colors when you change color without selection\n # for tag in text.tag_names():\n # text.tag_delete(tag)\n self.text.config(foreground='yellow')\n\n fileContainingText = open(newTextFile, \"a\")\n\n hText = self.text.get(tk.SEL_FIRST, tk.SEL_LAST)\n fileContainingText.write(hText)",
"def set_text(self):\n\n if not self.text and len(self.get_files()) > 0:\n self.text = self.files[0].get_title()\n # if \"_\" in str(self.text):\n if re.match(\"[0-9]_[0-9]\", self.text) is not None:\n self.text = self.files[0].get_parent()[\"title\"]\n else:\n try: \n int(self.text)\n # is a simple int\n if int(self.text) > 20:\n self.text = self.files[0].get_parent()[\"title\"]\n except Exception as e:\n # not a simple int\n # do nothing cause probably set already\n pass\n self.text = self.text.replace(\"_\", \" \")\n self.set_keywords()",
"def visit_text(self, sytext):\n self.current.update(sytext)",
"def visit_text(self, sytext):\n self.current.update(sytext)",
"def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)",
"def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)",
"def modified_flag(self, event):\n text = self.get_current()\n text.modified = 1",
"def set_alien_file(self):\n self.alien_filename = select_file(self.alien_filename)\n if self.alien_filename is not None:\n self.alien_file.setStyleSheet(\"Text-align:left\")\n self.alien_file.setText(self.alien_filename)\n else:\n self.alien_file.setText('')",
"def __bookmarkSelected(self, act):\n bmList = act.data()\n filename = bmList[0]\n line = bmList[1]\n self.openSourceFile(filename, line)",
"def save_as_file(self, event=None):\n\n file = fd.asksaveasfile(title=\"Save as\", defaultextension=\".txt\",\n filetypes=[(\"Text(default)\", \"*.txt\"), (\"Python\", \"*.py\"), (\"Java\", \"*.java\"),\n (\"All files\", \"*.*\")])\n if file == None:\n return\n else:\n # self.file_list.append(file.name)\n file.write(self.get_current().get('1.0', 'end-1c'))\n file.close()\n self.add_tab(file=file.name, open_file=1)\n from syntax_highlight import Highlighting\n Highlighting().highlight2()",
"def set_text(self):\n pass",
"def setText(self, path, toNative=True):\n if self.__mode == E5PathPickerModes.OpenFilesMode:\n self._setEditorText(path)\n else:\n if toNative:\n path = Utilities.toNativeSeparators(path)\n self._setEditorText(path)\n if self._completer:\n self._completer.setRootPath(path)",
"def _select_file(self, change):\n selected_file = change.get(\"new\")\n self.file = str(Path(self.current_folder).joinpath(selected_file).resolve())",
"def row_selection_changed(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n rows = []\n for i in index_list:\n rows.append(i.row())\n rows = list(set(rows)) # duplicate rows due to multiple columns\n if len(rows) == 0:\n return\n self.ui.textBrowser.setText(\"\")\n self.selected_text_file = None\n index = rows[0]\n # A fulltext source is displayed if fulltext is present\n # If the mediapath is None, this represents an A/V transcribed file\n self.ui.label_file.setText(_(\"Displayed file: \") + self.allfiles[index][NAME])\n if self.allfiles[index][FULLTEXT] != \"\" and self.allfiles[index][FULLTEXT] is not None:\n self.selected_text_file = self.allfiles[index]\n self.ui.textBrowser.setText(self.allfiles[index][FULLTEXT])\n self.load_case_text()\n self.unlight()\n self.highlight()\n return",
"def fileprefixChanged(self, text):\n self.fileprefixText = str(text)\n\n self.setFirstFileLabel()",
"def save_file(self, event=None):\n try:\n text_area = self.get_current()\n except:\n print('error at save_file')\n return\n current_tab = self.nb.index('current')\n from syntax_highlight import Highlighting\n if self.file_list[current_tab] == None:\n file = fd.asksaveasfile(title=\"Save file\", defaultextension=\".txt\",\n filetypes=[(\"Python(default)\", \"*.py\"), (\"Text\", \"*.txt\"),\n (\"Java\", \"*.java\"), (\"JavaScript\", \"*.js\"),\n (\"HTML\", \"*.html\"), (\"CSS\", \"*.css\"),\n (\"All files\", \"*.*\")])\n if file is None:\n return\n else:\n self.file_list[current_tab] = file.name\n # file = open(self.file_list[current_tab], mode='w+')\n file.write(text_area.get(\"1.0\", \"end-1c\"))\n self.rename_tab(os.path.basename(self.file_list[current_tab]))\n file.close()\n print(\"save_file() first time\")\n text_area.edit_modified(arg=False)\n # from syntax_highlight import Highlighting\n Highlighting().highlight2()\n return True\n else:\n file = open(self.file_list[current_tab], \"w+\")\n file.write(text_area.get(\"1.0\", \"end-1c\"))\n file.close()\n print(\"save_file() already\")\n print(self.file_list[current_tab], 'saved')\n text_area.edit_modified(arg=False)\n Highlighting().highlight2()\n return True",
"def find_file(self):\n selected_file = tk.filedialog.askopenfilename(initialdir='/', title='Select File',\n filetypes=(('txt Files', '*.txt'), ('All Files', '*.*')))\n self.markov_chain.add_file(selected_file)",
"def SetSelection(self, start, end):\n # STC HELL - some methods require UTF-8 offsets while others work\n # with Unicode...\n # Calculate UTF-8 offsets in buffer\n unicode_txt = self.GetText()\n if start != 0:\n start = len(ed_txt.EncodeString(unicode_txt[0:start], 'utf-8'))\n if end != 0:\n end = len(ed_txt.EncodeString(unicode_txt[0:end], 'utf-8'))\n del unicode_txt\n super(EditraBaseStc, self).SetSelection(start, end)",
"def set_selected_file(self, filename, options=None):\n self._add_recent(filename, options)\n self._invalidate()",
"def _mark(self, query, doc):\n raise NotImplementedError()",
"def handle_text_search(self, text):\n log.debug(\"Handling text search: %s\", text)\n\n self.current_selected = 0\n self._refresh()",
"def SetSelectedFont(self, font):\r\n\r\n self._art.SetSelectedFont(font)",
"def SetActiveFontFile(self,path):\n\t\tself.acad.ActiveDocument.ActiveTextStyle.fontFile=path",
"def fileprefixChanged(self, text):\n self.fileprefixText = str(text)",
"def WriteText( self, text ) :\n # Always adjust the insertion point BEFORE the insertion.\n self.folderTxtCtl.SetInsertionPointEnd()\n self.folderTxtCtl.WriteText( text )"
] | [
"0.66256595",
"0.6417178",
"0.63301915",
"0.60256416",
"0.5986319",
"0.59574735",
"0.5922415",
"0.5900653",
"0.5900653",
"0.58704287",
"0.58704287",
"0.5820807",
"0.5796124",
"0.5772516",
"0.57527184",
"0.57029754",
"0.56840825",
"0.5661256",
"0.5641625",
"0.56144583",
"0.5593301",
"0.55891424",
"0.5587493",
"0.55758923",
"0.55654496",
"0.5543085",
"0.5541362",
"0.55224067",
"0.5522313",
"0.5511583"
] | 0.72556025 | 0 |
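The duplicate check in the mark row above is containment-based: the insert is skipped when an existing case_text row already spans the new selection. A minimal standalone sketch of that check-then-insert pattern, using an in-memory SQLite table whose columns mirror case_text; the ids, positions and owner value below are hypothetical.

import sqlite3

con = sqlite3.connect(":memory:")
cur = con.cursor()
cur.execute("create table case_text (caseid, fid, pos0, pos1, owner, date, memo)")

def link_segment(cur, caseid, fid, pos0, pos1, owner="coder", date="", memo=""):
    # Skip the insert when an existing row already covers the span [pos0, pos1]
    cur.execute("select 1 from case_text where caseid=? and fid=? and pos0<=? and pos1>=?",
                (caseid, fid, pos0, pos1))
    if cur.fetchone():
        return False
    cur.execute("insert into case_text (caseid, fid, pos0, pos1, owner, date, memo) values (?,?,?,?,?,?,?)",
                (caseid, fid, pos0, pos1, owner, date, memo))
    return True

print(link_segment(cur, 1, 5, 10, 42))   # True: new linkage stored
print(link_segment(cur, 1, 5, 12, 40))   # False: already covered by the (10, 42) row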
Remove case marking from selected text in selected file. | def unmark(self, position):
if self.selected_text_file is None:
return
if len(self.case_text) == 0:
return
cursor = self.ui.textBrowser.cursorForPosition(position)
self.ui.textBrowser.setTextCursor(cursor)
location = self.ui.textBrowser.textCursor().selectionStart()
unmarked = None
for item in self.case_text:
if item['pos0'] <= location <= item['pos1']:
unmarked = item
if unmarked is None:
return
# Delete from database, remove from case_text and update gui
cur = self.app.conn.cursor()
cur.execute("delete from case_text where fid=? and caseid=? and pos0=? and pos1=?",
(unmarked['fid'], unmarked['caseid'], unmarked['pos0'], unmarked['pos1']))
self.app.conn.commit()
if unmarked in self.case_text:
self.case_text.remove(unmarked)
self.unlight()
self.highlight()
# The file may be assigned Yes in the table widget but should be empty
self.get_files()
self.fill_table()
self.app.delete_backup = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_files_from_case(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n rows = []\n for i in index_list:\n rows.append(i.row())\n rows = list(set(rows)) # duplicate rows due to multiple columns\n if len(rows) == 0:\n return\n selected_files = []\n remove_msg = \"\"\n for r in rows:\n selected_files.append(self.allfiles[r])\n remove_msg += \"\\n\" + self.allfiles[r][1]\n del_ui = DialogConfirmDelete(self.app, remove_msg)\n ok = del_ui.exec()\n if not ok:\n return\n cur = self.app.conn.cursor()\n sql = \"delete from case_text where caseid=? and fid=?\"\n for f in selected_files:\n try:\n cur.execute(sql, [self.case['caseid'], f[0]])\n self.app.conn.commit()\n self.parent_textEdit.append(f[1] + \" removed from case \" + self.case['name'])\n except Exception as e:\n print(e)\n logger.debug(str(e))\n # Update assigned files and table widget\n self.get_files()\n self.fill_table()\n self.app.delete_backup = False",
"def unlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n cursor = self.ui.textBrowser.textCursor()\n try:\n cursor.setPosition(0, QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(len(self.selected_text_file[FULLTEXT]) - 1, QtGui.QTextCursor.MoveMode.KeepAnchor)\n cursor.setCharFormat(QtGui.QTextCharFormat())\n except Exception as e:\n logger.debug((str(e) + \"\\n unlight, text length\" + str(len(self.ui.textBrowser.toPlainText()))))",
"def removeOwnPunctuation(self):\n\t\tself.textFile = self.removePunctuation(self.open(self.filePath)).split()",
"def test_clear_selected_text(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.4\", \"4.4\"),\n after_sel=(\"2.4\", \"2.4\"),\n command_name=\"clear-selected-text\",\n )",
"def cancel(self):\n end = self.start\n start = self.start + f'-{self.chars}c'\n self.text.tag_delete('found', 1.0, tk.END)\n self.text.tag_delete('found.focus', 1.0, tk.END)\n self.text.tag_add(tk.SEL, start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.focus_set()\n self.destroy()",
"def cancel(self):\n end = self.start\n start = self.start + f'-{self.chars}c'\n self.text.tag_delete('found', 1.0, tk.END)\n self.text.tag_delete('found.focus', 1.0, tk.END)\n self.text.tag_add(tk.SEL, start, end)\n self.text.mark_set(tk.INSERT, start)\n self.text.focus_set()\n self.destroy()",
"def clean_cases(text):\n return text.lower()",
"def delete_text(self,\n\t text,\n\t fname,\n\t pattern=None,\n\t expect=None,\n\t shutit_pexpect_child=None,\n\t note=None,\n\t before=False,\n\t force=False,\n\t line_oriented=True,\n\t loglevel=logging.DEBUG):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\treturn self.change_text(text,\n\t\t fname,\n\t\t pattern,\n\t\t expect,\n\t\t shutit_pexpect_child,\n\t\t before,\n\t\t force,\n\t\t note=note,\n\t\t delete=True,\n\t\t line_oriented=line_oriented,\n\t\t loglevel=loglevel)",
"def remove_highlights(bv=None):\n if no_coverage_warn():\n return\n if bv is None:\n bv = gbv\n clear_highlights(covdb.total_coverage, bv)\n log.log_info(\"Highlights cleared.\")",
"def highlight(self):\n\n if self.selected_text_file is None:\n return\n if self.selected_text_file[FULLTEXT] is None:\n return\n format_ = QtGui.QTextCharFormat()\n cursor = self.ui.textBrowser.textCursor()\n for item in self.case_text:\n try:\n cursor.setPosition(int(item['pos0']), QtGui.QTextCursor.MoveMode.MoveAnchor)\n cursor.setPosition(int(item['pos1']), QtGui.QTextCursor.MoveMode.KeepAnchor)\n format_.setFontUnderline(True)\n format_.setUnderlineColor(QtCore.Qt.GlobalColor.red)\n cursor.setCharFormat(format_)\n except Exception as err:\n msg = \"highlight, text length \" + str(len(self.ui.textBrowser.toPlainText()))\n msg += \"\\npos0:\" + str(item['pos0']) + \", pos1:\" + str(item['pos1'])\n msg += \"\\n\" + str(err)\n logger.debug(msg)",
"def __init__(self, keep, case_sens=True):\n if not case_sens:\n low = keep.lower()\n up = keep.upper()\n keep = low + up\n self.delchars = ''.join([c for c in self.allchars if c not in keep])",
"def new_file():\n text.delete('1.0', tk.END)",
"def text_select_context_menu_click (selectedtext) :\n\tsettings = Composition.CompositionManager.Get[Interfaces.Settings.IApplicationSettingsProvider]()\n\tsettings.GlobalExclude.Add(selectedtext)",
"def removeOwnContractions(self):\n\t\tself.textFile = self.removeContractions(text=self.textFile)",
"def __unhighlight(self):\n self.unhighlight()",
"def change_match_type(self):\n self.term = None\n self.chars = None\n self.text.tag_remove('found', '1.0', tk.END)\n self.text.tag_remove('found.focus', '1.0', tk.END)",
"def change_match_type(self):\n self.term = None\n self.chars = None\n self.text.tag_remove('found', '1.0', tk.END)\n self.text.tag_remove('found.focus', '1.0', tk.END)",
"def deCopIfy(text):\n\tif text == \"\":\n\t\treturn text\n\n\tfor lingo in coplingo:\n\t\ttext = re.sub(lingo['regex'], lingo['str'], text)\n\n\treturn text[0].upper() + text[1:]",
"def delete_file(self):\n os.remove(self.id+\"-input.txt\")\n if(self.lang == \"PYTHON\"):\n os.remove(self.id+\".py\")\n elif(self.lang == \"C\"):\n os.remove(self.id+\".c\")\n if(self.status == 1):\n os.remove(self.id+\"_c\")\n elif(self.lang == 'CPP'):\n os.remove(self.id+\".cpp\")\n if(self.status == 1):\n os.remove(self.id+\"_cpp\")\n elif(self.lang == 'JAVA'):\n os.remove(self.id+\".java\")\n if(self.status == 1):\n os.remove(self.id+\"_java\") \n elif(self.lang == \"JS\"):\n os.remove(self.id+\".js\")\n # if(self.status == 1):\n # os.remove(self.id+\"_js\")s",
"def clean_text(self, num='substitute'):\n for i, doc in enumerate(self.documents):\n if num is 'spell':\n doc = doc.replace('0', ' zero ')\n doc = doc.replace('1', ' one ')\n doc = doc.replace('2', ' two ')\n doc = doc.replace('3', ' three ')\n doc = doc.replace('4', ' four ')\n doc = doc.replace('5', ' five ')\n doc = doc.replace('6', ' six ')\n doc = doc.replace('7', ' seven ')\n doc = doc.replace('8', ' eight ')\n doc = doc.replace('9', ' nine ')\n elif num is 'substitute':\n doc = re.sub('(\\\\d+)', ' NUM ', doc)\n elif num is 'remove':\n doc = re.sub('[0-9]', ' ', doc)\n doc = doc.replace('$', ' dollar ')\n doc = doc.lower()\n doc = re.sub('[^a-z]', ' ', doc)\n doc = ' '.join(doc.split())\n self.documents[i] = doc",
"def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text",
"def mark(self):\n\n if self.selected_text_file is None:\n return\n # selectedText = self.textBrowser.textCursor().selectedText()\n pos0 = self.ui.textBrowser.textCursor().selectionStart()\n pos1 = self.ui.textBrowser.textCursor().selectionEnd()\n if pos0 == pos1:\n return\n # add new item to case_text list and database and update GUI\n item = {'caseid': self.case['caseid'],\n 'fid': self.selected_text_file[ID],\n 'pos0': pos0, 'pos1': pos1,\n 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().astimezone().strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'memo': \"\"}\n self.case_text.append(item)\n self.highlight()\n\n cur = self.app.conn.cursor()\n # Check for an existing duplicated linkage first\n cur.execute(\"select * from case_text where caseid=? and fid=? and pos0<=? and pos1>=?\",\n (item['caseid'], item['fid'], item['pos0'], item['pos1']))\n result = cur.fetchall()\n if len(result) > 0:\n Message(self.app, _(\"Already Linked\"),\n _(\"This segment has already been linked to this case\"), \"warning\").exec()\n return\n cur.execute(\"insert into case_text (caseid,fid, pos0, pos1, owner, date, memo) values(?,?,?,?,?,?,?)\",\n (\n item['caseid'], item['fid'], item['pos0'], item['pos1'], item['owner'], item['date'], item['memo']))\n self.app.conn.commit()\n # File may not be assigned in the table widget as Yes\n self.get_files()\n self.fill_table()\n self.app.delete_backup = False",
"def clean(c):",
"def cleanup(text):\n with open(text, 'r') as uncleaned_text:\n no_chapters = re.sub('[A-Z]{3,}', ' ', uncleaned_text.read())\n remove_periods = re.sub('(\\s\\.){4,}', '', no_chapters)\n new_text = re.sub('\\*', '', remove_periods)\n return new_text",
"def clean_files(self):\n self.filenames.clear()",
"def clean_filename(self, filename):\n return remove(filename,self.unwanted_chars_in_filenames)",
"def uppercase(self):\n\n file = open(self.filename, 'r')\n new_file = open(self.temp_filename, 'w')\n for line in file:\n for keyword in self.KEYWORDS:\n if keyword in line:\n line = line.replace(keyword, keyword.upper())\n new_file.write(line)\n file.close()\n new_file.close()\n self.overwrite_file()",
"def deleteconvert(self):\n filename = os.path.join(self.docx_path, self.name.docx)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.html_path, self.name.html)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.docbook_path, self.name.xml)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.markdown_path, self.name.md)\n if os.path.isfile(filename):\n os.remove(filename)",
"def unhighlight(self, current=False):\n if current:\n if self.currentEditor is not None:\n self.currentEditor.highlight()\n else:\n for editor in self.editors:\n editor.highlight()",
"def invert_selection(self):\n pass"
] | [
"0.6281262",
"0.62588793",
"0.5885497",
"0.56027263",
"0.5555215",
"0.5555215",
"0.5484803",
"0.54414994",
"0.5382209",
"0.53529084",
"0.52868575",
"0.5248693",
"0.5205808",
"0.51960534",
"0.51928306",
"0.5141761",
"0.5141761",
"0.51227355",
"0.50720865",
"0.50587964",
"0.50519913",
"0.5046324",
"0.50364673",
"0.5030763",
"0.5024906",
"0.50048125",
"0.50033146",
"0.5001006",
"0.49734443",
"0.4972718"
] | 0.68767476 | 0 |
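The unmark row above resolves a click position to a stored segment whose span contains it. A minimal version of that lookup (returning the first containing segment), over a hypothetical list of case_text-style dicts:

def segment_at(case_text, pos):
    # Return the first stored segment whose [pos0, pos1] span contains pos, else None
    return next((seg for seg in case_text if seg["pos0"] <= pos <= seg["pos1"]), None)

segments = [{"pos0": 0, "pos1": 25}, {"pos0": 40, "pos1": 90}]
print(segment_at(segments, 57))   # {'pos0': 40, 'pos1': 90}
print(segment_at(segments, 30))   # None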
Automark text in one or more files with selected case. Each selected_file is a tuple of id, name, fulltext, mediapath, memo, owner, date | def automark(self):

index_list = self.ui.tableWidget.selectionModel().selectedIndexes()
rows = []
for i in index_list:
rows.append(i.row())
rows = list(set(rows)) # duplicate rows due to multiple columns
if len(rows) == 0:
return
selected_files = []
filenames = ""
for r in rows:
if self.allfiles[r][2] is not None and self.allfiles[r][2] != "":
selected_files.append(self.allfiles[r])
filenames += self.allfiles[r][1] + " "
ui_se = DialogGetStartAndEndMarks(self.case['name'], filenames)
ok = ui_se.exec()
if not ok:
return
start_mark = ui_se.get_start_mark()
end_mark = ui_se.get_end_mark()
if start_mark == "" or end_mark == "":
Message(self.app, _("Warning"), _('Cannot have blank text marks'), "warning").exec()
return
msg = _("Auto assign text to case: ") + self.case['name']
msg += _("\nUsing ") + start_mark + _(" and ") + end_mark + _("\nIn files:\n")
msg += filenames
warning_msg = ""
already_assigned = ""
entries = 0
cur = self.app.conn.cursor()
for f in selected_files:
cur.execute("select name, id, fulltext, memo, owner, date from source where id=?",
[f[0]])
currentfile = cur.fetchone()
text = currentfile[2]
text_starts = [match.start() for match in re.finditer(re.escape(start_mark), text)]
text_ends = [match.start() for match in re.finditer(re.escape(end_mark), text)]
# Add new code linkage items to database
already_assigned = ""
for start_pos in text_starts:
text_end_iterator = 0
try:
while start_pos >= text_ends[text_end_iterator]:
text_end_iterator += 1
except IndexError:
text_end_iterator = -1
warning_msg += _("Auto assign. Could not find an end mark: ") + f[1] + " " + end_mark + "\n"
if text_end_iterator >= 0:
pos1 = text_ends[text_end_iterator]
item = {'caseid': self.case['caseid'], 'fid': f[0],
'pos0': start_pos, 'pos1': pos1,
'owner': self.app.settings['codername'],
'date': datetime.datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S"), 'memo': ""}
# Check if already assigned to case_text
sql = "select id from case_text where caseid=? and fid=? and pos0=? and pos1=?"
cur.execute(sql, [item['caseid'], item['fid'], item['pos0'], item['pos1']])
res = cur.fetchone()
if res is None:
sql = "insert into case_text (caseid,fid,pos0,pos1,owner,date,memo) values(?,?,?,?,?,?,?)"
cur.execute(sql, (item['caseid'], item['fid'], item['pos0'], item['pos1'],
item['owner'], item['date'], item['memo']))
entries += 1
self.app.conn.commit()
else:
already_assigned = _("\nAlready assigned.")
# Update messages and table widget
self.get_files()
self.fill_table()
# Text file is loaded in browser then update the highlights
self.load_case_text()
self.highlight()
msg += "\n" + str(entries) + _(" sections found.")
Message(self.app, _("File added to case"), msg + "\n" + warning_msg + "\n" + already_assigned).exec()
self.parent_textEdit.append(msg)
self.parent_textEdit.append(warning_msg)
self.app.delete_backup = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def selectFiles(self):\n\n filenames = []\n self.fileIDs = \"\"\n self.caseIDs = \"\" # clears any case selections\n cur = self.settings['conn'].cursor()\n cur.execute(\"select id, name, status from source\")\n result = cur.fetchall()\n for row in result:\n filenames.append({'id': row[0], 'name': row[1], 'status': row[2]})\n self.fileIDs += \",\" + str(row[0])\n if len(self.fileIDs) > 0:\n self.fileIDs = self.fileIDs[1:]\n\n Dialog_selectfile = QtGui.QDialog()\n ui = Ui_Dialog_selectfile(filenames)\n ui.setupUi(Dialog_selectfile, \"Select file(s) to view\", \"many\")\n ok = Dialog_selectfile.exec_()\n if ok:\n tmp_IDs = \"\"\n selectedFiles = ui.getSelected() # list of dictionaries\n for row in selectedFiles:\n tmp_IDs += \",\" + str(row['id'])\n if len(tmp_IDs) > 0:\n self.fileIDs = tmp_IDs[1:]",
"def select_files(self):\n pass",
"def select_files(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n files, _ = QFileDialog.getOpenFileNames(self.parent,\n \"File Export\",\n os.path.expanduser('~/'),\n \"Ensemble Files (*.ens, *.bin);;Binary Files (*.bin);;All Files (*)\",\n options=options)\n if files:\n # Store the list of results\n self.selected_files = files\n\n # Analyze the files\n self.analyze_files()",
"def add_files_to_case(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n rows = []\n for i in index_list:\n rows.append(i.row())\n rows = list(set(rows)) # duplicate rows due to multiple columns\n if len(rows) == 0:\n return\n selected_files = []\n for r in rows:\n selected_files.append(self.allfiles[r])\n msg = \"\"\n for file_ in selected_files:\n msg += self.add_file_to_case(file_)\n # Update messages and table widget\n self.get_files()\n self.show_or_hide_rows()\n Message(self.app, _(\"File added to case\"), msg, \"information\").exec()\n self.parent_textEdit.append(msg)\n self.app.delete_backup = False",
"def browse_files_out(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring_offers.delete(0,tk.END)\n self.docstring_offers.insert(0,path_to_data)\n #use chosen value as self.exchanged_offers_filepa\n self.exchanged_offers_filepath.set(path_to_data)",
"def mark(self):\n\n if self.selected_text_file is None:\n return\n # selectedText = self.textBrowser.textCursor().selectedText()\n pos0 = self.ui.textBrowser.textCursor().selectionStart()\n pos1 = self.ui.textBrowser.textCursor().selectionEnd()\n if pos0 == pos1:\n return\n # add new item to case_text list and database and update GUI\n item = {'caseid': self.case['caseid'],\n 'fid': self.selected_text_file[ID],\n 'pos0': pos0, 'pos1': pos1,\n 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().astimezone().strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'memo': \"\"}\n self.case_text.append(item)\n self.highlight()\n\n cur = self.app.conn.cursor()\n # Check for an existing duplicated linkage first\n cur.execute(\"select * from case_text where caseid=? and fid=? and pos0<=? and pos1>=?\",\n (item['caseid'], item['fid'], item['pos0'], item['pos1']))\n result = cur.fetchall()\n if len(result) > 0:\n Message(self.app, _(\"Already Linked\"),\n _(\"This segment has already been linked to this case\"), \"warning\").exec()\n return\n cur.execute(\"insert into case_text (caseid,fid, pos0, pos1, owner, date, memo) values(?,?,?,?,?,?,?)\",\n (\n item['caseid'], item['fid'], item['pos0'], item['pos1'], item['owner'], item['date'], item['memo']))\n self.app.conn.commit()\n # File may not be assigned in the table widget as Yes\n self.get_files()\n self.fill_table()\n self.app.delete_backup = False",
"def get_files(self):\n\n cur = self.app.conn.cursor()\n sql = \"select distinct case_text.fid, source.name from case_text join source on case_text.fid=source.id where \"\n sql += \"caseid=? order by lower(source.name) asc\"\n cur.execute(sql, [self.case['caseid'], ])\n self.casefiles = cur.fetchall()\n sql = \"select id, name, fulltext, mediapath, memo, owner, date, av_text_id from source order by source.name asc\"\n cur.execute(sql)\n self.allfiles = cur.fetchall()\n msg = _(\"Files linked: \") + str(len(self.casefiles)) + \" / \" + str(len(self.allfiles))\n self.ui.label_files_linked.setText(msg)",
"def get_file_ids(self):\n\n if self.attribute_file_ids:\n file_ids = \"\"\n for id_ in self.attribute_file_ids:\n file_ids += \",\" + str(id_)\n return _(\"Attributes: \") + self.attributes_msg + \" \", f\" in ({file_ids[1:]})\"\n\n file_name = self.ui.comboBox_file.currentText()\n case_name = self.ui.comboBox_case.currentText()\n if file_name == \"\" and case_name == \"\":\n return \"\", \"\"\n if file_name != \"\":\n for f in self.files:\n if f['name'] == file_name:\n return _(\"File: \") + file_name + \" \", f\"={f['id']}\"\n case_id = -1\n for c in self.cases:\n if c['name'] == case_name:\n case_id = c['id']\n break\n cur = self.app.conn.cursor()\n sql = \"select distinct fid from case_text where caseid=?\"\n cur.execute(sql, [case_id, ])\n res = cur.fetchall()\n file_ids = \"\"\n for r in res:\n file_ids += \",\" + str(r[0])\n if file_ids == \"\":\n return \"\", \"\"\n return _(\"Case: \") + case_name + \" \", f\" in ({file_ids[1:]})\"",
"def selectCases(self):\n\n casenames = []\n self.fileIDs = \"\"\n self.caseIDs = \"\" # default for all cases and allows the file selection search method to occur\n cur = self.settings['conn'].cursor()\n cur.execute(\"select id, name, status from cases\")\n result = cur.fetchall()\n for row in result:\n casenames.append({'id': row[0], 'name': row[1], 'status': row[2]})\n\n Dialog_selectcase = QtGui.QDialog()\n ui = Ui_Dialog_selectfile(casenames)\n ui.setupUi(Dialog_selectcase, \"Select case(s) to view\", \"many\")\n ok = Dialog_selectcase.exec_()\n if ok:\n tmp_IDs = \"\"\n selectedCases = ui.getSelected() # list of dictionaries\n for row in selectedCases:\n tmp_IDs += \",\" + str(row['id'])\n if len(tmp_IDs) > 0:\n self.caseIDs = tmp_IDs[1:]",
"def view_file(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n index = None\n if len(index_list) > 0:\n index = index_list[0].row()\n if index is None:\n return\n\n # Need the data as a dictionary to view images and audio/video\n dictionary = {'name': self.allfiles[index][NAME], 'mediapath': self.allfiles[index][MEDIAPATH],\n 'owner': self.allfiles[index][OWNER], 'id': self.allfiles[index][0],\n 'date': self.allfiles[index][DATE],\n 'memo': self.allfiles[index][MEMO], 'fulltext': self.allfiles[index][FULLTEXT],\n 'av_text_id': self.allfiles[index][AV_TEXT_ID]}\n # Mediapath will be None for a .transcribed empty text media entry, and 'docs:' for a linked text document\n if self.allfiles[index][MEDIAPATH] is None or self.allfiles[index][MEDIAPATH][0:5] == 'docs:':\n return\n # Added checks to test for media presence\n if self.allfiles[index][MEDIAPATH][:6] in (\"/video\", \"video:\"):\n if self.allfiles[index][MEDIAPATH][:6] == \"video:\":\n abs_path = self.allfiles[index][MEDIAPATH].split(':')[1]\n if not os.path.exists(abs_path):\n return\n if self.allfiles[index][MEDIAPATH][:6] == \"/video\":\n abs_path = self.app.project_path + self.allfiles[index][MEDIAPATH]\n if not os.path.exists(abs_path):\n return\n ui_av = DialogViewAV(self.app, dictionary)\n ui_av.exec()\n if self.allfiles[index][MEDIAPATH][:6] in (\"/audio\", \"audio:\"):\n if self.allfiles[index][MEDIAPATH][0:6] == \"audio:\":\n abs_path = self.allfiles[index][MEDIAPATH].split(':')[1]\n if not os.path.exists(abs_path):\n return\n if self.allfiles[index][MEDIAPATH][0:6] == \"/audio\":\n abs_path = self.app.project_path + self.allfiles[index][MEDIAPATH]\n if not os.path.exists(abs_path):\n return\n ui_av = DialogViewAV(self.app, dictionary)\n ui_av.exec()\n if self.allfiles[index][MEDIAPATH][:7] in (\"/images\", \"images:\"):\n if self.allfiles[index][MEDIAPATH][0:7] == \"images:\":\n abs_path = self.allfiles[index][MEDIAPATH].split(':')[1]\n if not os.path.exists(abs_path):\n return\n if self.allfiles[index][MEDIAPATH][0:7] == \"/images\":\n abs_path = self.app.project_path + self.allfiles[index][MEDIAPATH]\n if not os.path.exists(abs_path):\n return\n # Requires {name, mediapath, owner, id, date, memo, fulltext}\n ui_img = DialogViewImage(self.app, dictionary)\n ui_img.exec()",
"def browse_files_in(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring.delete(0,tk.END)\n self.docstring.insert(0,path_to_data)\n #use chosen value as self.data_file\n self.data_file.set(path_to_data)",
"def select_files():\n\n if not Settings.is_prompt(): return [File.get_random_file()]\n category = Settings.select_category()\n if not category: return File.select_file_upload_method()\n # if not Settings.confirm(category): return File.select_files()\n Settings.print(\"Select Files or a Folder\")\n files = []\n while True:\n file = File.select_file(category)\n if not file: break\n ##\n if \"performer\" in str(category):\n cat = Settings.select_category([cat for cat in Settings.get_categories() if \"performer\" not in cat])\n performerName = file.get_title()\n file = File.select_file(cat, performer=performerName)\n if not file: break\n setattr(file, \"performer\", performerName)\n files.append(file)\n if \"galler\" in str(cat) or \"video\" in str(cat): break\n ##\n files.append(file)\n if \"galler\" in str(category) or \"video\" in str(category): break\n if str(files[0]) == \"unset\": return files\n if not Settings.confirm([file.get_title() for file in files]): return File.select_files()\n return files",
"def load_case_text(self):\n\n self.case_text = []\n if self.selected_text_file is None:\n return\n cur = self.app.conn.cursor()\n cur.execute(\"select caseid, fid, pos0, pos1, owner, date, memo from case_text where fid = ? and caseid = ?\",\n [self.selected_text_file[ID], self.case['caseid']])\n result = cur.fetchall()\n for row in result:\n self.case_text.append({'caseid': row[0], 'fid': row[1], 'pos0': row[2],\n 'pos1': row[3], 'owner': row[4], 'date': row[5], 'memo': row[6]})",
"def execute_file(self, files, **kw):\n\n mode = kw['mode'] if 'mode' in kw else 0\n\n # ranger can act as a file chooser when running with --choosefile=...\n if mode == 0 and 'label' not in kw:\n if ranger.args.choosefile:\n open(ranger.args.choosefile, 'w').write(self.fm.thisfile.path)\n\n if ranger.args.choosefiles:\n paths = []\n for hist in self.fm.thistab.history:\n for fobj in hist.files:\n if fobj.marked and fobj.path not in paths:\n paths += [fobj.path]\n paths += [f.path for f in self.fm.thistab.get_selection() if f.path not in paths]\n\n with open(ranger.args.choosefiles, 'w') as fobj:\n fobj.write('\\n'.join(paths) + '\\n')\n\n if ranger.args.choosefile or ranger.args.choosefiles:\n raise SystemExit\n\n if isinstance(files, set):\n files = list(files)\n elif not isinstance(files, (list, tuple)):\n files = [files]\n\n flags = kw.get('flags', '')\n if 'c' in squash_flags(flags):\n files = [self.fm.thisfile]\n\n self.signal_emit('execute.before', keywords=kw)\n filenames = [f.path for f in files]\n label = kw.get('label', kw.get('app', None))\n try:\n return self.rifle.execute(filenames, mode, label, flags, None)\n finally:\n self.signal_emit('execute.after')",
"def find_file(self):\n selected_file = tk.filedialog.askopenfilename(initialdir='/', title='Select File',\n filetypes=(('txt Files', '*.txt'), ('All Files', '*.*')))\n self.markov_chain.add_file(selected_file)",
"def choose_file(self):\n pass",
"def add_file_to_case(self, file_):\n\n cur = self.app.conn.cursor()\n text_len = 0\n if file_[2] is not None:\n text_len = len(file_[2]) - 1\n link = {'caseid': self.case['caseid'], 'fid': file_[0], 'pos0': 0,\n 'pos1': text_len, 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().astimezone().strftime(\"%Y-%m-%d %H:%M:%S\"), 'memo': \"\"}\n\n # Check for an existing duplicated linked file first\n cur.execute(\"select * from case_text where caseid = ? and fid=? and pos0=? and pos1=?\",\n (link['caseid'], link['fid'], link['pos0'], link['pos1']))\n result = cur.fetchall()\n if len(result) > 0:\n msg = _(\"This file has already been linked to this case \") + file_[1] + \"\\n\"\n return msg\n # Even non-text files can be assigned to the case here\n sql = \"insert into case_text (caseid, fid, pos0, pos1, owner, date, memo) values(?,?,?,?,?,?,?)\"\n cur.execute(sql, (link['caseid'], link['fid'], link['pos0'], link['pos1'],\n link['owner'], link['date'], link['memo']))\n self.app.conn.commit()\n msg = file_[1] + _(\" added to case.\") + \"\\n\"\n\n # Update table entry assigned to Yes\n rows = self.ui.tableWidget.rowCount()\n for row in range(0, rows):\n fid = int(self.ui.tableWidget.item(row, 0).text())\n if fid == file_[0]: # file_[0] is fid\n item = QtWidgets.QTableWidgetItem(_(\"Yes\"))\n item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)\n self.ui.tableWidget.setItem(row, 2, item)\n return msg",
"def saveTexts(self):\n if self.currentItem is not None:\n # Get name of selected file in the List\n currentItempath = path.join(self.workDir, self.currentItem)\n # Pure-text annotation\n filepath_cor = currentItempath + TEXT_ANNO_EXT\n cor_text = self.TextCorr.GetValue().strip()\n self.editFile(filepath_cor, cor_text, self.PlayList.setTextAnno)\n # XML annotation\n filepath_xcor = currentItempath + XML_ANNO_EXT\n xcor_text = self.XMLCorr.GetValue().strip()\n self.editFile(filepath_xcor, xcor_text, self.PlayList.setXMLAnno)\n # Command annotation\n filepath_cmd = currentItempath + CMD_ANNO_EXT\n cmd_text = self.CorrCommand.GetValue().strip()\n self.editFile(filepath_cmd, cmd_text, self.PlayList.setCommandAnno)\n # Annotator comments\n filepath_nfo = currentItempath + COMMENT_EXT\n nfo_text = self.Comments.GetValue().strip()\n self.editFile(filepath_nfo, nfo_text, None)",
"def __bookmarkSelected(self, act):\n bmList = act.data()\n filename = bmList[0]\n line = bmList[1]\n self.openSourceFile(filename, line)",
"def select_file(category, performer=None):\n\n files = File.get_files_by_category(category, performer=performer)\n files_ = []\n for file in files:\n if isinstance(file, str):\n files_.append(PyInquirer.Separator())\n continue\n file.category = category\n file_ = {\n \"name\": file.get_title(),\n \"value\": file,\n }\n files_.append(file_)\n if len(files_) == 0:\n Settings.print(\"Missing Files\")\n return\n files_.append({\n \"name\": 'Back',\n \"value\": None,\n })\n question = {\n 'type': 'list',\n 'name': 'file',\n 'message': 'File Path:',\n 'choices': files_,\n # 'filter': lambda file: file.lower()\n }\n answer = PyInquirer.prompt(question)\n if not answer: return File.select_files()\n file = answer[\"file\"]\n if not Settings.confirm(file.get_path()): return None\n return file",
"def _open_files(view, sel):\n schema, word = get_names(view, sel)\n file_name = word + '.sql'\n path = [schema, None, file_name]\n files = find_file(view.window().folders(), path)\n if len(files) > 5:\n print('something is wrong; too many files; aborting')\n return\n for f in files:\n view.window().open_file(f)",
"def complete_file(self, text, line, *_):\n leading = line[len(\"file \") :]\n curpath = os.path.join(os.path.curdir, leading)\n\n def isdql(parent, filename):\n \"\"\"Check if a file is .dql or a dir\"\"\"\n return not filename.startswith(\".\") and (\n os.path.isdir(os.path.join(parent, filename))\n or filename.lower().endswith(\".dql\")\n )\n\n def addslash(path):\n \"\"\"Append a slash if a file is a directory\"\"\"\n if path.lower().endswith(\".dql\"):\n return path + \" \"\n else:\n return path + \"/\"\n\n if not os.path.exists(curpath) or not os.path.isdir(curpath):\n curpath = os.path.dirname(curpath)\n return [\n addslash(f)\n for f in os.listdir(curpath)\n if f.startswith(text) and isdql(curpath, f)\n ]",
"def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)",
"def on_coding_standard_file_browse(self, *args):\n file = GPS.MDI.file_selector()\n if file.path != \"\":\n self.fileEntry.set_text(file.path)",
"def row_selection_changed(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n rows = []\n for i in index_list:\n rows.append(i.row())\n rows = list(set(rows)) # duplicate rows due to multiple columns\n if len(rows) == 0:\n return\n self.ui.textBrowser.setText(\"\")\n self.selected_text_file = None\n index = rows[0]\n # A fulltext source is displayed if fulltext is present\n # If the mediapath is None, this represents an A/V transcribed file\n self.ui.label_file.setText(_(\"Displayed file: \") + self.allfiles[index][NAME])\n if self.allfiles[index][FULLTEXT] != \"\" and self.allfiles[index][FULLTEXT] is not None:\n self.selected_text_file = self.allfiles[index]\n self.ui.textBrowser.setText(self.allfiles[index][FULLTEXT])\n self.load_case_text()\n self.unlight()\n self.highlight()\n return",
"def tag_file_chooser(self):\n filename_list = tk.filedialog.askopenfilenames()\n self._tag_path_var.set(filename_list)",
"def _select_file(self, change):\n selected_file = change.get(\"new\")\n self.file = str(Path(self.current_folder).joinpath(selected_file).resolve())",
"def remove_files_from_case(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n rows = []\n for i in index_list:\n rows.append(i.row())\n rows = list(set(rows)) # duplicate rows due to multiple columns\n if len(rows) == 0:\n return\n selected_files = []\n remove_msg = \"\"\n for r in rows:\n selected_files.append(self.allfiles[r])\n remove_msg += \"\\n\" + self.allfiles[r][1]\n del_ui = DialogConfirmDelete(self.app, remove_msg)\n ok = del_ui.exec()\n if not ok:\n return\n cur = self.app.conn.cursor()\n sql = \"delete from case_text where caseid=? and fid=?\"\n for f in selected_files:\n try:\n cur.execute(sql, [self.case['caseid'], f[0]])\n self.app.conn.commit()\n self.parent_textEdit.append(f[1] + \" removed from case \" + self.case['name'])\n except Exception as e:\n print(e)\n logger.debug(str(e))\n # Update assigned files and table widget\n self.get_files()\n self.fill_table()\n self.app.delete_backup = False",
"def file_open(self):\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File')\n\n with open(filename, 'r', encoding=\"utf8\") as file:\n self.file_cont = file.readlines()\n self.textToAnalize.setText(''.join(self.file_cont))",
"def do_files(self, args):\n file_names = self.regexprutils.get_file_names()\n print 'File names:'\n for name in file_names:\n print ' %s' % (name, )"
] | [
"0.660735",
"0.63067484",
"0.6029698",
"0.60066223",
"0.59928775",
"0.59761703",
"0.59170103",
"0.58575195",
"0.58572716",
"0.58405566",
"0.58404976",
"0.5827184",
"0.57653266",
"0.5730854",
"0.56902236",
"0.5616098",
"0.5498412",
"0.5475154",
"0.54671156",
"0.54535323",
"0.5422051",
"0.54206663",
"0.5413312",
"0.5413312",
"0.53774107",
"0.5357259",
"0.53542423",
"0.5338281",
"0.53351736",
"0.5328986"
] | 0.6344962 | 1 |
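The automark row above pairs every start-mark occurrence with the first end-mark occurrence after it. A minimal standalone sketch of that pairing step; the sample text and mark strings are hypothetical.

import re

def find_sections(text, start_mark, end_mark):
    # Pair each start-mark position with the first end-mark position after it
    starts = [m.start() for m in re.finditer(re.escape(start_mark), text)]
    ends = [m.start() for m in re.finditer(re.escape(end_mark), text)]
    spans = []
    for pos0 in starts:
        later = [e for e in ends if e > pos0]
        if later:                        # unmatched start marks are skipped
            spans.append((pos0, later[0]))
    return spans

print(find_sections("Q: one A: yes Q: two A: no", "Q:", "A:"))   # [(0, 7), (14, 21)]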
To generate a homogeneous Poisson point pattern in space S X T, it basically | def __homogeneous_poisson_sampling(T, S, maximum):
_S = [T] + S
# sample the number of events from S
n = utils.lebesgue_measure(_S)
N = tf.random.poisson(lam=maximum * n, shape=[1], dtype=tf.int32)
# simulate spatial sequence and temporal sequence separately.
points = [ tf.random.uniform(shape=N, minval=_S[i][0], maxval=_S[i][1]) for i in range(len(_S)) ]
# sort the temporal sequence ascendingly.
points[0] = tf.contrib.framework.sort(points[0], direction="ASCENDING")
points = tf.transpose(tf.stack(points))
return points | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def homogenous_poisson_gen():\n pass",
"def inhomogenous_poisson_gen():\n pass",
"def toy_poisson_1d(seed=default_seed):\n\n X = np.arange(0,100,5)[:,None]\n F = np.round(np.sin(X/18.) + .1*X) + np.arange(5,25)[:,None]\n E = np.random.randint(-5,5,20)[:,None]\n Y = F + E\n\n kernel = GPy.kern.rbf(1)\n distribution = GPy.likelihoods.likelihood_functions.Poisson()\n likelihood = GPy.likelihoods.EP(Y,distribution)\n\n m = GPy.models.GP(X,likelihood,kernel)\n m.ensure_default_constraints()\n\n # Approximate likelihood\n m.update_likelihood_approximation()\n\n # Optimize and plot\n m.optimize()\n #m.EPEM FIXME\n print m\n\n # Plot\n pb.subplot(211)\n m.plot_f() #GP plot\n pb.subplot(212)\n m.plot() #Output plot\n\n return m",
"def pontos(self):\n \n self.sc = 1. \n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. + self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)",
"def rfd_poisson(ps,n):\n lam = sum(ps)\n G = len(ps)\n sample_q = lambda:nprandom.poisson(lam) # chromosomal occupancy approximately poisson.\n sampler = make_sampler(ps)\n return [direct_sampling_ps(ps,sample_q(),sampler) for i in xrange(n)]",
"def phantom_squares(n_points,S):\n \n #Rescaling according to image size \n S[:,0] = S[:,0]*n_points/2\n S[:,1] = S[:,1]*n_points/2\n S[:,2] = S[:,2]*n_points/2\n S[:,3] = S[:,3]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 ) \n nrow,ncol = S.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow)) \n\n for k in range(nrow): #itero sui quadrati\n x_new = x - S[k,0]\n y_new = y - S[k,1]\n\n u = abs(x_new*math.cos(S[k,3])+y_new*math.sin(S[k,3]))\n v = abs(-x_new*math.sin(S[k,3])+y_new*math.cos(S[k,3]))\n\n cond = np.maximum(u,v)\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] < S[k,2]/2):\n phantom1[i,j,k] = S[k,4]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom",
"def createDist(N):\n return np.random.normal(loc=1000.,scale=5.,size=np.random.poisson(lam=N))",
"def sgfxy2p(s, N):\n x = sgf_coord.index(s[0])\n y = sgf_coord.index(s[1])\n\n p = rc2p(y + 1, x + 1, N)\n #print('x:{} y:{} p:{}'.format(x, y,p))\n return p",
"def stempot(self,xmax,ymax,nx,ny,atms,pixelshift,scalefactor):\n #zed=2 for rutherford scattering of the nucleus, less for screening\n zed = 1.7\n\n ix = numpy.arange(1.0,nx)\n iy = numpy.arange(1.0,ny)\n dx = xmax/nx\n dy = ymax/ny\n rx = numpy.arange(0,xmax-dx,dx)\n ry = numpy.arange(0,ymax-dy,dy)\n\n Zatom = atms.get_atomic_numbers()\n #translate atoms such that the center of mass is in the center of the computational cell\n com = atms.get_center_of_mass()\n #com = [ 44.40963074 , 44.65497562 , 44.90406073] #for AuNP\n #com = numpy.array(com)\n #print 'com',com -0.149836425, 0.29967285, 0\n #com += [0.41205016875, 0.6742639125, 0] #for rotated line profile \n #com += [-0.149836425, 0.29967285, 0] #for AuNP\n #com += pixelshift\n #print 'com+pixelshift',com\n cop = xmax/2.0\n trans = [cop-i for i in com]\n atms.translate(trans)\n positions=atms.get_positions()\n ax=[]\n ay=[]\n az=[]\n for o,t,h in positions:\n ax.append(o)\n ay.append(t)\n az.append(h)\n ax = numpy.array(ax)\n ay = numpy.array(ay)\n az = numpy.array(az)\n amax = len(Zatom)\n\n #find boundaries of slice\n axmin = min(ax)\n axmax = max(ax)\n aymin = min(ay)\n aymax = max(ay)\n\n V= numpy.zeros((nx,ny))\n\n #map x and y coords of the atoms to the nearest grid points\n #A fraction of the atom must be assigned to the closest gridpoints\n #to avoid sum and difference frequencies appearing in the image\n #grid point to the left of the atom\n ix = numpy.array([math.floor(axi/dx) for axi in ax])\n #apply periodic boundary conditions\n iax = numpy.array([math.fmod(iaxi,nx) for iaxi in ix])\n ibx = numpy.array([math.fmod(iaxi+1,nx) for iaxi in ix])\n #fraction of atom at iax\n fax = numpy.array([1-math.fmod((axi/dx),1 ) for axi in ax])\n #grid point above the atom\n iy = numpy.array([math.floor(ayi/dy) for ayi in ay])\n #apply periodic boundary conditions\n iay = numpy.array([math.fmod(iayi,ny) for iayi in iy])\n iby = numpy.array([math.fmod(iayi+1,ny) for iayi in iy])\n #fraction of atom at iay \n fay = numpy.array([1-math.fmod((ayi/dy),1 ) for ayi in ay])\n #Add each atom to the potential grid\n V1 = numpy.array([fax[i] * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V2 = numpy.array([(1-fax[i]) * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V3 = numpy.array([fax[i] * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n #V1 = numpy.array([fax[i] * fay[i] * scalefactor for i in range(len(fax))])\n #V2 = numpy.array([(1-fax[i]) * fay[i] * scalefactor for i in range(len(fax))])\n #V3 = numpy.array([fax[i] * (1-fay[i]) * scalefactor for i in range(len(fax))])\n #V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * scalefactor for i in range(len(fax))])\n\n for j in range(amax):\n V[iax[j],iay[j]] += V1[j]\n V[ibx[j],iay[j]] += V2[j]\n V[iax[j],iby[j]] += V3[j]\n V[ibx[j],iby[j]] += V4[j]\n rev_trans = [-1.0*i for i in trans]\n atms.translate(rev_trans)\n return V",
"def convert_to_poisson(dp):\n return np.random.poisson(dp)",
"def patterns(N,P,mode):\r\n if mode=='iterative': #only works when N is small enough (N<20) for enumeration\r\n spins = list(itertools.product([-1, 1], repeat=N)) \r\n pp = np.random.choice(2**N,P,replace=False)\r\n eps = list([spins[p] for p in pp])\r\n if mode=='random':\r\n eps = [2*(np.random.randint(0,2,N)-0.5) for p in range(P)]\r\n return eps",
"def Poisson(ni, f, x, v, n):\n ne = DECSKS.lib.density.single_integration(f[n,:,:], of = x, wrt = v)\n\n xi = np.zeros(x.N)\n E_hat = np.zeros(x.N, dtype = complex) # container\n\n\n # define wave indices\n for r in range(x.N):\n if r <= x.N/2 :\n xi[r] = 2*np.pi*r / x.L\n else:\n xi[r] = 2*np.pi*(r - x.N) / x.L\n\n # total charge density, n(x), len(n) = Nx\n\n n = ni - ne\n\n N = np.fft.fft(n)\n A = max(N)\n eps = 2.0e-15\n xi_min = A*eps\n for r in range(x.N):\n if np.abs(N[r]) < xi_min:\n N[r] = 0\n\n # E_hat[0] = 0 from periodic BCs, i.e. because N[0] = 0, quasineutrality\n # equivalently, E_hat[0] is the DC field, of which there is none in\n # a quasineutral system of charged particles, only flucutations are present\n # E_hat[0] = 0 already from E_hat vector initialization\n\n for each in range(1,len(xi)):\n\n E_hat[each] = 1 / (1j*xi[each]) * N[each] # Electric field in Fourier space\n\n\n E = np.real(np.fft.ifft(E_hat)) # Electric field in configurational space\n\n\n return E",
"def phantom_ellipses(n_points,E):\n \n #Rescaling according to image size \n E[:,0] = E[:,0]*n_points/2 #semiaxis a\n E[:,1] = E[:,1]*n_points/2 #semiaxis b\n E[:,2] = E[:,2]*n_points/2 #x\n E[:,3] = E[:,3]*n_points/2 #y\n E[:,4] = E[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = E.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sulle ellissi\n x_new = x - E[k,2]\n y_new = y - E[k,3]\n\n #find(( (x.*cosp + y.*sinp).^2)./asq + ((y.*cosp - x.*sinp).^2)./bsq <= 1); \n cosp = math.cos(E[k,4])\n sinp = math.sin(E[k,4])\n cond = np.square( x_new * cosp + y_new * sinp )*1/(E[k,0]*E[k,0]) + \\\n np.square(y_new * cosp - x_new * sinp)*1/(E[k,1]*E[k,1]) - 1\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] <= 0.0):\n phantom1[i,j,k] = E[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom",
"def poisson_example(en, small2):\n from scipy.stats import poisson\n\n B = 1 << small2\n z = np.zeros(B); z[1] = 1\n wrap = irfft(np.exp(en * (rfft(z) - 1)))\n k = en // B + 1\n xs = k * B - (B >> 1) + np.arange(B)\n pmf = roll(wrap, B >> 1)\n df = pd.DataFrame({'x': xs, 'FFT pmf': pmf})\n po = poisson(en)\n df['Exact pmf'] = po.pmf(df.x)\n df = df.set_index('x', drop=True)\n fig, [ax0, ax1] = plt.subplots(1, 2, figsize=(FIG_W * 2, FIG_H + 0.3), constrained_layout=True)\n ax0.plot(wrap);\n ax0.set(title=f'Raw FFT-based output, wrapped to [0, {B}]',\n xlabel=f'Wrapped outcome, n mod {B}',\n ylabel='Probability mass, Pr(N=n)');\n df[['FFT pmf', 'Exact pmf']].plot(style=['-', ':'], ax=ax1, logy=True,\n title='Shifted FFT vs exact Poisson probabilities\\n(log scale)',\n xlabel='Outcome, n', ylabel='Probability mass, Pr(N=n)');\n ax1.set(ylim=[1e-17, 2 * df['FFT pmf'].max()])\n ax1.yaxis.set_minor_locator(ticker.LogLocator(subs='all'))",
"def make_pattern(self):\n probability = random.SystemRandom().random()\n if probability < 0.1:\n _pattern = [0 for x in range(32)]\n elif probability > 0.5:\n pattern_num = SECURE_RANDOM.choice(CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.80:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n elif _probability < 0.40:\n _offset = random.SystemRandom().randint(2, 16)\n _pattern = [1 if (x == _offset) or (x % pattern_num == _offset) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n pattern_num = SECURE_RANDOM.choice(INNER_CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.50:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n\n if not self.global_swing:\n _probability = random.SystemRandom().random()\n if _probability > 0.3:\n _pattern.extend([random.SystemRandom().uniform(0.01, 0.5), random.SystemRandom().randint(1, 14), 0])\n else:\n _pattern.extend([0,1,0])\n else: \n _pattern.extend([0,1,1]) \n\n return _pattern",
"def generatePos(self):\n self.pos = np.zeros((self.num_points, 2), dtype='int32')\n self.pos[:, 1] = np.repeat(list(reversed(np.arange(1, self.x*2, 2))), self.y)\n self.pos[:, 0] = np.tile(np.arange(1, self.x*2, 2), self.y)",
"def poisson(random_state, size=None, lam=1.0, ndim=None, dtype='int64'):\r\n lam = tensor.as_tensor_variable(lam)\r\n \r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size)\r\n\r\n op = RandomFunction(\"poisson\", tensor.TensorType(dtype=dtype,\r\n broadcastable=bcast))\r\n return op(random_state, size, lam)",
"def psfTemplateModel(n, params):\n psf_template = params[\"psf_template\"]\n self.m_psf = psf_template\n print(\"PSF template shape\", np.shape(psf_template))\n dim = int(n)\n m = np.shape(psf_template)[0]\n #if m != dim:\n # raise ValueError(\"PSF template dimension not equal patch size\")\n \n if np.sum(psf_template) != 1:\n print(\"Normalizing PSF template to sum = 1\")\n psf_template = psf_template/np.sum(psf_template) \n return psf_template",
"def poisson(random_state, lam=1.0, size=None, chunk_size=None, gpu=None, dtype=None):\n if dtype is None:\n dtype = np.random.RandomState().poisson(handle_array(lam), size=(0,)).dtype\n size = random_state._handle_size(size)\n seed = gen_random_seeds(1, random_state.to_numpy())[0]\n op = TensorPoisson(size=size, seed=seed, gpu=gpu, dtype=dtype)\n return op(lam, chunk_size=chunk_size)",
"def GeneratePointsImg(self, n, ppa):\n x = np.linspace(0,self.camera.sensorSize,n)+ppa[0]\n y = np.linspace(0,self.camera.sensorSize,n)+ppa[1]\n\n return np.meshgrid(x, y)",
"def poisson(data, time, dt):\n shape = tf.shape(data).numpy()\n size = tf.size(data).numpy()\n data = tf.reshape(data, [-1]).numpy()\n\n num_bins = int(time/dt)\n\n randnums = np.random.rand(size, num_bins)\n\n spikes = np.zeros((size, num_bins))\n\n\n\n for i in range(size):\n rate = data[i] * dt\n for j in range(num_bins):\n if rate > randnums[i][j]:\n spikes[i][j] = 1\n\n\n\n spikes = np.reshape(spikes, (shape[0], shape[1], num_bins))\n\n return spikes",
"def poisson(gp_link=None):\r\n if gp_link is None:\r\n gp_link = noise_models.gp_transformations.Log_ex_1()\r\n #else:\r\n # assert isinstance(gp_link,noise_models.gp_transformations.GPTransformation), 'gp_link function is not valid.'\r\n analytical_mean = False\r\n analytical_variance = False\r\n return noise_models.poisson_noise.Poisson(gp_link,analytical_mean,analytical_variance)",
"def _generate_pores(self):\n logger.info(\"Place randomly located pores in the domain\")\n #Original Random Point Generator\n #coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz]\n #Seeding Code\n coords = np.zeros([self._Np,3])\n #reject points close to boundaries - if False there will be slightly more\n rejection = [False,False,True]\n for j in range(3):\n i = 0\n while i < self._Np:\n coord = np.random.uniform(0,1,1)\n if self._reject(coord) == rejection[j]:\n coords[i][j]=coord\n i += 1\n coords*=np.array([self._Lx,self._Ly,self._Lz])\n #Seeding Code\n #Uniform Random Generator\n #coords = np.array([np.random.uniform(0,self._Lx,self._Np),np.random.uniform(0,self._Ly,self._Np),np.random.uniform(0,self._Lz,self._Np)]).T\n\n self['pore.coords'] = coords\n logger.debug(\"End of method\")",
"def model(r, p0, n=1):\n# print \"oi\"\n Pt = zeros(n, float) # initialize the output vector\n P = p0\n for i in xrange(n):\n Pt[i] = r*P\n P = Pt[i]\n \n return Pt",
"def generar_polinomio(self):\n\t\tself.poli = 0\n\t\tfor i in range(len(self.v)):\n\t\t\tpoli2 = n(self.diferencias_divididas(self.v[0:i+1]))\n\t\t\tfor j in range(i):\n\t\t\t\tpoli2 *= self.x-self.v[j][0]\n\t\t\tself.poli = self.poli + poli2",
"def MSP_data(Ts, N=4000, N_trans=100):\r\n Xs = []\r\n Ys = []\r\n Ps = []\r\n M = len(Ts[0])\r\n N_matrices = range(len(Ts))\r\n\r\n Z = [1/3., 1/3., 1/3.]\r\n for n in range(N_trans):\r\n p = [prob(Z, T) for T in Ts]\r\n T = Ts[np.random.choice(N_matrices, p=p)]\r\n Z = evolve(Z, T)\r\n for n in range(N):\r\n p = [prob(Z, T) for T in Ts]\r\n T = Ts[np.random.choice(N_matrices, p=p)]\r\n Z = evolve(Z, T)\r\n x = np.sqrt(2)*(Z[0] + Z[1]/2.0)\r\n y = Z[1]*np.sqrt(6.0)/2.0\r\n Ps.append(Z)\r\n\r\n return Ps",
"def generate_sphere_points(n):\r\n points = []\r\n inc = math.pi * (3 - math.sqrt(5))\r\n offset = 2 / float(n)\r\n for k in range(int(n)):\r\n y = k * offset - 1 + (offset / 2)\r\n r = math.sqrt(1 - y*y)\r\n phi = k * inc\r\n points.append([math.cos(phi)*r, y, math.sin(phi)*r])\r\n return points",
"def successive_poisson(tau1, tau2, size=1):\n # Draw samples out of first exponential distribution: t1\n t1 = np.random.exponential(tau1, size=1)\n\n # Draw samples out of second exponential distribution: t2\n t2 = np.random.exponential(tau2, size=1)\n\n return t1 + t2",
"def successive_poisson(tau1, tau2, size=1):\r\n # Draw samples out of first exponential distribution: t1\r\n t1 = np.random.exponential(tau1,size=tau1)\r\n\r\n # Draw samples out of second exponential distribution: t2\r\n t2 = np.random.exponential(tau2,size=tau2)\r\n\r\n return t1 + t2",
"def gen_seq(self,ntrials=20,pm_trial_position=None):\n # insert ranomly positioned pm trials\n if type(pm_trial_position)==type(None):\n ntrials -= 1+self.num_pm_trials\n pm_trial_position = np.random.randint(self.min_start_trials,ntrials,self.num_pm_trials) \n else:\n ntrials -= 1+len(pm_trial_position)\n pm_trial_position = pm_trial_position\n # generate og stim\n seq = np.random.randint(0,self.ntokens_og,ntrials)\n X = np.insert(seq,[0,*pm_trial_position],self.pm_token)\n # form Y \n Xroll = np.roll(X,self.nback)\n Y = (X == Xroll).astype(int) # nback trials\n Y[X==self.pm_token]=2 # pm trials\n return X,Y"
] | [
"0.7008877",
"0.70041984",
"0.6226595",
"0.6033562",
"0.60180074",
"0.5884325",
"0.5813987",
"0.57646567",
"0.57316214",
"0.5635626",
"0.56014806",
"0.5587823",
"0.55146223",
"0.5510471",
"0.55041295",
"0.5473622",
"0.5464378",
"0.5455472",
"0.54425746",
"0.53956705",
"0.5387785",
"0.5364923",
"0.5357816",
"0.53544337",
"0.53496194",
"0.53439325",
"0.5343284",
"0.5342101",
"0.53401357",
"0.5338046"
] | 0.70617217 | 0 |
generate samples with batch_size by the thinning algorithm; return sampling sequences and the corresponding elementwise log-likelihood values. | def sampling(self, T, S, batch_size, keep_latest_k):
points_list = []
size_list = []
        # generate inhomogeneous Poisson points iteratively
for b in range(batch_size):
homo_points = self.__homogeneous_poisson_sampling(T, S, self.maximum)
points = self._inhomogeneous_poisson_thinning(homo_points, self.maximum)
n_points = tf.shape(points)[0]
points_list.append(points)
size_list.append(n_points)
# initialize tensor for sequences
max_size = tf.reduce_max(tf.stack(size_list))
seqs = []
logliks = []
# organize generated samples into tensor seqs
for b in range(batch_size):
n_points = tf.shape(points_list[b])[0]
points = points_list[b]
logpdfs = tf.scan(
lambda a, i: self.log_conditional_pdf(points[:i, :], S, keep_latest_k),
tf.range(1, n_points+1), # from the first point to the last point
initializer=np.array(0., dtype=np.float32))
seq_paddings = tf.zeros((max_size - n_points, 1 + len(S)))
lik_paddings = tf.zeros(max_size - n_points)
seq = tf.concat([points, seq_paddings], axis=0)
loglik = tf.concat([logpdfs, lik_paddings], axis=0)
seqs.append(seq)
logliks.append(loglik)
seqs = tf.stack(seqs, axis=0)
logliks = tf.expand_dims(tf.stack(logliks, axis=0), -1)
return seqs, logliks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n batch.append((*data, idx))\n idx = np.array([i[5] for i in batch])\n #TD errors are only updated for transitions that are replayed\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n s_batch = np.array([i[0] for i in batch])\n a_batch = np.array([i[1] for i in batch])\n r_batch = np.array([i[2] for i in batch])\n d_batch = np.array([i[3] for i in batch])\n new_s_batch = np.array([i[4] for i in batch])\n\n return s_batch, a_batch, r_batch, d_batch, new_s_batch, idx",
"def next_sample(self, batch_size=1):\n\n X = []\n y = []\n\n for count in range(batch_size):\n #check for abrupt drift\n if count % self.abrupt_drift_rate == 0:\n dimfaks = [round(np.random.rand() * 4, 1) for _ in range(self.dims)]\n dimpots = [1 + round(np.random.rand() * 2) for _ in range(self.dims)]\n dimvars = [np.random.rand() * self.var for _ in range(self.dims)]\n dimmeans = [5 + np.random.rand() * 10 for _ in range(self.dims)]\n print(\"Random Polynomconcept: \", end=\"\")\n for i in range(self.dims):\n print(dimfaks[i],\" * x\", i+1, \"^\", dimpots[i], \" + \",end=\"\", sep=\"\")\n print()\n\n value = 0\n sample = []\n for i in range(self.dims):\n sample.append(np.random.normal(loc=dimmeans[i], scale=dimvars[i]))\n value += dimfaks[i] * (sample[i] ** dimpots[i])\n \n X.append(sample)\n y.append(value)\n\n self._x_idx += batch_size\n\n return (X, y)",
"def batchwise_sample(gen, num_samples, batch_size):\n\n samples = []\n for i in range(int(ceil(num_samples/float(batch_size)))):\n samples.append(gen.sample(batch_size))\n\n return torch.cat(samples, 0)[:num_samples]",
"def random_sample(self, batch_size=1, dtype=None, tp=None, **kw):\n \n batch = []\n labels = []\n for _ in range(batch_size):\n self.sample_counter += 1\n we_are_still_looking = True\n while (we_are_still_looking):\n we_are_still_looking = False\n if tp is not None:\n self.curr_tps = tp\n else:\n self.curr_tps = self.randomstate.randint(0, len(self.tps))\n folder = self.tps[self.curr_tps]\n try:\n features, masks = self._get_features_and_masks(folder)\n except Exception as e:\n print('could not load all data from {}, will now move to next random sample'.format(folder))\n logging.getLogger('data').error(\n 'could not load all data from {}, will now move to next random sample'.format(folder))\n return self.random_sample(batch_size, dtype, tp, **kw)\n shapev = [x for x in np.shape(features[0]) if x > 1]\n paddedw = np.ones(len(shapev))\n paddedw[:len(self.w)] = self.w\n valid_range = [x for x in shapev - paddedw]\n weonlyacceptwithlabels = self.each_with_labels > 0 and self.sample_counter % self.each_with_labels == 0\n if weonlyacceptwithlabels:\n if np.sum(np.asarray(masks) >= self.minlabel) > 0:\n # choose one of the labels, by picking a random index from lesion voxel 0 to nlesions-1\n l = self.randomstate.randint(0, np.sum(np.asarray(masks) >= self.minlabel))\n # now get the label coordinates\n lab = [np.int64(ll[l]) for ll in np.where(np.squeeze(masks) >= self.minlabel)]\n # now make sure, we look in a subwindowgroup of all possible windows, where this voxel is present\n imin = [self.randomstate.randint(max(f - self.w[ind], 0), max(1, min(f + 1, valid_range[ind])))\n for ind, f in enumerate(lab)]\n else:\n we_are_still_looking = True\n continue\n else:\n if self.pyramid_sampling:\n r = self.randomstate.rand(len(valid_range))\n rr = [(1 - (2 * rrr - 1) ** 2) / 2 if rrr < 0.5 else (1 + (2 * rrr - 1) ** 2) / 2 for rrr in r]\n imin = [np.int32(rr[ind] * (max(j + self.p[ind], 1) + self.p[ind]) - self.p[ind]) for ind, j in\n enumerate(valid_range)]\n else:\n imin = [self.randomstate.randint(0 - self.p[ind], max(j + self.p[ind], 1)) for ind, j in\n enumerate(valid_range)]\n imax = [imin[i] + paddedw[i] for i in range(len(imin))]\n\n tempdata, templabels = self._extract_sample(features, masks, imin, imax, shapev,\n needslabels=weonlyacceptwithlabels,\n one_hot=self.perform_one_hot_encoding)\n if weonlyacceptwithlabels and len(templabels) == 0:\n we_are_still_looking = True\n continue\n batch.append(tempdata)\n labels.append(templabels)\n\n if dtype is not None:\n batch = np.asarray(batch, dtype=dtype)\n else:\n batch = np.asarray(batch)\n labels = np.asarray(labels)\n\n # if not self.perform_one_hot_encoding:\n # order = [x for x in range(len(labels.shape))]\n # order.pop(1)\n # order.append(1)\n # labels = np.transpose(labels, order)\n\n return batch, labels",
"def _sample_propagation_indices(\n self, batch_size: int, _rng: torch.Generator\n ) -> torch.Tensor:\n model_len = (\n len(self.elite_models) if self.elite_models is not None else len(self)\n )\n if batch_size % model_len != 0:\n raise ValueError(\n \"To use GaussianMLP's ensemble propagation, the batch size must \"\n \"be a multiple of the number of models in the ensemble.\"\n )\n # rng causes segmentation fault, see https://github.com/pytorch/pytorch/issues/44714\n return torch.randperm(batch_size, device=self.device)",
"def generate_samples(self, n_samples):",
"def generate_samples(self, n_samples):",
"def p_sample(self, rng, params, batch_size, context, chain_out_size):\n rng = jax.random.fold_in(rng, jax.lax.axis_index('batch'))\n\n assert batch_size % jax.local_device_count() == 0\n per_device_batch_size = batch_size // jax.local_device_count()\n logging.info('Sampling from model, hope you are patient...')\n\n rng, rng_perm = jax.random.split(rng)\n sigmas = ardm_utils.get_batch_permutations(rng_perm, per_device_batch_size,\n self.num_steps)\n\n x = jnp.full((per_device_batch_size, *self.config.data_shape),\n fill_value=self.absorbing_state,\n dtype=jnp.int32)\n\n def next_sample_step(state, t):\n chain, x = state\n x = self.sample_step(\n jax.random.fold_in(rng, t), x,\n t, sigmas, params, context)\n\n # Compute the write index. Minimum is 0, maximum is chain_out_size - 1.\n write_index = (t * chain_out_size) // self.num_steps\n\n # May simply overwrite if write_index lands on the same index again, this\n # is desired behaviour and as a result the final index will also be the\n # complete sample.\n chain = jax.lax.dynamic_update_slice(\n chain, jnp.expand_dims(x, axis=0), (write_index,) + (0,) * x.ndim)\n return (chain, x), None\n\n # Every step of the generative process.\n ts = jnp.arange(self.num_steps)\n\n # `chain` is an output buffer that will contain intermediate states.\n chain = jnp.zeros(\n (chain_out_size, per_device_batch_size) + self.config.data_shape,\n dtype=x.dtype)\n state, _ = jax.lax.scan(\n next_sample_step, init=(chain, x), xs=ts)\n chain, _ = state\n\n return chain",
"def generate_batch(self, batch_size):\n n_words = len(self.center_words)\n while self.data_index <= n_words:\n self.data_index += batch_size\n yield self.center_words[self.data_index-batch_size:self.data_index], self.context_words[self.data_index-batch_size:self.data_index], self.neg_samples[self.data_index-batch_size:self.data_index, :]",
"def generate_batch():\n\n # Initialize variables\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n alphas = np.zeros(self.batch_size)\n n_items = 0\n index = 0\n\n while index < len(data):\n reduced_window = random.randint(0, self.window_size)\n if data[index] is not None:\n\n left = max(0, index - self.window_size + reduced_window)\n right = min((index + self.window_size + 1 -\n reduced_window), len(data) - 1)\n for pos2 in range(left, right, 1):\n\n if n_items == self.batch_size:\n queue.put((example, labels, index))\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n n_items = 0\n\n if pos2 != index and data[pos2] is not None:\n example[n_items] = data[pos2]\n labels[n_items] = data[index]\n alpha = self.learning_rate - \\\n (self.learning_rate - 0.001) * (index / self.n_words)\n alphas[n_items] = max(0.001, alpha)\n n_items += 1\n index += 1\n\n # Poison pills\n for _ in range(n_workers):\n queue.put(None)",
"def next_sample(self, batch_size=1):\n\n X = []\n y = []\n self.cont = 2.\n for x in range(batch_size):\n if x > batch_size/2 and self.cont == 2:\n self.cont = 5.*np.random.rand()\n if self._random_state.rand() < 1/self.abrupt_drift_rate:\n #self.offset += 1\n pass\n y.append(self.offset + (x*self.cont)/self.granularity + self._random_state.normal(scale=1))\n X.append(float(x))\n self._x_idx += batch_size\n zipped = list(zip(X, y))\n if self.shuffleData:\n shuffle(zipped)\n \n\n return ([float(zipped[i][0]) for i in range(len(X))], [float(zipped[i][1]) for i in range(len(X))])",
"def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n #print(\"T is \",T)\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n #print(\"sampled data \", s, \" \",data, end=\" \")\n batch.append((*data, idx))\n\n idx = np.array([i[2] for i in batch])\n #idx in the offline buffer\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n names_batch = np.array([i[1] for i in batch])\n\n return names_batch, idx",
"def sample(self, batch_size, **_kwargs):\n all_idxes = []\n buf_lengths = [len(buf) for buf in self.buffers]\n if self.no_waste:\n buf_lengths.insert(0,len(self.overflow_buffer))\n\n buffer_batch_sizes = proportional(batch_size, buf_lengths)\n #print(buffer_batch_sizes)\n for i in range(len(buf_lengths)):\n idxes = [random.randint(0, buf_lengths[i] - 1) for _ in range(buffer_batch_sizes[i])]\n all_idxes.append(idxes)\n return self._encode_sample(all_idxes)",
"def sample(self, n_samples, batch_size, word_0=-1, gen_type='multinom'):\n # Compute the number of batches\n if n_samples != batch_size:\n n_batches = n_samples // batch_size + 1\n else:\n n_batches = 1\n\n samples = torch.zeros(n_batches * batch_size, self.max_len).long()\n\n # Produce samples by batches\n for batch in range(n_batches):\n hidden = self.init_hidden(batch_size)\n if word_0 < 0:\n # Initialize every sequence with a random word from the vocabulary\n input = torch.randint(low=0, high=self.voc_dim, size=(batch_size,))\n else:\n # Initialize every sequence with 'word_0' as starting token\n input = torch.LongTensor([word_0] * batch_size)\n if self.gpu:\n input = input.cuda()\n\n # Iterate the generator until we reach the maximum length allowed for the sequence\n for i in range(self.max_len):\n # Forward pass where we keep track of the hidden states of the network\n output, hidden = self.forward(input, hidden, require_hidden=True)\n\n if gen_type == 'multinom':\n # Generate the next token in the sequence randomly using the output as a multinomial distribution\n next_token = torch.multinomial(torch.exp(output), 1)\n elif gen_type == 'argmax':\n # Choose the most probable token in the sequence deterministically\n next_token = torch.argmax(torch.exp(output), 1)\n\n # Append generated ith tokens to batch #'batch'\n samples[batch * batch_size:(batch + 1) * batch_size, i] = next_token.view(-1)\n\n # Add generated tokens to the input\n input = next_token.view(-1)\n\n # We need this because the number of samples might not be divisible by the size of batches\n samples = samples[:n_samples]\n\n return samples",
"def sample(self, batch_size):\n last_sampled, p = self.st.batch_sample(batch_size, self.theta)\n self.last_sampled = np.asarray(last_sampled)\n self.p = np.asarray(p)\n return self.last_sampled.reshape(-1), self.p",
"def sample(self,\n data: Sequence[Sequence[torch.Tensor]],\n n_epochs: int = 1) -> Tuple[List[List[int]], List[List[int]], List[int]]:\n\n all_queries = []\n all_targets = []\n for q, t in data:\n all_queries.append(q)\n all_targets.append(t)\n\n print(f'sampler size: {len(all_queries)}')\n\n\n self.n_batch = int(np.ceil(data.__len__() / self.batch_size))\n print(\"n_batch:\", self.n_batch)\n\n for i in range(self.n_batch):\n # position = i * self.batch_size\n # queries = all_queries[position:position + self.batch_size]\n # targets = all_targets[position:position + self.batch_size]\n sample_index = np.random.choice(len(all_queries), self.batch_size)\n queries = [all_queries[i] for i in sample_index]\n targets_label = [all_targets[i] for i in sample_index]\n\n # targets = self.transform_label(targets_label)\n\n # labels = np.arange(len(queries))\n\n # queriess = np.array(queries)\n all_targets_text = self.all_targets\n queries = pad_sequence(queries, batch_first=self.batch_first, padding_value=0)\n\n # targets, queries, labels = torch.tensor(targets), torch.tensor(labels)\n # print(queries[:5])\n # print(len(all_targets_text))\n\n\n targets_label = torch.tensor(targets_label)\n yield (queries, all_targets_text, targets_label)",
"def batch_label_generator(a_walk, random_walk_length, window_size):\n grams_1 = [a_walk[j+1:j+1+window_size] for j in range(random_walk_length-window_size)]\n grams_2 = [a_walk[j-window_size:j] for j in range(window_size, random_walk_length)]\n return np.array(grams_1 + grams_2)",
"def batch_gen():\n i = 0\n while len(all_sentences) - i >= batch_size:\n # TODO this is a mess...\n yield np.stack([\n np.pad(\n np.stack(\n [embeddings[id]\n for id in sentence[:max_sentence_length]]), [[\n 0, max_sentence_length -\n min(len(sentence), max_sentence_length)\n ], [0, 0]],\n 'constant',\n constant_values=0)\n for sentence in all_sentences[i:i + batch_size]\n ])\n\n i += batch_size",
"def gen_batch(batch_size,\n num_voters,\n num_alternatives,\n voter_experience='default',\n max_experience=50,\n pull_variance=100.,\n count_noise=0.,\n random_count_prob=0.):\n gt = lambda: gen_trial(num_voters, num_alternatives, voter_experience, max_experience, pull_variance, count_noise, random_count_prob)\n arm_means, c, _, rankings = list(zip(*[gt() for _ in range(batch_size)]))\n return torch.tensor(arm_means).float(), torch.tensor(c).float(), torch.tensor(rankings).float()",
"def resample_sequence(model, length, context, resample_num=5, num_samples=1, temperature=0.5, repetition_penalty=1.0,\n top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):\n generated = context\n idxs = np.arange(generated.shape[1])[-length:]\n np.random.shuffle(idxs)\n with torch.no_grad():\n inputs = {'input_ids': generated}\n update_pos = idxs[:min(resample_num, len(idxs))]\n input_ids = generated\n perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]),\n dtype=torch.float, device=device)\n perm_mask[:, :, update_pos] = 1.0\n\n\n target_mapping = torch.zeros((1, len(update_pos), input_ids.shape[1]), dtype=torch.float, device=device)\n target_mapping[0, torch.arange(len(update_pos)), update_pos] = 1.0\n\n inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}\n outputs = model(**inputs)\n\n # next_token_logits = outputs[0][0, update_pos, :] / temperature\n next_token_logits = outputs[0][0,:,:] / (temperature if temperature > 0 else 1.)\n for _ in set(generated.view(-1).tolist()):\n next_token_logits[:,_] /= repetition_penalty\n\n filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)\n if temperature == 0: #greedy sampling:\n next_token = torch.argmax(filtered_logits).unsqueeze(0)\n else:\n next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)\n\n generated[:, update_pos] = next_token[:, 0]\n\n return generated, update_pos",
"def sample(self, batch_size):\n raise NotImplementedError",
"def next_batch(self, batch_size):\n\n all_idx = np.arange(0, self.length)\n np.random.shuffle(all_idx)\n batch_idx = all_idx[:batch_size]\n batch_imgs = [self.images[i] for i in batch_idx]\n batch_traces = [self.traces[i] for i in batch_idx]\n return batch_imgs, batch_traces",
"def gen_batch(self):\n batch_size = self.batch_size\n shuffle = self.shuffle\n data = np.array(self.sentences)\n\n data_size = len(data)\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n while True:\n # shuffle the data at starting of each epoch\n shuffled_data = data\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n \n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield self._format_samples(shuffled_data[start_index:end_index], self.max_length)\n\n if self.mode in ['train', \"pred\"]:\n break",
"def get_batch(self, batch_size):\n return random.sample(self.buffer, batch_size)",
"def next(self, batch_size=np.inf):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n # shuffle the data each pass over it.\n rng_state = np.random.get_state()\n np.random.shuffle(self.data)\n np.random.set_state(rng_state)\n np.random.shuffle(self.labels)\n \n end_idx = min(self.batch_id + batch_size, len(self.data))\n batch_data = (self.data[self.batch_id:end_idx])\n batch_labels = self.labels[self.batch_id:end_idx]\n batch_seqlen = (self.seqlen[self.batch_id:end_idx])\n self.batch_id = end_idx\n return batch_data, batch_labels, batch_seqlen",
"def batch_generate(self, inputs, labels, batch_size=64):\n inputs_image, inputs, labels = check_inputs_labels(inputs, labels)\n arr_x = inputs\n arr_y = labels\n len_x = inputs_image.shape[0]\n batch_size = check_int_positive('batch_size', batch_size)\n batches = int(len_x / batch_size)\n rest = len_x - batches*batch_size\n res = []\n for i in range(batches):\n if isinstance(arr_x, tuple):\n x_batch = tuple([sub_items[i*batch_size: (i + 1)*batch_size] for sub_items in arr_x])\n else:\n x_batch = arr_x[i*batch_size: (i + 1)*batch_size]\n if isinstance(arr_y, tuple):\n y_batch = tuple([sub_labels[i*batch_size: (i + 1)*batch_size] for sub_labels in arr_y])\n else:\n y_batch = arr_y[i*batch_size: (i + 1)*batch_size]\n adv_x = self.generate(x_batch, y_batch)\n # Black-attack methods will return 3 values, just get the second.\n res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)\n\n if rest != 0:\n if isinstance(arr_x, tuple):\n x_batch = tuple([sub_items[batches*batch_size:] for sub_items in arr_x])\n else:\n x_batch = arr_x[batches*batch_size:]\n if isinstance(arr_y, tuple):\n y_batch = tuple([sub_labels[batches*batch_size:] for sub_labels in arr_y])\n else:\n y_batch = arr_y[batches*batch_size:]\n adv_x = self.generate(x_batch, y_batch)\n # Black-attack methods will return 3 values, just get the second.\n res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)\n\n adv_x = np.concatenate(res, axis=0)\n return adv_x",
"def get_seq_batch(self):\n inps = []\n outs = []\n lens = []\n for i in range(self.batch_size):\n leng = np.random.randint(1, self.max_length)\n sent = np.random.choice(range(self.vocab_size), leng)\n labl = sent % 2\n\n lens.append(leng)\n inps.append(np.pad(sent, (0, self.max_length - leng), 'constant', constant_values=0))\n outs.append(np.pad(labl, (0, self.max_length - leng), 'constant', constant_values=0))\n return np.array(inps), np.array(outs), np.array(lens)",
"def sample(self, batch_size):\n buffer_size = len(self.buffer)\n print(\"**\",buffer_size)\n index = np.random.choice(np.arange(buffer_size), size=batch_size, replace=False)\n return [self.buffer[i] for i in index]",
"def sample_X(self, X_L, X_U, size):\n # find the batch number that the iterator of the labelled (or unlabelled) images finishes\n bn_end = np.min([self.conf.unlabelled_image_num, self.conf.labelled_image_num]) / self.conf.batch_size\n if self.batch < bn_end:\n all = np.concatenate([X_L, X_U], axis=0)\n idx = np.random.choice(all.shape[0], size=size, replace=False)\n X = np.array([all[i] for i in idx])\n elif self.conf.labelled_image_num > self.conf.unlabelled_image_num:\n idx = np.random.choice(X_L.shape[0], size=size, replace=False)\n X = np.array([X_L[i] for i in idx])\n else:\n idx = np.random.choice(X_U.shape[0], size=size, replace=False)\n X = np.array([X_U[i] for i in idx])\n\n return X",
"def next_sample(self, batch_size=1):\n\n X = []\n y = []\n for x in range(batch_size):\n if self._random_state.rand() < 1/self.abrupt_drift_rate:\n self.offset = self.offset + self._random_state.rand()\n X.append(x + self._x_idx)\n y.append(math.sin(self.offset + x/(2*math.pi*self.granularity))\n + self._random_state.normal(scale=0.05) + self.cont)\n\n self._x_idx += batch_size\n\n return (X, y)"
] | [
"0.6539411",
"0.6458321",
"0.6340587",
"0.63209933",
"0.630707",
"0.6266073",
"0.6266073",
"0.625791",
"0.62345713",
"0.6177827",
"0.6166631",
"0.6160624",
"0.61466736",
"0.612205",
"0.6086653",
"0.60320735",
"0.6026833",
"0.600623",
"0.597137",
"0.59568256",
"0.59458864",
"0.59310555",
"0.59298205",
"0.59243685",
"0.5900356",
"0.5861632",
"0.5860464",
"0.58493763",
"0.58476603",
"0.58364064"
] | 0.6672451 | 0 |
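The `sampling` method in the row above rests on the classic Lewis/Ogata thinning step for inhomogeneous Poisson processes. A minimal NumPy sketch of that core idea (illustrative names only, not the class's TensorFlow API) is:

import numpy as np

def thinning_sample(intensity, T, lam_max, rng=None):
    # Sample event times on [0, T] from an inhomogeneous Poisson process
    # with rate function `intensity`, using Lewis/Ogata thinning.
    # `lam_max` must upper-bound intensity(t) on [0, T].
    rng = np.random.default_rng() if rng is None else rng
    t, events = 0.0, []
    while True:
        t += rng.exponential(1.0 / lam_max)  # candidate from homogeneous process with rate lam_max
        if t > T:
            break
        if rng.uniform() <= intensity(t) / lam_max:  # accept with prob intensity(t)/lam_max
            events.append(t)
    return np.array(events)

# usage: a sinusoidally modulated rate, safely upper-bounded by 5
events = thinning_sample(lambda t: 3.0 + 2.0 * np.sin(t), T=10.0, lam_max=5.0)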
Decorator to pass in instance_uuid as instance_id | def uuidize(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if 'instance_id' in kwargs and 'instance_uuid' in kwargs:
kwargs['instance_id'] = kwargs['instance_uuid']
del kwargs['instance_uuid']
return f(*args, **kwargs)
return wrapper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_instance_id(self):\n return self.__instance_id",
"def uuid(self):\n raise NotImplementedError",
"def uuid(self):\n raise NotImplementedError",
"def uuid(self):\n raise NotImplementedError",
"def get_uuid(self, obj):\n return IUUID(obj, None)",
"def get_uuid(self, obj):\n return IUUID(obj, None)",
"def _get_uuid(self, *args: Any, **kwargs: Any) -> str:\n return \"lane_invasions\"",
"def function_uuid():\r\n yield uuid.uuid4()",
"def _get_uuid(self, *args: Any, **kwargs: Any) -> str:\n return \"distance\"",
"def id(self, *args, **kwargs) -> Any:\n pass",
"def __init__(self):\n self.id = uuid.uuid4().hex",
"def _get_rec_uuid(self, uuid, context=None):\n if context is not None:\n moduuid = context.get('moduuid')\n if moduuid:\n return get_uuid(moduuid, uuid)\n return uuid",
"def get_instance_id(self):\n return self.instance_id",
"def uuid(self) -> str:\n return self.obj.uuid",
"def uuid(self):\n return self.__uuid",
"def uuid(self):\n return self.__uuid",
"def uuid(self) -> UUIDFilter:\n return self.__uuid",
"def _uuid(self):\n u = self.__uuid\n if u is None:\n u = str(uuid.uuid1())\n self._set_uuid(u)\n return u",
"def uuid(self) -> str:\n return self.__uuid",
"def __init__(__self__, *,\n instance_id: pulumi.Input[int]):\n pulumi.set(__self__, \"instance_id\", instance_id)",
"def id(obj):\n return obj",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id",
"def _get_id(self):\n return self.__id"
] | [
"0.67995924",
"0.6435182",
"0.6435182",
"0.6435182",
"0.6259262",
"0.6259262",
"0.62581223",
"0.61940676",
"0.6163332",
"0.5998131",
"0.5985018",
"0.5976193",
"0.5933036",
"0.5872257",
"0.5870427",
"0.5870427",
"0.5848155",
"0.58157057",
"0.5784641",
"0.5778465",
"0.5777457",
"0.5776713",
"0.5776713",
"0.5776713",
"0.5776713",
"0.5776713",
"0.5776713",
"0.5776713",
"0.5776713",
"0.5776713"
] | 0.8623325 | 0 |
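A small, self-contained sketch of how a decorator like `uuidize` in the row above behaves when applied; the decorated function here is hypothetical and only echoes the keyword it receives:

import functools

def uuidize(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if 'instance_id' in kwargs and 'instance_uuid' in kwargs:
            kwargs['instance_id'] = kwargs['instance_uuid']
            del kwargs['instance_uuid']
        return f(*args, **kwargs)
    return wrapper

@uuidize
def get_instance(context, instance_id=None):
    # hypothetical callee; returns whatever arrived as instance_id
    return instance_id

# when both keywords are supplied, instance_uuid wins and is forwarded as instance_id
assert get_instance(None, instance_id='old-id', instance_uuid='abc-123') == 'abc-123'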
Utility to return a message with kwarg variables appended | def _log_kwargs(msg='', **kwargs):
kwarg_msg = ' '.join([('%s: |%s|' % (str(key), kwargs[key]))
for key in kwargs])
return "%s %s" % (msg, kwarg_msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_message(self, *args, **kwargs):\n\n message = ''\n message += ', '.join([str(key) + ': ' + str(val) for key, val in kwargs.items()]) + '; ' if kwargs else ''\n message += ', '.join(str(val) for val in args) if args else ''\n\n return message",
"def format(self, kwmsg):\n return kwmsg[\"msg\"]",
"def _print_message(self, msg):\n if msg.arguments:\n arg_str = \" \" + \" \".join(msg.arguments)\n else:\n arg_str = \"\"\n\n if msg.mid is not None:\n mid_str = \"[%s]\" % msg.mid\n else:\n mid_str = \"\"\n\n return \"%s%s%s%s\" % (msg.TYPE_SYMBOLS[msg.mtype], msg.name,\n mid_str, arg_str)",
"def extras_msg(extras):\r\n\r\n if len(extras) == 1:\r\n verb = \"was\"\r\n else:\r\n verb = \"were\"\r\n return \", \".join(repr(extra) for extra in extras), verb",
"def format(self, **kw):\n params = self.defaults.copy()\n params.update(kw)\n if self.filter:\n self.filter(self, params)\n msg = self.msg\n if self.key is not None:\n key = self.key.format(**params)\n msg = msg[key]\n return msg.format(**params)",
"def ExceptionAppend(e, msg):\n if not e.args:\n e.args = (msg,)\n elif len(e.args) == 1:\n e.args = (str(e.args[0]) + ' ' + msg,)\n else:\n e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]",
"def message_warning(msg, *a, **kwargs):\n return str(msg) + '\\n'",
"def get_message(self, metadata=False, asctime=True):\n msg = self.msg if is_string(self.msg) else str(self.msg)\n if self.args:\n try:\n msg = msg % self.args\n except:\n msg += str(self.args)\n\n if asctime: msg = \"[\" + self.asctime + \"] \" + msg\n\n # Add metadata\n if metadata:\n msg += \"\\nCalled by %s at %s:%s\\n\" % (self.func_name, self.pathname, self.lineno)\n\n return msg",
"def _custom_actioner(message: ActionMessage, defined_keyword_arg, **kwargs):\n print(message.additional_fields)\n print(defined_keyword_arg)\n print(kwargs)",
"def msg(txt, *args):\n if QUIET:\n return\n if args:\n txt = txt % args\n sys.stderr.write(txt + '\\n')",
"def msg(self, severity, text, *args):\n assert isinstance(severity, int), severity\n assert isinstance(text, basestring), text\n assert severity >= 0, severity\n msg = Message(\n severity=severity,\n text='%s%s' % (self.prefixes[-1], text % args),\n )\n self.messages.append(msg)\n if self.on_message:\n self.on_message(msg)",
"def _concat_message(msg, *args):\n # coerce msg to unicode if it's not already\n msg = _to_unicode(msg)\n if args:\n # coerce all args to unicode as well\n args = [_to_unicode(arg) for arg in args]\n try:\n msg = msg % tuple(args)\n except TypeError, dummy_err:\n warnings.warn(\n \"String format does not match concat args: %s\" % (str(sys.exc_info()))\n )\n return msg.rstrip()",
"def _format_msg(self, format_str, *args):\r\n return u\"{0} - - [{1}] {2}\\n\".format(\r\n self.client_address[0],\r\n self.log_date_time_string(),\r\n format_str % args\r\n )",
"def _format_msg(self, format_str, *args):\n if not args:\n format_str = six.moves.urllib.parse.unquote(format_str)\n return \"{} - - [{}] {}\\n\".format(\n self.client_address[0],\n self.log_date_time_string(),\n format_str % args\n )",
"def built_error_message(self, key: str, params: List[str]) -> str:\n if key in self.errors:\n error_msg = self.errors[key]\n error_msg = re.sub(\"{..}\", \"\", error_msg)\n return error_msg.format(*params)\n else:\n return \"\"",
"def display_message():\n message = \"I am learning about functions, function calls, parameters and \"\n message+= \"arguments.\"\n print(message)",
"def pacman_msg(*args, **kwargs):\n msg = YELLOW + ':: informant: ' + CLEAR\n for arg in args:\n msg += arg\n print(msg, **kwargs)",
"def log_message(self, format, *args):",
"def process(self, msg, kwargs) -> Tuple[str, Dict]:\n return msg, kwargs",
"def form(msg, opts):\n return '{} ({}): '.format(msg, '/'.join(opts.keys()))",
"def sfherrormessage(func, *args, **kwargs):\n def wrapper(*args, **kwargs):\n e_message = \"DEBUGGING ASSISTANT: make sure the parameters_dict contains \" \\\n \"all the necessary parameters spelt correctly. \" \\\n \"Accepted parameters are: 'tau', 'T0', 'constant', 'alpha', 'beta'\"\n try:\n func(*args, **kwargs)\n except KeyError as e:\n raise HokiKeyError(\n f\"{e} has not been defined and I need it. \"+e_message)\n return wrapper",
"def generateKwargsAsString(self):\n args = \"\"\n axisList = self.tabWidget.currentWidget()\n\n for axisWidget in axisList.getAxisWidgets():\n args += \"%s = %s, \" % (axisWidget.axis.id,\n axisWidget.getCurrentValuesAsStr())\n\n # Generate additional args\n args += 'squeeze = 0'\n args += \", order = '%s' \" % axisList.getAxesOrderString()\n return args",
"def foo(self, message = None):\n if message:\n return 'foo ' + message\n else:\n return 'foo'",
"def itkFormatWarning(msg, *a, **kwa):\n\n return str(msg) + '\\n'",
"def msg(self, target: str, *msg: str,\n tags: Optional[dict[str, Union[str, bool]]] = None) -> None:\n ...",
"def log_message(self, fmt, *args):\n pass",
"def with_additional_message(self: _Diagnostic, message: str) -> _Diagnostic:\n if self.additional_message is None:\n self.additional_message = message\n else:\n self.additional_message = f\"{self.additional_message}\\n{message}\"\n return self",
"def build_irc_msg(command, params, final_param_multi_word=False,\n source=None):\n\n if final_param_multi_word:\n final_param = ':' + params[-1]\n else:\n final_param = params[-1]\n\n if source:\n prefix = ':' + source\n else:\n prefix = ''\n\n if len(params) > 1:\n parts = [prefix, command, ' '.join(params[:-1]), final_param]\n else:\n parts = [prefix, command, final_param]\n\n return ' '.join(parts).strip() + '\\r\\n'",
"def process(self, msg, kwargs):\n dev_id = self.extra[\"device_id\"]\n return f\"[{dev_id[0:3]}...{dev_id[-3:]}] {msg}\", kwargs",
"def format_args(self, **kwargs: Any) -> str:\n return \"\""
] | [
"0.7438445",
"0.6609422",
"0.63775367",
"0.63635904",
"0.63608533",
"0.62179434",
"0.6209629",
"0.6205746",
"0.61822826",
"0.6123762",
"0.6027747",
"0.60020286",
"0.59981364",
"0.59943974",
"0.59677136",
"0.5967563",
"0.59581226",
"0.594761",
"0.5929051",
"0.5896314",
"0.5854801",
"0.5849868",
"0.58345836",
"0.5815522",
"0.5781691",
"0.5771298",
"0.57476795",
"0.5736141",
"0.57167417",
"0.57057047"
] | 0.7287521 | 1 |
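For the `_log_kwargs` helper in the row above, a quick usage sketch showing the '|'-delimited output it produces (the keyword values here are made up):

def _log_kwargs(msg='', **kwargs):
    kwarg_msg = ' '.join([('%s: |%s|' % (str(key), kwargs[key])) for key in kwargs])
    return "%s %s" % (msg, kwarg_msg)

# -> "creating port tenant_id: |t-1| network_id: |net-9|"
print(_log_kwargs('creating port', tenant_id='t-1', network_id='net-9'))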
gets mac address and ips from melange for an interface, gets port from quantum for that interface, then updates the allowed address pairs in quantum to match what is in melange. Takes no action if CONF makes it unnecessary or if the vif cannot be found | def _update_port_allowed_address_pairs(self, tenant_id, instance_id,
interface_id, network_id):
if (CONF.quantum_use_port_security and
CONF.quantum_default_tenant_id == tenant_id):
# get the whole vif record
vif = self.m_conn.get_interface_for_device(instance_id,
interface_id)
# make sure we got a result
if not vif:
                LOG.exception(_('vif could not be found to generate allowed '
'address pairs'))
return
# get the list of ips from the vif (should include/exclude a
# recently added/removed fixed ip)
# TODO(tr3buchet) make sure this isn't a race condition
ips = [ip['address'] for ip in vif.get('ip_addresses', [])]
# append link local to ips if flags are set
if CONF.quantum_port_security_include_link_local:
mac = netaddr.EUI(vif['mac_address'])
ips.append(str(mac.ipv6_link_local()))
# get a list of [{'mac_address': 'xx:xx..',
# 'ip': 'xxx.xxx.xxx.xxx'}, ...]
# for each ip in the ip list
pairs = self._generate_address_pairs(vif, ips)
# get the port id from quantum
port_id = self.q_conn.get_port_by_attachment(tenant_id,
network_id,
interface_id)
# update the port
self.q_conn.update_allowed_address_pairs_on_port(tenant_id,
network_id,
port_id,
pairs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_port_ip_address(self):\n leases = None\n req = dict(ip='0.0.0.0')\n instances = self.get_vms_for_this_req(**req)\n if instances is None:\n return\n\n for vm in instances:\n if not leases:\n # For the first time finding the leases file.\n leases = self._get_ip_leases()\n if not leases:\n # File does not exist.\n return\n\n for line in leases:\n if line.startswith('lease') and line.endswith('{\\n'):\n ip_addr = line.split()[1]\n if 'hardware ethernet' in line:\n if vm.mac == line.replace(';', '').split()[2]:\n LOG.info(_LI('Find IP address %(ip)s for %(mac)s'),\n {'ip': ip_addr, 'mac': vm.mac})\n try:\n rule_info = dict(ip=ip_addr, mac=vm.mac,\n port=vm.port_id,\n status='up')\n self.neutron_event.update_ip_rule(str(vm.host),\n str(rule_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE(\"RPC error: Failed to update\"\n \"rules.\"))\n else:\n params = dict(columns=dict(ip=ip_addr))\n self.update_vm_db(vm.port_id, **params)\n\n # Send update to the agent.\n vm_info = dict(status=vm.status, vm_mac=vm.mac,\n segmentation_id=vm.segmentation_id,\n host=vm.host, port_uuid=vm.port_id,\n net_uuid=vm.network_id,\n oui=dict(ip_addr=ip_addr,\n vm_name=vm.name,\n vm_uuid=vm.instance_id,\n gw_mac=vm.gw_mac,\n fwd_mod=vm.fwd_mod,\n oui_id='cisco'))\n try:\n self.neutron_event.send_vm_info(vm.host,\n str(vm_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE('Failed to send VM info to '\n 'agent.'))",
"def intGet(): \n macchanger, ip, iwconfig = pathGet()\n interfaces = []\n a = str(subprocess.check_output(\"{} link show\".format(ip), shell=True))\n ints = a.split(': ')\n for i in range(len(ints)):\n if len(ints[i].split()) == 1:\n if ints[i] not in [\"1\", \"lo\", \"b'1\"]:\n interface = {'name':str(ints[i])}\n interfaces.append(interface)\n # Get interface properties\n for interface in interfaces:\n name = interface['name']\n macs = subprocess.check_output(\"{} -s {}\".format(macchanger, name), shell=True).decode(\"utf-8\")\n interface['cMac'] = macs.split()[2]\n interface['cVend'] = macs.split(\"(\")[1].split(\")\")[0]\n interface['pMac'] = macs.split(\"\\n\")[1].split()[2]\n interface['pVend'] = macs.split(\"\\n\")[1].split(\"(\")[1].split(\")\")[0]\n try:\n mon = subprocess.check_output(\"{} {} 2> /dev/null\".format(iwconfig, name), shell=True).split()\n mon1 = mon[3].decode(\"utf-8\").split(':')[1]\n if mon1 == 'off/any':\n mon1 = mon[4].decode(\"utf-8\").split(':')[1]\n interface['mon'] = mon1\n except:\n interface['mon'] = 'Wired'\n return(interfaces)",
"def get_physnet(self, port, iface_name, introspection_data):",
"def get_inband_info(cfg_facts):\n ret = {}\n\n if 'VOQ_INBAND_INTERFACE' in cfg_facts:\n intf = cfg_facts['VOQ_INBAND_INTERFACE']\n for a_intf in intf:\n for addrs in intf[a_intf]:\n if \"/\" not in addrs:\n continue\n ret['port'] = a_intf\n\n # Skip fields that are not inband address\n if '/' not in addrs:\n continue\n\n intf_ip = addrs.split('/')\n if ':' in intf_ip[0]:\n ret['ipv6_addr'] = intf_ip[0]\n ret['ipv6_mask'] = intf_ip[1]\n else:\n ret['ipv4_addr'] = intf_ip[0]\n ret['ipv4_mask'] = intf_ip[1]\n return ret",
"def set_static_ip_address(self, payload):\n\n # This request is received from CLI for setting ip address of an\n # instance.\n macaddr = payload.get('mac')\n ipaddr = payload.get('ip')\n\n # Find the entry associated with the mac in the database.\n req = dict(mac=macaddr)\n instances = self.get_vms_for_this_req(**req)\n for vm in instances:\n LOG.info(_LI('Updating IP address: %(ip)s %(mac)s.'),\n {'ip': ipaddr, 'mac': macaddr})\n # Send request to update the rule.\n try:\n rule_info = dict(ip=ipaddr, mac=macaddr,\n port=vm.port_id,\n status='up')\n self.neutron_event.update_ip_rule(str(vm.host),\n str(rule_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE(\"RPC error: Failed to update rules.\"))\n else:\n # Update the database.\n params = dict(columns=dict(ip=ipaddr))\n self.update_vm_db(vm.port_id, **params)\n\n # Send update to the agent.\n vm_info = dict(status=vm.status, vm_mac=vm.mac,\n segmentation_id=vm.segmentation_id,\n host=vm.host, port_uuid=vm.port_id,\n net_uuid=vm.network_id,\n oui=dict(ip_addr=ipaddr,\n vm_name=vm.name,\n vm_uuid=vm.instance_id,\n gw_mac=vm.gw_mac,\n fwd_mod=vm.fwd_mod,\n oui_id='cisco'))\n try:\n self.neutron_event.send_vm_info(vm.host,\n str(vm_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE('Failed to send VM info to agent.'))",
"def _update_ips(self):\n self.ip_others = []\n ips = self.mesh.ipaddr()\n self.rloc16 = self.mesh.rloc()\n for line in ips:\n if line.startswith('fd'):\n # Mesh-Local unicast IPv6\n try:\n addr = int(line.split(':')[-1], 16)\n except Exception:\n continue\n if addr == self.rloc16:\n # found RLOC\n # RLOC IPv6 has x:x:x:x:0:ff:fe00:RLOC16\n self.rloc = line\n elif ':0:ff:fe00:' not in line:\n # found Mesh-Local EID\n self.ip_eid = line\n elif line.startswith('fe80'):\n # Link-Local\n self.ip_link = line\n else:\n self.ip_others.append(line)",
"def _conf_intf(self, conn, interface, mode, pvid, vlan_list):\n\n if not vlan_list:\n raise Exception('The interface should be in at least one vlan')\n\n if (mode == 'access') and (len(vlan_list) > 1):\n raise Exception('An access port cannot be in multiple vlans')\n\n if pvid not in vlan_list:\n raise Exception('The pvid should be in the list of vlans')\n\n req_js = {}\n req_js['if_name'] = interface\n req_js['bridgeport_mode'] = mode\n req_js['pvid'] = pvid\n req_js['vlans'] = vlan_list\n\n obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')\n resp = conn.put(obj, req_js)\n return resp",
"def get_network_info() -> tuple:\n # Getting LAN IP adress\n # A big part of the code here has been extracted from the question of this man.\n # https://stackoverflow.com/questions/41625274/extracting-subnet-mask-from-my-computer-python\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n lan_ip = s.getsockname()[0]\n s.close()\n\n # Checking network interfaces for a convincing Gateway and Mask\n for i in netifaces.interfaces():\n try:\n\n pc_ip = netifaces.ifaddresses(i)[netifaces.AF_INET][0]['addr']\n mask = netifaces.ifaddresses(i)[netifaces.AF_INET][0]['netmask']\n gateway = netifaces.gateways()['default'][netifaces.AF_INET][0]\n\n if pc_ip == lan_ip:\n break\n except:\n pass\n\n else:\n # mask and gateway not found, using default values\n mask = DEFAULT_NETMASK\n gateway = str(lan_ip)\n\n # If invalid netmask we put the default netmask\n if mask == '255.255.255.255': mask = DEFAULT_NETMASK\n\n # Now we need to set to zero the host ports.\n splitted_ip = gateway.split('.')\n splitted_mask = mask.split('.')\n\n for i in range(4):\n if splitted_mask[i] == '0':\n splitted_ip[i] = '0'\n elif splitted_mask[i] != '255':\n num = bin(int(splitted_ip[i]))[2:]\n pat = bin(int(splitted_mask[i]))[2:]\n\n # Adding 0s if needed\n while len(num) < 8:\n num = '0' + num\n while len(pat) < 8:\n pat = '0' + pat\n\n for i in range(8):\n if pat[i] == '0':\n num = num[:i] + '0' + num[i+1:]\n\n splitted_ip[i] = str(int(num, 2))\n\n\n correct_ip = '.'.join(splitted_ip)\n return correct_ip, mask",
"def get_ipv4_interfaces(device_name):\n interfaces = {}\n if DEBUG:\n print note + \"Entering into get_ipv4_interfaces function\"\n # Needs to be fixed. Get list of interfaces first, then IP addresses, then VLAN, then ACLs\n config_element = nipper_xml.find(\"./report/part/[@ref='CONFIGURATION']\")\n\n for section in config_element.findall('./section'):\n device_item = None\n\n for i in section.get('title').split():\n if device_name == i:\n device_item = device_name\n if DEBUG:\n print \"\\t\" + note + \"Set Device: %s\" % device_name\n\n if device_item is not None:\n interface_element = section.find(\"./section/[@ref='CONFIGURATION.ADDRESSES']/section/\"\n \"[@ref='ADDRESSES.IPV4']\")\n if interface_element is not None:\n headings = []\n items = []\n for heading in interface_element.findall(\"./table/[@title='IPv4 addresses']/headings/heading\"):\n headings.append(heading.text)\n if DEBUG:\n print \"\\t\" + note + \"Set Heading: %s\" % heading.text\n for item in interface_element.findall(\"./table/[@title='IPv4 addresses']/tablebody\"\n \"/tablerow/tablecell/item\"):\n items.append(item.text)\n if DEBUG:\n print \"\\t\" + note + \"Set Item: %s\" % item.text\n i = 0\n interface_id = None\n if DEBUG:\n print \"\\t\" + note + \"Heading List: %s\" % headings\n print \"\\t\" + note + \"Items List: %s\" % items\n for item in items:\n if i > (len(headings) - 1):\n i = 0\n if DEBUG:\n print \"\\t\" + info + \"Heading: %s\\t Item: %s\" % (headings[i], item)\n if i is 0:\n interface_id = item\n interfaces[interface_id] = {}\n interfaces[interface_id].update({headings[i]: item})\n i += 1\n\n interfaces_element = section.find(\"./section/[@ref='CONFIGURATION.INTERFACES']/section/\"\n \"[@ref='ETHINTERFACESLAYER3']\")\n if interfaces_element is not None:\n headings = []\n for heading in interfaces_element.findall(\"./table/[@title='Layer 3 Ethernet Interfaces']\"\n \"/headings/heading\"):\n headings.append(heading.text)\n for tr in interfaces_element.findall(\"./table/[@title='Layer 3 Ethernet Interfaces']\"\n \"/tablebody/tablerow\"):\n items = []\n for i in tr.findall(\"./tablecell/item\"):\n items.append(i.text)\n if 'Zone' in headings:\n interfaces[items[headings.index('Interface')]].update({'Zone': items[headings.index('Zone')]})\n if 'VLAN' in headings:\n interfaces[items[headings.index('Interface')]].update({'VLAN': items[headings.index('VLAN')]})\n if DEBUG:\n print info + \"Interfaces object: \"\n print interfaces\n raw_input(warn + \"Press any key to continue\")\n return interfaces",
"def manage_vrf_interfaces(args):\n with IPDB() as ipdb:\n with ipdb.interfaces[args.vrf_name] as vrf:\n if args.action == \"add_interface\":\n vrf.add_port(ipdb.interfaces[args.interface].index)\n logger.info(f\"{args.interface} added to vrf {args.vrf_name}\")\n if args.action == \"remove_interface\":\n subprocess.run(f\"ip link set dev {args.interface} nomaster\", shell=True)\n logger.info(f\"{args.interface} removed from vrf {args.vrf_name}\")",
"def update_interfaces_config(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n all_devices = devices[\"other_devices\"]\n all_devices.update(devices[\"dpdk_devices\"])\n all_devices.update(devices[\"kernel_devices\"])\n\n current_ifcs = {}\n interfaces = {}\n if \"interfaces\" in node:\n current_ifcs = node[\"interfaces\"]\n if current_ifcs:\n for ifc in current_ifcs.values():\n dvid = ifc[\"pci_address\"]\n if dvid in all_devices:\n VppPCIUtil.vpp_create_interface(\n interfaces, dvid, all_devices[dvid]\n )\n node[\"interfaces\"] = interfaces\n\n self.updateconfig()",
"def possible_mac_addresses(interface):\n\n mac_addrs = []\n\n # In case of VLANs, just grab the parent interface\n if interface.interface_type == 'vlan':\n interface = interface.parent\n\n # Bonding/bridge: append the MACs of the physical interfaces\n # TODO: drop the public/bootable check once we decide how to send the extra\n # information to clients\n for slave in interface.all_slaves():\n if slave.mac and (slave.interface_type != \"public\" or slave.bootable):\n mac_addrs.append(slave.mac)\n\n # Handle physical interfaces, and bonding with a dedicated MAC\n # TODO: drop the public/bootable check once we decide how to send the extra\n # information to clients\n if interface.mac and (interface.interface_type != \"public\" or interface.bootable):\n mac_addrs.append(interface.mac)\n\n return mac_addrs",
"def _update_addresses(self, real_ifname, interface, old_interface):\n\n def _gen_cmd(cmd, address):\n \"\"\"\n Generates an `ip addr (add|del) <cidr> dev <ifname>` command.\n \"\"\"\n family = {4: 'inet', 6: 'inet6'}[address[0].version]\n args = ['addr', cmd, '%s/%s' % (address[0], address[1])]\n if family == 'inet' and cmd == 'add':\n args += ['brd', '+']\n args += ['dev', real_ifname]\n if family == 'inet6':\n args = ['-6'] + args\n return args\n\n add = functools.partial(_gen_cmd, 'add')\n delete = functools.partial(_gen_cmd, 'del')\n mutator = lambda a: (a.ip, a.prefixlen)\n\n self._update_set(real_ifname, interface, old_interface,\n 'all_addresses', add, delete, mutator)",
"def _plug_interface(self, context, tenant_id, net_id, port_id,\n remote_interface_id):\n LOG.debug(_(\"QuantumRestProxyV2: _plug_interface() called\"))\n\n # update attachment on network controller\n try:\n port = super(QuantumRestProxyV2, self).get_port(context, port_id)\n mac = port[\"mac_address\"]\n\n for ip in port[\"fixed_ips\"]:\n if ip.get(\"subnet_id\") is not None:\n subnet = super(QuantumRestProxyV2, self).get_subnet(\n context, ip[\"subnet_id\"])\n gateway = subnet.get(\"gateway_ip\")\n if gateway is not None:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\"network\":\n {\"id\": net_id,\n \"gateway\": gateway,\n }\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n\n if mac is not None:\n resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)\n data = {\"attachment\":\n {\"id\": remote_interface_id,\n \"mac\": mac,\n }\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2:Unable to update remote network: \"\n \"%s\"), e.message)\n raise",
"def iface_config(self, iface, *args, **kwargs):\n if not set(kwargs).issubset({'intf_ip_addr', 'netns', 'adminMode'}):\n raise NotImplementedError(\"Method is not implemented for current kwargs.\")\n if kwargs.get('netns', False):\n # Create network namespaces for current iface\n self.create_namespaces(iface)\n del kwargs['netns']\n if 'intf_ip_addr' in kwargs:\n kwargs['ipAddr'] = \"{}/24\".format(kwargs['intf_ip_addr'])\n if iface in self.namespaces:\n self._lhost.ui.enter_namespace(self.namespaces[iface])\n self._lhost.ui.modify_ports([iface], **kwargs)\n if iface in self.namespaces:\n self._lhost.ui.exit_namespace()",
"def get_server_ip_mac(self, server_id):\n port_list = self.list_port()\n if not isinstance(port_list, list):\n return\n interface_list = []\n for port in port_list:\n if port[\"device_id\"] == server_id:\n port_info = {}\n port_info['mac'] = port['mac_address']\n port_info['ip_address'] = port['fixed_ips'][0]['ip_address']\n interface_list.append(port_info)\n\n LOG_OBJ.info(\"VM Interface Info : %s \" % interface_list)\n return interface_list",
"def test_ipam_ip_addresses_update(self):\n pass",
"def scan(graph, connectionInfo, logger, thread):\n logger.info(\"Collecting interface configuration information from network components\")\n\n timeout = int(connectionInfo['timeout'])\n name = connectionInfo['name']\n\n for host in graph.getAllNeighbors(Host):\n if not ((host.getPowerState() is None) or (host.getPowerState() == 'Running')):\n continue\n ssh = base.getSSHConnection(host)\n logger.info(\"Starting interface configuration scan on host: {}\".format(host.getID()))\n if ssh is None: #No ssh connecton is possible -> Skip this host\n logger.info(\"Skipping host {0} as ssh connection failed.\".format(host.getID()))\n continue\n\n interfaceInformation = ssh.getInterfaceInfo()\n ansible = base.getAnsibleInfo(host)\n staticInterface = False\n\n if not ansible:\n continue\n\n #get Information\n for intf in interfaceInformation:\n if intf['type'] == 'loopback' or intf['mac'] != \"00:00:00:00:00:00\":\n continue\n interface = graph.getOrCreateInterface(intf['mac'], name, timeout)\n interface.verify(name, timeout)\n host.addInterface(interface, name, timeout)\n #interface = [interface for interface in hostInterfaces if interface.getMAC() == intf['mac']] #Get the right host interface\n #if len(interface) == 0:\n # continue\n #interface = interface[0]\n\n try:\n interface.setMtu(intf['mtu'], name, timeout)\n interface.setRate(int(intf['speed']) * 1000, name, timeout)\n except:\n pass #Normal case on virtual interfaces\n\n if 'type' in intf:\n if intf['type'] == 'manual' or intf['type'] == 'static':\n staticInterface = True\n else:\n staticInterface = False\n\n\n ansibleIntf = \"ansible_\" + intf['name'].replace(\"-\", \"_\")\n if 'ipv4' not in ansible['ansible_facts'][ansibleIntf].keys():\n continue\n stillExistingAddresses = set() # Set of still existing addresses on this host (insalata.model.Layer3Address)\n if isinstance(ansible['ansible_facts'][ansibleIntf]['ipv4'], list):\n addressesElements = list(ansible['ansible_facts'][ansibleIntf]['ipv4'])\n else: # Single dict\n addressesElements = [ansible['ansible_facts'][ansibleIntf]['ipv4']]\n for addressEl in addressesElements:\n netmask = None\n try:\n interfaceAddress = addressEl['address']\n netmask = addressEl['netmask']\n except KeyError as e:\n logger.error(\"Ansible was not able to detect the {0} on interface {1}\".format(e.args[0], intf['name']))\n continue\n gateway = intf['gateway'] if \"gateway\" in list(intf.keys()) else None\n address = graph.getOrCreateLayer3Address(interfaceAddress, name, timeout, netmask=netmask, gateway=gateway)\n address.setStatic(staticInterface)\n address.verify(name, timeout)\n interface.addAddress(address)\n stillExistingAddresses.add(address)\n\n # Remove old addresses\n for old_adr_edge in [e for e in interface.getEdges() if isinstance(e.getOther(interface), Layer3Address) and e.getOther(interface) not in stillExistingAddresses]:\n logger.critical(str(type(old_adr_edge)))\n logger.critical(old_adr_edge.getOther(interface).getID())\n old_adr_edge.removeVerification(name, timeout)\n old_adr_edge.getOther(interface).removeVerification(name, timeout)\n\n\n # At the end: Create Layer3networks\n for address in stillExistingAddresses:\n netmask = address.getNetmask()\n if not netmask: # we are not able to determine the network address if no netmask is set => Take /32\n netmask = \"255.255.255.255\"\n netAddress = ipAddressHelper.getNetAddress(address.getID(), netmask)\n l3network = graph.getOrCreateLayer3Network(netAddress + \"/\" + str(ipAddressHelper.getPrefix(netmask)), name, timeout, netAddress, 
netmask)\n l3network.verify(name, timeout)\n address.setNetwork(l3network)\n\n base.releaseSSHConnection(ssh)",
"def arp_scan(interface: str, ip_range: str) -> List[str]:\n ans, unans = srp(Ether(dst='ff:ff:ff:ff:ff:ff') / ARP(pdst=ip_range), iface=interface, timeout=2, verbose=False)\n\n ip_addresses = []\n for req, res in ans:\n ip_addresses.append(req[ARP].pdst)\n\n return ip_addresses",
"def addSNMPInterfaces(self, interfacetable):\n\n ifIndex = 0\n ifDescr = \"\"\n ifType = \"\"\n ifMtu = 0\n ifSpeed = 0\n ifPhysAddress = \"\"\n ifAdminStatus = 0\n ifOperStatus = 0\n ifLastChange = 0\n ifInOctets = 0\n ifUcastPkts = 0\n ifInNUcastPkts = 0\n ifInDiscards = 0\n ifInErrors = 0\n ifInUnknownProtos = 0\n ifOutOctets = 0\n ifOutUcastPkts = 0\n ifOutNUcastPkts = 0\n ifOutDiscards = 0\n ifOutErrors = 0\n ifOutQLen = 0\n ifSpecific = 0\n\n\n for loop_ifIndex in interfacetable:\n for ifAttr in interfacetable[loop_ifIndex]:\n if ifAttr == 1:\n ifIndex = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 2:\n ifDescr = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 3:\n ifType = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 4:\n ifMtu = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 5:\n ifSpeed = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 6:\n ifPhysAddress = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 7:\n ifAdminStatus = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 8:\n ifOperStatus = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 9:\n ifLastChange = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 10:\n ifInOctets = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 11:\n ifUcastPkts = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 12:\n ifInNUcastPkts = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 13:\n ifInDiscards = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 14:\n ifInErrors = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 15:\n ifInUnknownProtos = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 16:\n ifOutOctets = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 17:\n ifOutUcastPkts = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 18:\n ifOutNUcastPkts = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 19:\n ifOutDiscards = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 20:\n ifOutErrors = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 21:\n ifOutQLen = interfacetable[loop_ifIndex][ifAttr]\n elif ifAttr == 22:\n ifSpecific = interfacetable[loop_ifIndex][ifAttr]\n\n self.interfacetable[loop_ifIndex] = device_interface(ifIndex, \\\n ifDescr, ifType, ifMtu, ifSpeed, ifPhysAddress, ifAdminStatus, \\\n ifOperStatus, ifLastChange, ifInOctets, ifUcastPkts, \\\n ifInNUcastPkts, ifInDiscards, ifInErrors, ifInUnknownProtos, \\\n ifOutOctets, ifOutUcastPkts, ifOutNUcastPkts, ifOutDiscards, \\\n ifOutErrors, ifOutQLen, ifSpecific)",
"def adjust_ether (self, ip=None, ether=None):\n# The rules are:\n# 1. send to the group mac address address corresponding to the IP.dst\n if ip != None and ip.haslayer(IP) and ether != None and ether.haslayer(Ether):\n iplong = atol(ip.dst)\n ether.dst = \"01:00:5e:%02x:%02x:%02x\" % ( (iplong>>16)&0x7F, (iplong>>8)&0xFF, (iplong)&0xFF )\n # print \"igmpize ip \" + ip.dst + \" as mac \" + ether.dst \n return True\n else:\n return False",
"def randomize_interface(self, interface):\n mac = self.get_rand_mac()\n command = \"ifconfig %s hw ether %s\" % (interface, mac)\n\n subprocess.call(\"ifconfig %s down\" % interface, shell=True)\n rc = subprocess.call(command, shell=True)\n subprocess.call(\"ifconfig %s up\" % interface, shell=True)\n\n return rc",
"def arptable(inputDict):\n if inputDict['interface'] not in getInterfaces():\n return [], \"Interface is not available on the node\", 3\n command = \"ip neigh\"\n cmdOut = externalCommand(command, False)\n out, err = cmdOut.communicate()\n retOut = []\n for line in out.decode(\"utf-8\").split('\\n'):\n splLine = line.split(' ')\n if len(splLine) > 4 and splLine[2] == inputDict['interface']:\n retOut.append(line)\n return retOut, err.decode(\"utf-8\"), cmdOut.returncode",
"def ipv4_interface_setup(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current interfaces with IP addresses\n current_ints = VPPUtil.get_int_ip(node)\n if current_ints != {}:\n print(\"\\nThese are the current interfaces with IP addresses:\")\n for items in sorted(current_ints.items()):\n name = items[0]\n value = items[1]\n if \"address\" not in value:\n address = \"Not Set\"\n else:\n address = value[\"address\"]\n print(\"{:30} {:20} {:10}\".format(name, address, value[\"state\"]))\n question = \"\\nWould you like to keep this configuration \" \"[Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n continue\n else:\n print(\"\\nThere are currently no interfaces with IP \" \"addresses.\")\n\n # Create a script that add the ip addresses to the interfaces\n # and brings the interfaces up\n ints_with_addrs = self._ipv4_interface_setup_questions(node)\n content = \"\"\n for ints in ints_with_addrs:\n name = ints[\"name\"]\n addr = ints[\"addr\"]\n setipstr = \"set int ip address {} {}\\n\".format(name, addr)\n setintupstr = \"set int state {} up\\n\".format(name)\n content += setipstr + setintupstr\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/set_int_ipv4_and_up\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))",
"def _get_ipv6_addresses(self, host: str) -> Dict[str, List[IPv6Address]]:\n if host == \"self\":\n command = \"show ipv6 interface\"\n elif host == \"peer\":\n command = \"failover exec mate show ipv6 interface\"\n\n show_ipv6_interface = self.show(command)\n show_ipv6_interface_lines: List[str] = show_ipv6_interface.strip().splitlines()\n first_line = show_ipv6_interface_lines.pop(0)\n interface: str = first_line.split()[0]\n ipv6_addresses: List[IPv6Interface] = []\n results: Dict[str, List] = {}\n for line in show_ipv6_interface_lines:\n # match IPv6 addresses under interface line\n if line[0].isspace():\n match = RE_IPV6_INTERFACE_MATCH.match(line)\n if match:\n ipv6_addresses.append(IPv6Interface(f\"{match.group(1)}{match.group(2)}\"))\n # update results mapping interface to matched IPv6 addresses and generate the next interface name\n else:\n if ipv6_addresses:\n results[interface] = ipv6_addresses\n ipv6_addresses = []\n interface = line.split()[0]\n\n # Add final interface in iteration if it has IPv6 addresses\n if ipv6_addresses:\n results[interface] = ipv6_addresses\n\n log.debug(\"Host %s: ip interfaces %s\", self.host, results)\n return results",
"def hh_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_hh1:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n\n for device in ci_addrs.switches_hh2:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')",
"def test_interface_addresses(bf: Session, sot: SoT) -> None:\n interface_props = bf.q.interfaceProperties(nodes=SNAPSHOT_NODES_SPEC).answer().frame()\n for _, row in interface_props.iterrows():\n interface_name = row[\"Interface\"].interface\n\n if not row[\"Active\"] or \\\n row[\"Access_VLAN\"] or \\\n row[\"Description\"] == \"[type=ISP]\" or \\\n interface_name in INTERFACES_WITHOUT_ADDRESS:\n continue\n\n assert row[\"Primary_Address\"], f'No address assigned to {row[\"Interface\"]}'\n\n interface_address = ipaddress.ip_network(row[\"Primary_Address\"], strict=False)\n\n # check prefix length\n expected_prefix_length = sot.get_interface_prefix_length(interface_name)\n assert interface_address.prefixlen == expected_prefix_length, \"Unexpected prefix length {} for {}. Expected {}\".format(\n interface_address.prefixlen, row[\"Interface\"], expected_prefix_length)\n\n # check that IP address is from the right prefix\n address_in_range = any([interface_address.subnet_of(prefix)\n for prefix in sot.get_interface_prefixes(interface_name)])\n assert address_in_range, \"Unexpected address {} for {}. Expected it to be in {}\".format(\n interface_address, row[\"Interface\"], sot.get_interface_prefixes(interface_name))",
"def setMAC( self, intf, mac ):\n result = self.cmd( 'ifconfig', intf, 'down' )\n result += self.cmd( 'ifconfig', intf, 'hw', 'ether', mac )\n result += self.cmd( 'ifconfig', intf, 'up' )\n return result",
"def config_networking(\n self, network_obj, ip, netmask, gateway, domain, dns, guest_hostname\n ):\n\n global_ip = vim.vm.customization.GlobalIPSettings()\n adapter_map = vim.vm.customization.AdapterMapping()\n adapter_map.adapter = vim.vm.customization.IPSettings()\n adapter_map.macAddress = network_obj.macAddress\n if ip:\n adapter_map.adapter.ip = vim.vm.customization.FixedIp()\n adapter_map.adapter.ip.ipAddress = ip\n else:\n adapter_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()\n adapter_map.adapter.subnetMask = netmask\n adapter_map.adapter.gateway = gateway\n global_ip.dnsServerList = dns\n adapter_map.adapter.dnsDomain = domain\n ident = vim.vm.customization.LinuxPrep()\n ident.hostName = vim.vm.customization.FixedName()\n if guest_hostname:\n ident.hostName.name = guest_hostname\n else:\n ident.hostName.name = self.vm_obj.name\n custom_spec = vim.vm.customization.Specification()\n custom_spec.nicSettingMap = [adapter_map]\n custom_spec.identity = ident\n custom_spec.globalIPSettings = global_ip\n return self.vm_obj.Customize(spec=custom_spec)",
"def adjust_ip (self, ip=None):\n if ip != None and ip.haslayer(IP):\n if (self.type == 0x11):\n if (self.gaddr == \"0.0.0.0\"):\n ip.dst = \"224.0.0.1\" # IP rule 1\n retCode = True \n elif isValidMCAddr(self.gaddr):\n ip.dst = self.gaddr # IP rule 3a\n retCode = True\n else:\n print \"Warning: Using invalid Group Address\"\n retCode = False\n elif ((self.type == 0x17) and isValidMCAddr(self.gaddr)):\n ip.dst = \"224.0.0.2\" # IP rule 2\n retCode = True\n elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(self.gaddr)):\n ip.dst = self.gaddr # IP rule 3b\n retCode = True\n else:\n print \"Warning: Using invalid IGMP Type\"\n retCode = False\n else:\n print \"Warning: No IGMP Group Address set\"\n retCode = False\n if retCode == True:\n ip.ttl=1 # IP Rule 4\n ip.options=[IPOption_Router_Alert()] # IP rule 5\n return retCode"
] | [
"0.5800621",
"0.56767285",
"0.56355685",
"0.56091154",
"0.5490392",
"0.54267025",
"0.5423052",
"0.5410452",
"0.5391861",
"0.53898084",
"0.538573",
"0.5363289",
"0.5357894",
"0.5292384",
"0.52906984",
"0.52846986",
"0.52736956",
"0.5267511",
"0.5242984",
"0.52397585",
"0.5200145",
"0.5189012",
"0.51845574",
"0.5181281",
"0.5162586",
"0.51577157",
"0.513986",
"0.51316607",
"0.51281726",
"0.51230055"
] | 0.673502 | 0 |
Advance to the next profile in the current file. | def advance_file_position_to_next_profile(self, fid):
# Each profile record is made up of 80 data characters
# (including blanks at the end of the profile)
# and return characters (LF+CR).
fid.seek(self._calculate_next_profile_position())
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def advance(self) -> None:\n self.current_token = self.jack_file_tokens[self._token_idx]\n self._token_idx += 1",
"def next_file(self):\n raise NotImplementedError()",
"def nextPicture(self):\n\t\tif self.currentPicture == self.totalPictures-1:\n\t\t\tself.currentPicture = 0\n\t\telse:\n\t\t\tself.currentPicture += 1\n\t\tself.loadImage(self.picPaths[self.currentPicture])",
"def advance(self):\n if self.current_index < (len(self.decoded_population) - 1):\n self.current_index += 1",
"def next_step(self):\n self.proceed()\n self.execute_current()",
"def next_run(self):\n self.load_run(run=self.run+1)",
"def continue_next(self):\n\n self.scope_assign = {}\n self.scope_var_id = 0\n self.cont = True",
"def next( self ):\n next(self)",
"def next():",
"def next():",
"def update_next_page(self, current_connection, path):\n base_url = 'http://' + Setup.parse_options()['ip_address']\n profile = page_profile.create_page_profile(base_url)\n resource_found = False\n for page in profile:\n if page['page_name'] == path:\n current_connection['next_resource'] = page['resources']\n resource_found = True\n if not resource_found:\n current_connection['next_resource'] = []",
"def _advance(self):\n self._current += 1",
"def advance(self):\n self._current_inst += 1\n self._line = self._lines[self._current_inst].strip()",
"def _advance_line(self):\n self.current_index += 1\n if self.current_index >= len(self.file):\n self.current_line = 'EOF'\n return\n self.current_line = self.file[self.current_index].strip()\n while self.current_line.startswith('#') or self.current_line == '':\n self.current_index += 1\n if self.current_index >= len(self.file):\n self.current_line = 'EOF'\n return\n self.current_line = self.file[self.current_index].strip()\n self._gobble_comments()",
"def profile_step(self):\n import profile\n\n profile.run(\"world.step()\")",
"def next(self):\n self.jumpahead(1)",
"def next(self):\n pass",
"def next(self):\n pass",
"def next(self):\n pass",
"def next(self):\n pass",
"def profile(filename: str) -> 'Iterator[None]':\n profiler = Profile()\n profiler.enable()\n\n yield\n\n profiler.disable()\n profiler.create_stats()\n profiler.dump_stats('profiles/{}'.format(filename))",
"def _1_profile(self, _1_profile):\n\n self.__1_profile = _1_profile",
"def advance(self):\n in_bytes = self._pre_pos\n for tag in self._reader:\n if isinstance(tag, Tag):\n # skip the Metadata in flv stream.\n if not self.handle_magic_head(tag):\n if tag.type == VIDEO_TAG and tag.is_keyframe:\n self.append_keyframe(tag)\n self._pre_pos = self.position()\n in_bytes = self._pre_pos - in_bytes\n if in_bytes > 0:\n self.active()\n else:\n self.inactive()",
"def setNextFile(self):\n\n if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):\n self.nReadFiles=self.nReadFiles+1\n if self.nReadFiles > self.nTotalReadFiles:\n self.flagNoMoreFiles=1\n raise schainpy.admin.SchainWarning('No more files to read')\n\n print('------------------- [Opening file] ------------------------------',self.nReadFiles)\n self.nReadBlocks = 0\n #if self.nReadBlocks==0:\n # self.readFirstHeader()",
"def next(self):\r\n pass",
"def advance(self) -> None:\n pass",
"def __next_page(self):\n self.current_page = self.current_page + 1\n tree = ET.parse(urlopen(self.url + '&start=' + str(self.current_page)))\n self.iterator = tree.iterfind(self.GLOBAL_NP + 'entry')",
"def _recurse_load_profile(self, text, profile_path):\n try:\n inherited_profile = Profile()\n cwd = os.path.dirname(os.path.abspath(profile_path)) if profile_path else None\n profile_parser = _ProfileParser(text)\n # Iterate the includes and call recursive to get the profile and variables\n # from parent profiles\n for include in profile_parser.includes:\n # Recursion !!\n profile = self._load_profile(include, cwd)\n inherited_profile.compose_profile(profile)\n\n # Current profile before update with parents (but parent variables already applied)\n inherited_profile = _ProfileValueParser.get_profile(profile_parser.profile_text,\n inherited_profile)\n return inherited_profile\n except ConanException:\n raise\n except Exception as exc:\n raise ConanException(\"Error parsing the profile text file: %s\" % str(exc))",
"def go_to_next_state(self):\n pass",
"def add_profile(self, profile):\r\n self.profiles.append(profile)"
] | [
"0.64586663",
"0.6085671",
"0.5835374",
"0.5825647",
"0.58250195",
"0.5777023",
"0.5766665",
"0.5716173",
"0.57027316",
"0.57027316",
"0.5684653",
"0.5662056",
"0.5639793",
"0.5639627",
"0.5628434",
"0.5587666",
"0.5563301",
"0.5563301",
"0.5563301",
"0.5563301",
"0.5551133",
"0.55274373",
"0.55034095",
"0.550273",
"0.5478803",
"0.54767084",
"0.54633766",
"0.54138076",
"0.5370589",
"0.53688043"
] | 0.7001547 | 0 |
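The record above seeks past a profile stored as fixed-width 80-character lines terminated by CR+LF, but the helper that computes the target offset is not shown. A minimal standalone sketch of the same arithmetic, assuming the 80-character width and two-byte terminator noted in the record's comments plus an n_lines count supplied by the caller (illustrative assumptions, not the actual WOD bookkeeping):

import os

RECORD_WIDTH = 80      # data characters per physical line (from the comment above)
TERMINATOR_LEN = 2     # CR+LF line terminator (from the comment above)

def skip_fixed_width_lines(fid, start, n_lines):
    # Seek past n_lines fixed-width lines that begin at byte offset `start`
    # and return the offset of whatever follows them.
    line_len = RECORD_WIDTH + TERMINATOR_LEN
    next_pos = start + n_lines * line_len
    fid.seek(next_pos, os.SEEK_SET)
    return next_pos

A caller would typically record the profile's starting offset with fid.tell() before parsing it, so advancing is a single absolute seek rather than a line-by-line read.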
Return the file position to the start of the profile. | def return_file_position_to_start_of_profile(self, fid):
fid.seek(self.file_position, 0)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pos(self):\n return self.file.tell()",
"def get_file_position(self, mode):\r\n return bass_call_0(BASS_StreamGetFilePosition, self.handle, mode)",
"def starting_position(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"starting_position\")",
"def get_position(self):\n\n return (self._fileobj.tell() - self._pos) * 8 - self._bits",
"def locus_start(self):\n return int(open(self.locus_file).read().split('\\t')[3])",
"def stat_beg_file(self, filename):\n\n self.batchvals['numfiles'] += 1\n self.filevals['filename'] = filename\n self.filevals['start_time'] = time.time()\n\n return -1",
"def advance_file_position_to_next_profile(self, fid):\n # Each profile record is made up of 80 data characters \n # (including blanks at the end of the profile)\n # and return characters (LF+CR).\n fid.seek(self._calculate_next_profile_position())\n return None",
"def find_file_start(chunks, pos):\n\n\tpos = pos - 1\n\twhile pos > 0:\n\n\t\tif chunks[pos][0] != 0x100 and chunks[pos][0] != 0x102:\n\n\t\t\t# This is not a block\n\t\t\treturn pos\n\n\t\telse:\n\t\t\tpos = pos - 1\n\n\treturn pos",
"def seek_to_start_time(self):\n return 0",
"def start(self) -> pos.Pos:\n return self.__start",
"def get_start_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc",
"def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')",
"def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')",
"def starting_position(self) -> aws_cdk.aws_lambda.StartingPosition:\n return self._values.get('starting_position')",
"def start_offset(self):\n return self.get_info_value(\"D_STARTOFFS\")",
"def start(self):\n return self.__start_line",
"def tell(self):\n return self._seek_pos",
"def start_loc(self) -> str:\n return self._start_loc",
"def tell(self):\n return self.offset",
"def media_seek_position(self):\n return self._state.get(\"seek\", None)",
"def offset_from_start(self, part):\n index = self.parts.index(part)\n return sum([p.length for p in self.parts[:index]])",
"def get_start_point(self):\n return self.first_point",
"def first_dirty_page_offset(self):\n return self._offset + LOG_ENTRY_SIZE_HEADER + 8*self.dirty_pages_count()",
"def tell(self) -> int:\n source_cursor = self.tell_source()\n if self.seekable() or self.header is None:\n return source_cursor\n\n elif self.header.tell() < self.header_buffer_size:\n return self.header.tell()\n\n else:\n return source_cursor",
"def get_startline(self):\n return self.get_attribute(\"startline\")",
"def page_from(self):\n return 0 if self.page_size == 0 else self.page_slice[0] + 1",
"def max_offset(self):\n return self.offset + self.filesize - 1",
"def position(self):\n\n return self.scanner.position()",
"def get_begin(self):\n return self.__begin",
"def getStart(self) -> long:\n ..."
] | [
"0.681188",
"0.6521577",
"0.6516624",
"0.6506252",
"0.64529335",
"0.64495826",
"0.6444314",
"0.6391559",
"0.62811625",
"0.62764347",
"0.6224913",
"0.62020075",
"0.62020075",
"0.62020075",
"0.6185171",
"0.61651117",
"0.61568314",
"0.61335886",
"0.6129055",
"0.61160004",
"0.6082463",
"0.6076087",
"0.6074585",
"0.606632",
"0.6048695",
"0.6036181",
"0.6026175",
"0.59898424",
"0.5981768",
"0.59486985"
] | 0.7984832 | 0 |
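The method in this record only rewinds to an offset remembered earlier. A tiny self-contained illustration of that tell()/seek() pattern, using an in-memory buffer with made-up contents:

import io

buf = io.BytesIO(b"PROFILE-1 .....\r\nPROFILE-2 .....\r\n")

start = buf.tell()        # remember where this profile begins
first = buf.readline()    # consume the first record
buf.seek(start, 0)        # whence=0 means an absolute offset, as in the method above
assert buf.readline() == first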
Returns true if this is the last profile in the data file. | def is_last_profile_in_file(self, fid):
return self._calculate_next_profile_position() == os.fstat(fid.fileno()).st_size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_last(self) -> Optional[bool]:\n return pulumi.get(self, \"is_last\")",
"def is_last(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_last\")",
"def isLast(self):\n index = self.parentNode.idevices.index(self)\n return index == len(self.parentNode.idevices) - 1",
"def is_last_page(self):\n return self.page == self.last_page",
"def is_last(self, level):\n\n return level == self.levels[-1]",
"def _is_last_dataset_id(self, instance_id):\n res = self._db.Query(\"\"\"SELECT report_data_set_instance_id\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\n LIMIT 0, 1\"\"\",(self._id, self._segment_value_id))\n if not res:\n return False\n last_data_set_instance = self._db.record[0]\n if last_data_set_instance['report_data_set_instance_id'] == instance_id:\n return True\n\n return False",
"def is_profile_complete(self):\n return self.height is not None and self.target_weight is not None and self.sex is not None",
"def _is_last_chunk(self, bytes_read, previous_read):\n return bytes_read == previous_read and bytes_read != 0",
"def is_profile_complete(self):\n return bool(self.fullname and self.username and self.email)",
"def is_last_job_failed(self):\n return self._data.get('last_job_failed')",
"def at_last_stich(self):\n return len(self.cards) == 1",
"def eof_check(self) -> bool:\n eof = False\n curr_pos = self.fileobject.tell()\n # print(curr_pos, self.st_size)\n chunk = self.fileobject.read(25)\n if chunk == '':\n # Is there something on the back burner??\n if len(self._backburner) > 0:\n self.fileobject = self._backburner.pop()\n # TODO: what if it is the end of the back burner file? Is that handled?\n else:\n eof = True\n else:\n self.fileobject.seek(curr_pos)\n return eof",
"def is_data_format_channel_last(data_format):\n if data_format is None:\n return True\n return data_format.endswith(\"C\")",
"def is_zero_profile(in_file):\n profile = restore_profile_from_csv(in_file)\n for i in range(0, profile.shape[0]):\n for j in range(0, profile.shape[1]):\n if profile[i, j] != 0:\n return False\n return True",
"def isLastSection(section):\n board = section.board\n sectionsQueryset = board.section_set.all()\n numSections = len(sectionsQueryset)\n\n return section.id == sectionsQueryset[numSections - 1].id",
"def read_last_ts_written(self):\n try:\n logging.info(\"Reading last timestamp written from previous run.\")\n with open(self.state_file_path, \"r\") as file:\n self.last_ts_written = int(file.read())\n logging.info(\n \"Last timestamp from previous run is {}.\".format(\n self.last_ts_written\n )\n )\n return True\n except FileNotFoundError:\n self.last_ts_written = 0\n logging.warning(\n \"No state file found at {}, setting last timestamp written to 0.\".format(\n self.state_file_path\n )\n )\n return False",
"def is_last_position(self):\r\n return self.position >= len(self.rule.rightside)",
"def is_last_page(soup):\n meta = soup.find(\"meta\", {\"property\": \"og:url\"})\n if meta.has_attr(\"content\"):\n page_number = re.search(r\".*/(\\d+)_p/\", meta.attrs[\"content\"])\n if page_number:\n return page_number.group(1) == \"20\"\n if re.search(r\"/rentals/$\", meta.attrs[\"content\"]):\n return False # we are at the first page\n logging.warning(\"cannot determine if last page was reached, stopping\")\n return True",
"def is_last_process(self, process_group: ProcessGroup = None) -> bool:\n rank = dist.get_rank(group=process_group)\n world_size = dist.get_world_size(group=process_group)\n return rank == world_size - 1",
"def is_saved(self):\n last_path = self.__key._Key__reference.path().element_list()[-1]\n return ((last_path.has_name() ^ last_path.has_id()) and\n self.__key.has_id_or_name())",
"def has_end(self):\n return bool(self._end)",
"def is_partition_the_last(dbapi, partition):\n idisk_uuid = partition.get('idisk_uuid')\n onidisk_parts = dbapi.partition_get_by_idisk(idisk_uuid)\n part_number = get_part_number(partition.get('device_path'))\n\n if int(part_number) != len(onidisk_parts):\n return False\n\n return True",
"def GetLastUsedWiredNetwork(self):\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n profileList = config.sections()\n for profile in profileList:\n if config.has_option(profile,\"lastused\"):\n if misc.to_bool(config.get(profile,\"lastused\")):\n return profile\n return None",
"def is_last_update_failed(self):\n return self._data.get('last_update_failed')",
"def end_of_epoch(self):\n return not self._cur_epoch_itr.has_next()",
"def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))",
"def is_eof(self) -> bool:\n ...",
"def is_update_success():\n file_success = os.getcwd() + \"\\\\last_success.txt\"\n if os.path.exists(file_success):\n return True\n else:\n return False",
"def game_over(self):\n return bool(self.last_round and self.last_player == self.current_player)",
"def __is_new_save(self):\n last_save = self.__get_last_save()\n new_save = self.__create_save()\n for signal in new_save:\n if signal in last_save:\n for attribut in new_save[signal]:\n if attribut in last_save[signal]:\n if new_save[signal][attribut] == last_save[signal][attribut]:\n return False\n else:\n return True\n else:\n return True\n else:\n return True"
] | [
"0.69448036",
"0.65554166",
"0.6446005",
"0.63820344",
"0.6316995",
"0.60817087",
"0.5886655",
"0.58826596",
"0.58378875",
"0.5808014",
"0.57928777",
"0.5792088",
"0.5735816",
"0.5724117",
"0.5715869",
"0.56836313",
"0.5673833",
"0.56549674",
"0.56470406",
"0.56379104",
"0.55866706",
"0.55777615",
"0.5558102",
"0.55512613",
"0.5527792",
"0.55054915",
"0.5505137",
"0.5492661",
"0.5465331",
"0.54652417"
] | 0.8079507 | 0 |
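The end-of-file test above compares the offset where the next profile would start against the file size reported by os.fstat on the open descriptor. A self-contained sketch of that check; the 82-byte record length (80 data characters plus CR+LF) and the throwaway file are assumptions made only for the example:

import os
import tempfile

RECORD_LEN = 82  # 80 data characters + CR + LF (assumed)

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"x" * (2 * RECORD_LEN))   # a file holding exactly two records
    path = tmp.name

with open(path, "rb") as fid:
    size = os.fstat(fid.fileno()).st_size
    next_offset = 2 * RECORD_LEN         # where a third record would have to begin
    print(next_offset == size)           # True: the second record is the last one

os.remove(path)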
Returns a list of keys in the primary header. | def primary_header_keys(self):
return [d for d in self.primary_header] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keys(self):\n return [k for k, v in self._headers]",
"def keys(self) -> List:\n pass",
"def keys(self) -> List[str]:\n raise NotImplementedError",
"def key_columns(self):\n return [str(column) for id, column in self._columns.iteritems() if column.is_key]",
"def keys(self):\r\n return [key for key, value in self.iteritems()]",
"def keys(self):\r\n return [k for k in self]",
"def primary_key(self) -> List[int]:\n return self.doc.get('primaryKey')",
"def keys(self):\n return [ x for x in self ]",
"def keys(self) -> Sequence[str]:\n raise NotImplementedError",
"def get_keys(self) -> list:\r\n keys = []\r\n for key, value in self._items:\r\n keys.append(key)\r\n return keys",
"def get_keys(self):\r\n return self._keys",
"def getkeys(self):\n return list(self.keys)",
"def _primary_key_names(obj):\n return [key.name for key in _get_mapper(obj).primary_key]",
"def return_keys(self):\r\n\r\n keys = list(self.piDD.keys())\r\n return keys",
"def keys(self):\n return [key for key, value in self.items()]",
"def get_keys(self):\r\n\t\tlogger.debug(\"Getting the keys\")\r\n\t\t\r\n\t\treturn db.get_items('keys')",
"def keys(self) -> t.Tuple[str, ...]:\n return self._keys",
"def keys():",
"def getPrimaryKeys(data: List[Dict]) -> List[str]:\n return list(\n filter(\n partial(is_not, None),\n list(\n map(\n lambda x: x.get(\"col_name\")\n if x.get(\"col_ordinal\") == x.get(\"primary_key_pos\")\n else None,\n data,\n )\n ),\n )\n )",
"def keys(self):\n return",
"def list_all_keys(self):\n \n return self.keys",
"def headers(schema: Schema) -> List[str]:\n\n headers = Entry.headers()\n headers.extend([attr.uid for attr in schema.attributes])\n\n return [header for header in headers if header != \"item-hash\"]",
"def keys(self):\n return list(self.iterkeys())",
"def keys(self):\n return _keys(self)",
"def keys(self):\n return _keys(self)",
"def keys(self):\n sql = u\"\"\"\n SELECT `key` FROM `{table}` WHERE 1\n \"\"\".format(table=self.name)\n\n for row in self.conn.execute(sql):\n yield row['key']",
"def Keys(self) -> _n_1_t_4:",
"def keys(self):\n query = \"\"\"SELECT column_name, data_type, character_maximum_length\n FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = 'ngc2236';\"\"\"\n result = self.wifsip.query(query)\n keys = [r[0] for r in result]\n return keys",
"def keys(self):\n return list(self.token2id.values())",
"def keys(self):\n return self.get_list(self.cloudman.list_keypairs(),\n kind=\"key\")"
] | [
"0.73658276",
"0.70956033",
"0.7050593",
"0.6924669",
"0.6828751",
"0.6824346",
"0.6806696",
"0.68024904",
"0.67742836",
"0.6763231",
"0.67398864",
"0.67392325",
"0.67380315",
"0.6735044",
"0.67280453",
"0.67249167",
"0.6717226",
"0.6676533",
"0.66698045",
"0.665202",
"0.6630245",
"0.6617457",
"0.659358",
"0.6588401",
"0.6588401",
"0.65860283",
"0.6571454",
"0.6561906",
"0.65505385",
"0.65467644"
] | 0.91860604 | 0 |
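The comprehension in this record simply collects the mapping's keys; assuming primary_header behaves like an ordinary dict, list(primary_header) is an equivalent spelling. A quick check with example field names (only 'Cruise number' is taken from the records below; the other is invented):

primary_header = {'Cruise number': 1234, 'Number of levels': 42}
assert [d for d in primary_header] == list(primary_header) == ['Cruise number', 'Number of levels']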
return the cruise number | def cruise(self):
return self.primary_header['Cruise number'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maCruise(self):\n return .77",
"def getNumber():",
"def getChrNum(self):\n chrLookup = {\"X\":23,\"x\":23,\"Y\":24,\"y\":24}\n if self.chr.startswith(\"chr\"):\n num = self.chr[3:]\n if num in (\"X\",\"x\",\"Y\",\"y\"):\n num = chrLookup[num]\n return int(num)\n else: return self.chr",
"def build_number(self):\n return self.get_data(\"build_number\")",
"def _num(self):\n try:\n num = int(self.__rId[3:])\n except ValueError:\n num = 9999\n return num",
"def originator_cruise(self):\n\n # decide if there is an originator cruise code object by looking for something with data type '1' in the character header\n cruise = None\n if 'entries' in self.character_data_and_principal_investigator:\n for obj in self.character_data_and_principal_investigator['entries']:\n if 'Type of data' in obj:\n if obj['Type of data'] == 1:\n cruise = obj['Character data']\n\n return cruise",
"def _get_cu(self):\n c_undrained=0\n #group_index = self._data['GI']\n if self.is_clayey():\n c_undrained = self.qu(self._data[SoilProperty.N60])/2\n #c_undrained=_clamp(c_undrained, 10, 103)\n # Plasix calculation needs very small c_undrained\n #if c_undrained<0.21:\n # c_undrained = 0.21\n #use 0.2 as per plasix recommendation\n return c_undrained#the cu is always 103 check with small value of n_60, some mistake maybe",
"def run_number(self):\n return self._runNumber",
"def get_thrust(self):\n amtstr = input(\"Thrust amount?\")\n return int(amtstr)",
"def crd(self):\r\n return self.__trajectory[0]",
"def get_reynolds_number(self, velocity, refLength):\n\t\tre_num = self.Density * velocity * refLength / self.Dynamic_viscosity\n\t\treturn re_num",
"def _get_sprint_number() -> int:\n sprint = get_value_from_redis('sprint-number')\n if not sprint:\n sprint = JIRA_SPRINT\n return int(sprint)",
"def Cnum(self, default=None):\n return self.data.get('cnum', default)",
"def Cnum(self, default=None):\n return self.data.get('cnum', default)",
"def _get_n(self):#@Needs to fix it for general case\n n_60 = 0.55 * 1 * 1 * 0.75 * self._data[SoilProperty.SPT_N] /0.6\n if not self.is_clayey() and n_60>15: #apply dilitracy correction\n n_60 = 15 + 0.5 * (n_60 - 15)\n return n_60",
"def crtime(self):\n return safeInt(self.tag(\"crtime\"))",
"def build_number(self):\n return self._build_number",
"def string_u_broj(self):\n if self.player_input == \"rock\":\n self.player_number = 0\n elif self.player_input == \"spock\":\n self.player_number = 1\n elif self.player_input == \"paper\":\n self.player_number = 2\n elif self.player_input == \"lizard\":\n self.player_number = 3\n elif self.player_input == \"scissors\":\n self.player_number = 4\n else:\n self.player_number = -1\n raise RpslsError(102)\n return self.player_number",
"def num(self):\n return self.num",
"def __int__(self) -> int:\n\n return self.centi",
"def getBuild(number):",
"def getBuild(number):",
"def nze(self) -> int:",
"def nze(self) -> int:",
"def getSlipNum():\n query = Slip.query()\n results = query.fetch(limit = MAX_SLIPS)\n temp = 0\n for result in results:\n if result.number > temp:\n temp = result.number\n slipNum = temp\n slipNum += 1\n return slipNum",
"def recid(self):\n return self.record[\"control_number\"]",
"def 取项目数(self): # real signature unknown; restored from __doc__\n return self.GetCount()",
"def getGlideinCpusNum(glidein):\n \n glidein_cpus = 1\n cpus = str(glidein['attrs'].get('GLIDEIN_CPUS', 1))\n if cpus.upper() == 'AUTO':\n glidein_cpus = 1\n else:\n glidein_cpus = int(cpus)\n\n return glidein_cpus",
"def tracenb(self):\n trace_nb = self._pna.query('CALC{}:PAR:MNUM?'.format(self._channel))\n if trace_nb:\n return int(trace_nb)\n else:\n raise InstrIOError(cleandoc('''Agilent PNA did not return the\n trace number on channel {} '''.format(self._channel)))",
"def get_rank() -> int:\n return collective.get_rank()"
] | [
"0.6323028",
"0.60737234",
"0.59981036",
"0.586891",
"0.5847294",
"0.5836213",
"0.58244544",
"0.5757564",
"0.5728439",
"0.5711663",
"0.5686197",
"0.5684461",
"0.5636235",
"0.5636235",
"0.56309515",
"0.5627673",
"0.56155443",
"0.56026185",
"0.55899507",
"0.556885",
"0.5544796",
"0.5544796",
"0.55311686",
"0.55311686",
"0.55113596",
"0.5478076",
"0.5470827",
"0.54510087",
"0.54340744",
"0.5407426"
] | 0.8592072 | 0 |
return the originator cruise ID | def originator_cruise(self):
# decide if there is an originator cruise code object by looking for something with data type '1' in the character header
cruise = None
if 'entries' in self.character_data_and_principal_investigator:
for obj in self.character_data_and_principal_investigator['entries']:
if 'Type of data' in obj:
if obj['Type of data'] == 1:
cruise = obj['Character data']
return cruise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def source_computer_id(self) -> str:\n return pulumi.get(self, \"source_computer_id\")",
"def source_computer_id(self) -> str:\n return pulumi.get(self, \"source_computer_id\")",
"def cruise(self):\n return self.primary_header['Cruise number']",
"def recid(self):\n return self.record[\"control_number\"]",
"def correlation_id(self):\n return self._correlation_id",
"def creator_request_id(self) -> str:\n return pulumi.get(self, \"creator_request_id\")",
"def reactor_id(self):\n return self.__reactor_id",
"def cen_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cen_id\")",
"def cen_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cen_id\")",
"def identity(self) -> str:\n return self.requester.uuid",
"def get_cid(self):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.cid",
"def get_customer_id(self):\n return self.machine_config_file_value(\"DEFAULT.CID\").strip('\"')",
"def getID():",
"def cen_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cen_id\")",
"def client_request_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"client_request_id\")",
"def unique_id(self):\n return self._light.address",
"def client_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"client_id\")",
"def reactornet_id(self):\n return self.__reactornet_id",
"def reference_id(self) -> str:\n return pulumi.get(self, \"reference_id\")",
"def unique_id(self):\r\n return f\"{DOMAIN}_{self.charge_point_id}_{self.connector_id}\"",
"def clnvr_origin(record):\n try:\n origin = int(re.search(r\"(ORIGIN=)([0-9]+)\", record[7]).group(2))\n except:\n origin = -1\n\n return origin",
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")",
"def _rId(self):\n return self.__rId",
"def customer_id(self) -> str:\n return self._customer_id",
"def client_id(self) -> str:",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")",
"def client_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_id\")"
] | [
"0.6497666",
"0.6497666",
"0.63244545",
"0.62699574",
"0.62401617",
"0.6209456",
"0.6149779",
"0.6148121",
"0.61367226",
"0.60899115",
"0.6029654",
"0.59821427",
"0.59728366",
"0.5929902",
"0.59158194",
"0.59058",
"0.58951896",
"0.58922875",
"0.5880611",
"0.5879405",
"0.58566326",
"0.5843077",
"0.5843077",
"0.5843077",
"0.5837539",
"0.5808004",
"0.5805277",
"0.5791494",
"0.5791494",
"0.5791494"
] | 0.70046216 | 0 |
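This record and the next one scan the character-data header for the entry whose 'Type of data' is 1 (originator cruise) or 2 (station). A small, hedged generalization of that lookup; the helper name and the sample values are invented for illustration:

def find_character_entry(character_header, data_type):
    # Return the 'Character data' of the first entry whose 'Type of data'
    # matches data_type, or None when there is no such entry.
    for entry in character_header.get('entries', []):
        if entry.get('Type of data') == data_type:
            return entry.get('Character data')
    return None

header = {'entries': [{'Type of data': 1, 'Character data': 'AB123'},
                      {'Type of data': 2, 'Character data': 'STN-7'}]}
print(find_character_entry(header, 1))   # AB123
print(find_character_entry(header, 3))   # None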
return the originator station ID | def originator_station(self):
# decide if there is a station code object by looking for something with data type '2' in the character header
station = None
if 'entries' in self.character_data_and_principal_investigator:
for obj in self.character_data_and_principal_investigator['entries']:
if 'Type of data' in obj:
if obj['Type of data'] == 2:
station = obj['Character data']
return station | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def station_id(self) -> str:\n return self._station_id",
"def unique_id(self) -> str:\n return str(self.coordinator.gios.station_id)",
"def station(unit, date):\r\n req = 'select ParentLocId from PI_PlaceRelationship where RelationId = 4 and LocId = \"{}\" and EndDate > \"{}\"'.format(unit,d2d(date))\r\n try:\r\n station = pd.read_sql(req, engine).values[0][0]\r\n return station if unit != station else 0\r\n except:\r\n logging.warning('error in getting station ID for {} ({})'.format(name(unit), unit))\r\n return 0",
"def get_origin_name(event):\n return event['result']['parameters']['origin_station']['origin']",
"def station_serial(self) -> str:\n return self.camera_info[\"station_sn\"]",
"def id_for_station(station_name: str) -> Optional[int]:\n for s in STATIONS:\n if s[\"name\"] == station_name:\n return s[\"id\"]\n return None",
"def transit_router_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"transit_router_id\")",
"def transit_router_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"transit_router_id\")",
"def getIdent (self) :\n return self.id",
"def get_station_name(self, station=0):\n return self.statuslist()[station][1]",
"def transit_router_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"transit_router_id\")",
"def transit_router_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"transit_router_id\")",
"def unique_id(self):\n return self._light.address",
"def _get_locator_id(self):\n return self.__locator_id",
"def getRunningId(self):\n return( int(self.id.split('.')[2]) )",
"def source_computer_id(self) -> str:\n return pulumi.get(self, \"source_computer_id\")",
"def source_computer_id(self) -> str:\n return pulumi.get(self, \"source_computer_id\")",
"def transit_router_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"transit_router_id\")",
"def transit_router_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"transit_router_id\")",
"def srid(self) -> ir.IntegerValue:\n return ops.GeoSRID(self).to_expr()",
"def determine_station(self, station=None):\n stations = self.statuslist()\n if station and stations and isinstance(station, str):\n for plug in stations:\n plug_name = plug[1]\n if plug_name and plug_name.strip() == station.strip():\n return int(plug[0])\n try:\n station_int = int(station)\n if station_int <= 0 or station_int > self.__len__():\n raise OpSprException('Station %d out of range' % station_int)\n return station_int\n except ValueError:\n raise OpSprException('Station name \\'%s\\' unknown' % station)",
"def source_zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"source_zone_id\")",
"def getID():",
"def getStationName(analyzer,stationId):\n name = model.getStationName(analyzer,stationId)\n return name",
"def station_id2id(cls, station_id, end_train_timestamp=None, ignore_station=False):\n station_row = StationDAO.get(station_id)\n return cls.station_row2id(station_row, end_train_timestamp, ignore_station)",
"def get_initiator(self):\n out, err = self.execute('/usr/sbin/iscsiadm', 'list', 'initiator-node')\n\n # Sample first line of command output:\n # Initiator node name: iqn.1986-03.com.sun:01:e00000000000.4f757217\n initiator_name_line = out.splitlines()[0]\n return initiator_name_line.rsplit(' ', 1)[1]",
"def _get_origin_ted(self):\n return self.__origin_ted",
"def get_id(self):\n try:\n return self.inst.query('*IDN?')[:36]\n except errors.VisaIOError as e:\n logger.warning(e)\n return 'Device not connected.'",
"def sensor_id(raft, ccd):\n return 'R%s%s_S%s%s' % (raft[2], raft[4], ccd[2], ccd[4])",
"def reactor_id(self):\n return self.__reactor_id"
] | [
"0.7285652",
"0.6812059",
"0.65527874",
"0.6491249",
"0.6453471",
"0.6050535",
"0.6019179",
"0.6019179",
"0.6019161",
"0.59847265",
"0.59809613",
"0.59809613",
"0.5934799",
"0.5903671",
"0.5888546",
"0.5888137",
"0.5888137",
"0.58826435",
"0.58826435",
"0.5865075",
"0.5855892",
"0.5848224",
"0.5837392",
"0.5829674",
"0.58088243",
"0.5797001",
"0.578145",
"0.57592744",
"0.57545865",
"0.5720893"
] | 0.7152931 | 1 |
Returns the contents of secondary header if it exists, otherwise None. | def extract_secondary_header(self, index):
header = None
for item in self.secondary_header['entries']:
if item['Code'] == index:
header = item['Value']
return header | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getHeaderVal2(self, key):\n lowerKey = key.lower()\n if key in self.header.header.keys():\n return self.header.header[key]\n elif lowerKey in self.header.header.keys():\n return self.header.header[lowerKey]\n else:\n print('error: bStack.getHeaderVal() did not find key \"' + key + '\" in self.header.header. Available keys are:', self.header.header.keys())\n return None",
"def _get_header(self, header):\n if header is None:\n html = self.header()\n else:\n html = header\n return html",
"def get_subheader(self):\n element = self.driver.find_element(*self.subheader_selector)\n return element.text",
"def extract_from_header(header, left, right):\n return re.search(r'{}(.*?){}'.format(left, right), header).group(1)",
"def header(self):\n return self[0]",
"def dataOrHeader(self, name, doH):\r\n f = open(self.location + \"/\" + name)\r\n r = f.read()\r\n f.close()\r\n index = r.find(self.dividerString_)\r\n dataOrHeader = r[index+1:len(r)] if doH else r[0:index]\r\n #hacky fix for random \\r\r\n dataOrHeader = dataOrHeader.replace(\"\\r\", \"\") \r\n return dataOrHeader",
"def get_header(filepath):\n header = None\n for i, x in enumerate(open(filepath)):\n if i == 0:\n header = x\n return(header)",
"def __getHeaderInfo(self, decoded_data):\n\t\tip = decoded_data.child()\n\t\ttcp = ip.child()\n\t\t#src = (ip.get_ip_src(), tcp.get_th_sport())\n\t\ttry:\tsrc = ip.get_ip_src()\n\t\texcept:\tsrc = '?'\n\t\t#dst = (ip.get_ip_dst(), tcp.get_th_dport())\n\t\ttry:\tdst = ip.get_ip_dst()\n\t\texcept:\tdst = '?'\n\t\t#data = tcp.get_data_as_string()\n\t\tdata = tcp.get_packet()\n\t\treturn (src, dst, data)",
"def getHeader():\n return _HEADER",
"def peek_header(self):\n header = None\n if self._headers:\n # returns the last element on the list\n header = self._headers[-1:]\n\n return header",
"def edx_get_subtitle(url, headers):\n \"\"\" or None if no subtitles are available \"\"\"\n try:\n jsonString = get_page_contents(url, headers)\n jsonObject = json.loads(jsonString)\n return edx_json2srt(jsonObject)\n except URLError as e:\n print('[warning] edX subtitles (error:%s)' % e.reason)\n return None\n except ValueError as e:\n print('[warning] edX subtitles (error:%s)' % e.message)\n return None",
"def parse_header(self):",
"def getHeader(self):\n return self.data.header",
"def common_html_header(outfile: TextIO, title: str, indexpath: str = \"\") -> None:\n common_header_part1(outfile, title, indexpath=indexpath)\n common_header_part2(outfile, indexpath=indexpath)",
"def probe_type(self):\n return self.extract_secondary_header(29)",
"def readFrom(self,fn):\n hdrs = {}\n try:\n f = open(fn+\".headers\",\"tr\")\n for l in f:\n if l[-1:]==\"\\n\":\n l = l[:-1]\n i = l.find(\": \")\n if -1!=i:\n hdrs[l[:i]] = l[i+2:]\n f.close()\n except (Exception,Error) as err:\n log(\"readFrom: header: error: \"+str(err))\n try:\n f2 = open(fn,\"br\")\n data = f2.read()\n f2.close()\n except (Exception,Error) as err:\n log(\"readFrom: body: error: \"+str(err))\n return (hdrs,data)",
"def header(self):\n if not self._ast:\n return None\n else:\n return self._ast[0]",
"def find_match(second_file, title):\r\n # Initialize variables/ open files\r\n seq2 = \"\"\r\n header2 = \"\"\r\n match_fh = open(second_file, \"r\")\r\n # parse through lines of file\r\n for lines in match_fh:\r\n # If > found assume its header\r\n if lines[0] == \">\":\r\n # header2 = lines\r\n # If a header has been found, pull strain name, orgainism and subtype for new header\r\n if len(header2) > 0:\r\n matches2 = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n subtype_match2 = re.findall(\"(Subtype:[A-Za-z0-9]+)\", header2)\r\n organ2 = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n header2 = \">\" + organ2[0] + \"|\" + matches2[0] + \"|\" + subtype_match2[0]\r\n # if new header equals input header then return it and the sequence\r\n if header2 == title:\r\n match_fh.close()\r\n print(\"match\")\r\n return header2, seq2\r\n # Reset the header and seq\r\n header2 = lines\r\n seq2 = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n seq2 = seq2 + lines\r\n\r\n # to return the last entry in the file, since loop won't be able to return it\r\n matches2 = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n subtype_match2 = re.findall(\"(Subtype:[A-Za-z0-9]+)\", header2)\r\n organ2 = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n header2 = \">\" + organ2[0] + \"|\" + matches2[0] + \"|\" + subtype_match2[0]\r\n match_fh.close()\r\n return header2, seq2",
"def header(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"header\")",
"def load_header(base_path, subvolume):\n with h5py.File(file_path(base_path, subvolume, 'subvolume'), 'r') as f:\n header = dict(f['Header'].attrs.items())\n header.update({key: f['Header'][key][:] for key in f['Header'].keys()})\n \n return header",
"def getHeader(key):",
"def parse_header(self, header):\n\n m = re.search(HEADER_REGEX, header)\n if m:\n type = m.group(1)\n version = m.group(2)\n return type, version\n else:\n return None",
"def get_best_ref(self, header):\n try:\n return self._get_best_ref(header)\n except crexc.IrrelevantReferenceTypeError:\n return \"NOT FOUND n/a\"\n except crexc.OmitReferenceTypeError:\n return None\n except Exception as exc:\n if log.get_exception_trap():\n return \"NOT FOUND \" + str(exc)\n else:\n raise",
"def _get_compose_header(ctx):\n return get_artifact('compose_header.yml')",
"def readHeader():\n with open(\"./header.conf\", \"r\") as fd:\n header = fd.readlines()\n return header",
"def header(self, header, default=None):\r\n return self._get_headers().get(header.upper(), default)",
"def get_main_headline(self, default=''):\n for segment in self.segments:\n if segment.headlines:\n return segment.headlines[0]\n return default",
"def getheader(self, name, default=None):\n if not self.__headers.has_hey(name):\n return default\n else: self.__headers[name]",
"def get_headline(driver):\r\n headline = ''\r\n privacy_statement = 'We value your privacy'\r\n try:\r\n try:\r\n h1 = driver.find_element_by_tag_name('h1') #Some headlines written under <h1> tag\r\n if h1.text.lower().find(privacy_statement.lower()) == -1:\r\n headline += h1.text #Only want to return one variable\r\n except NoSuchElementException:\r\n None\r\n try:\r\n h2 = driver.find_element_by_tag_name('h2') #Some headlines written under <h2? tag\r\n if h2.text.lower().find(privacy_statement.lower()) == -1:\r\n headline += h2.text #Only want to return one variable\r\n except NoSuchElementException:\r\n None\r\n try:\r\n video_headline = driver.find_element_by_class_name('video-headline')\r\n if video_headline.text.lower().find(privacy_statement.lower()) == -1:\r\n headline += video_headline.text\r\n except NoSuchElementException:\r\n None\r\n except:\r\n None\r\n return headline",
"def secondary(self) -> Optional['outputs.GetTrafficPolicyDocumentRuleSecondaryResult']:\n return pulumi.get(self, \"secondary\")"
] | [
"0.620934",
"0.6046969",
"0.59697276",
"0.5900436",
"0.5695183",
"0.56296515",
"0.54620403",
"0.5399244",
"0.5356341",
"0.53513354",
"0.5280269",
"0.5278987",
"0.5261967",
"0.525186",
"0.52477497",
"0.5237682",
"0.5227424",
"0.52173036",
"0.5211859",
"0.518127",
"0.51782846",
"0.51717025",
"0.51665634",
"0.515316",
"0.5120552",
"0.5117589",
"0.5116686",
"0.51046735",
"0.50944465",
"0.50921845"
] | 0.7348706 | 0 |
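The lookup above walks the secondary-header entries until it finds a matching 'Code'. An equivalent spelling built on next() with a default, shown with invented entries (index 29 appears here only because one of the negatives above passes it to fetch the probe type):

def extract_by_code(entries, code):
    # Return the 'Value' of the first entry whose 'Code' equals code, else None.
    return next((e['Value'] for e in entries if e['Code'] == code), None)

entries = [{'Code': 4, 'Value': 3.0}, {'Code': 29, 'Value': 7.0}]
print(extract_by_code(entries, 29))   # 7.0
print(extract_by_code(entries, 99))   # None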
Returns a numpy masked array of depth quality control flags. Set the originator option if the originator flags are required. | def z_level_qc(self, originator=False):
data = np.ma.array(np.zeros(self.n_levels()), mask=True, dtype=int)
for i in range(self.n_levels()):
if self.profile_data[i]['Missing']: continue
if originator:
data[i] = self.profile_data[i]['Originator depth error flag']
else:
data[i] = self.profile_data[i]['Depth error code']
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _mask(self):\n if self.__mask is None:\n # need this to be *exactly* the numpy boolean False\n return nomask\n return self.__mask",
"def var_level_qc(self, index, originator=False):\n data = np.ma.array(np.zeros(self.n_levels()), mask=True, dtype=int)\n if index is not None:\n for i in range(self.n_levels()):\n if self.profile_data[i]['variables'][index]['Missing']: continue\n if originator:\n data[i] = self.profile_data[i]['variables'][index]['Value originator flag']\n else:\n data[i] = self.profile_data[i]['variables'][index]['Value quality control flag']\n return data",
"def cmask(self):\n mask = np.zeros(18)\n if 'full' in self.CONS: mask[:] = 1\n if 'f0' in self.CONS: mask[0] = 1\n if 'f1' in self.CONS: mask[1:4] = 1\n if 'f2' in self.CONS: mask[4:10] = 1\n if 'vx' in self.CONS: mask[10] = 1\n if 'vy' in self.CONS: mask[11] = 1\n if 'vz' in self.CONS: mask[12] = 1\n if 'TG' in self.CONS: mask[13:18] = 1\n return mask>0",
"def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask",
"def _mask(self) -> np.ndarray:\n mask = np.ones(self.limits, dtype=bool)\n for ax, shape, limit in zip(\n range(1, len(self.limits)), self.shape, self.limits[1:]\n ):\n ax_mask = np.arange(limit) < np.expand_dims(shape, 1)\n new_shape = np.ones(len(self.limits), dtype=int)\n new_shape[0], new_shape[ax] = self.limits[0], limit\n mask = mask & ax_mask.reshape(*new_shape)\n return mask",
"def get_noise_mask(frame, noise_reference_margin):\r\n center = int((frame.shape[0] - 1) / 2)\r\n radius = center - noise_reference_margin\r\n tmp = Aperture(center, center, radius, data=frame, crop=False)\r\n annulus_mask = np.logical_not(tmp.data.mask)\r\n return annulus_mask",
"def getMask(self):\r\n mask = np.array(self.array, dtype=np.float32)\r\n mask[mask == 0] = np.nan\r\n return mask",
"def from_masked(cls, arr: np.masked.masked_array) -> JaggedArray:\n return cls._from_arr_and_mask(arr.compressed(), arr.mask)",
"def Circ_Aperture_Mask(propagator_size):\n\n x = np.arange(0, propagator_size)\n y = np.arange(0, propagator_size)\n arr = np.zeros((y.size, x.size))\n diam = propagator_size\n r= diam/2\n\n # The two lines below could be merged, but I stored the mask\n # for code clarity.\n mask = (x[np.newaxis,:]-propagator_size/2)**2 + (y[:,np.newaxis]-propagator_size/2)**2 < r**2\n arr[mask] = 1.\n\n return arr",
"def get_mask(self):\n # use the feature array a to calculate which channels to include etc\n sums = np.sum(self.feature_array, 0)\n feature_mask = np.repeat(np.ones(4, dtype=int), self.n_features)\n # if there are \"missing\" channels use the older version of KK\n zero_sums = sums == 0\n if np.any(zero_sums):\n self.distribution = 1\n feature_mask[zero_sums] = 0\n self.feature_mask = feature_mask\n return feature_mask",
"def _get_support_mask(self):\n mask = np.zeros(self.scores_.shape, dtype=bool)\n mask[self.scores_ >= self.min_count] = True\n return mask",
"def to_masked(self) -> np.mask.masked_array:\n mask = self._mask()\n res = np.ma.masked_all(self.limits, dtype=self.dtype)\n res[mask] = self.data\n return res",
"def mask_depth_image(depth_image, min_depth, max_depth):\n # print ('mask min max', min_depth, max_depth)\n ret, depth_image = cv2.threshold(depth_image, min_depth, 100000, cv2.THRESH_TOZERO)\n ret, depth_image = cv2.threshold(depth_image, max_depth, 100000, cv2.THRESH_TOZERO_INV)\n depth_image = np.expand_dims(depth_image, 2)\n return depth_image",
"def get_masked_scene(orig, mask, local_context_size = 80, dilation=False):\n orig_scene = orig.copy()\n mask_scene = mask.copy()\n orig_scene_no_mask = orig.copy()\n \n mask_info = np.where(mask_scene == 0) \n min_x = max(min(mask_info[0]) - local_context_size, 0)\n max_x = max(mask_info[0]) + local_context_size\n min_y = max(min(mask_info[1]) - local_context_size, 0)\n max_y = max(mask_info[1]) + local_context_size\n \n orig_scene = orig_scene[min_x:max_x,min_y:max_y]\n orig_scene_no_mask = orig_scene_no_mask[min_x:max_x,min_y:max_y]\n mask_scene = mask_scene[min_x:max_x,min_y:max_y]\n \n dialation_mask = np.zeros(mask_scene.shape) + 255\n \n if dilation:\n dialation_mask = cv2.dilate(255-mask_scene, np.ones((local_context_size,local_context_size)))\n \n #implot(dialation_mask)\n #plt.imshow(dialation_mask, 'gray')\n \n for x in range(mask_scene.shape[0]):\n for y in range(mask_scene.shape[1]):\n if mask_scene[x, y] == 0:\n orig_scene[x, y, :] = 0\n orig_scene_no_mask[x,y,:] = 0\n if dilation:\n if dialation_mask[x,y] == 0:\n orig_scene[x, y, :] = 0\n \n return orig_scene, mask_scene, orig_scene_no_mask, dialation_mask",
"def optimization_mask(self) -> Sequence[bool]:\n return self._optimization_mask",
"def get_front_door_mask(self) -> np.array:\n front_door_mask = self.boundary == 255\n region = measure.regionprops(front_door_mask.astype(int))[0]\n return np.array(region.bbox, dtype=int)",
"def _fixed_masks_arg(mask):\n return [\"NULL\", mask]",
"def mask(self):\n return self._mask",
"def mask(self):\n return self._mask",
"def mask(self):\n return self._mask",
"def mask(self):\n return self._mask",
"def detectBackgroundDepth_deprecated(\n camera_params, camera_pose, depth_image,\n mask_left_side=0.4, plane_distance_thresh=10):\n\n background_mask = imageprocessing.maskDepthArtifacts(depth_image)\n\n background_plane, plane_distance_image = fitBackgroundDepth(\n depth_image, ~background_mask, plane_distance_thresh,\n camera_params=camera_params, camera_pose=camera_pose,\n max_trials=50\n )\n background_mask |= plane_distance_image < plane_distance_thresh\n\n background_mask = imageprocessing.makeBackgroundMask(depth_image, background_mask)\n\n if mask_left_side:\n background_mask = imageprocessing.maskOutsideBuildArea(\n background_mask, mask_left_side=mask_left_side, mask_bottom=0\n )\n\n return background_mask, background_plane",
"def bad_pixel_mask(self):\n from mkidpipeline.pipeline import PROBLEM_FLAGS # This must be here to prevent a circular import!\n return self.flagged(PROBLEM_FLAGS)",
"def test_quality_mask():\n quality = np.array([0, 0, 1])\n assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask=0))\n assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask=None))\n assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask='none'))\n assert (KeplerQualityFlags.create_quality_mask(quality, bitmask=1)).sum() == 2\n assert (KeplerQualityFlags.create_quality_mask(quality, bitmask='hardest')).sum() == 2\n # Do we see a ValueError if an invalid bitmask is passed?\n with pytest.raises(ValueError) as err:\n KeplerQualityFlags.create_quality_mask(quality, bitmask='invalidoption')\n assert \"not supported\" in err.value.args[0]",
"def build_mask(dqarr, bitvalue):\n bitvalue = interpret_bit_flags(bitvalue, mnemonic_map=pixel)\n\n if bitvalue is None:\n return (np.ones(dqarr.shape, dtype=np.uint8))\n return np.logical_not(np.bitwise_and(dqarr, ~bitvalue)).astype(np.uint8)",
"def get_mask(self, anno, img_info) -> np.ndarray:\n m = np.zeros((img_info[\"height\"], img_info[\"width\"]), dtype=np.float32)\n\n for obj in anno:\n if obj[\"iscrowd\"]:\n rle = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n m += mask\n elif obj[\"num_keypoints\"] == 0:\n rles = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n for rle in rles:\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n\n m += mask\n\n return (m < 0.5).astype(np.float32)",
"def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask",
"def var_qc_mask(self, v, flagtype='orig'):\n data = numpy.ma.array(numpy.zeros(self.n_levels()), mask=False, dtype=bool)\n prof = self.var_profile_qc(v)\n if prof is not None and prof > 0:\n data[:] = True\n else:\n zqc = self.z_level_qc(flagtype)\n data[(zqc.mask == False) & (zqc > 0)] = True\n lqc = self.var_level_qc(v, flagtype)\n data[(lqc.mask == False) & (lqc > 0)] = True\n return data",
"def getSignedMask(self):\r\n signedMask = np.array(self.array * self.sign, dtype=np.float32)\r\n signedMask[signedMask == 0] = np.nan\r\n return signedMask",
"def generate_mask(self):\n\n polymer_length = len(self.sequence)\n protein_length = len(self.particle_order) - polymer_length\n\n if self.filter_specification == 'type':\n mask = np.in1d(self.particle_order, self.monomer_id)\n elif self.filter_specification == 'id':\n if self.molecule == 'polymer':\n offset = protein_length\n else:\n offset = 0\n mask = np.array([False] * (polymer_length + protein_length))\n absolute_id = [x+offset for x in self.monomer_id]\n mask[absolute_id] = True\n else:\n raise NotImplementedError(\"Filter is unknown. Use 'type' or 'id'!\")\n\n # if molecule == 'full', nothing needs to be done\n if self.molecule == 'polymer':\n mask[:protein_length] = [False] * protein_length\n elif self.molecule == 'protein':\n mask[protein_length:] = [False] * polymer_length\n\n return mask"
] | [
"0.54861605",
"0.5434514",
"0.54197353",
"0.53197765",
"0.5294576",
"0.52716345",
"0.5260115",
"0.524445",
"0.5227943",
"0.5212965",
"0.5184435",
"0.5177439",
"0.5145151",
"0.5117524",
"0.51014876",
"0.50980514",
"0.5093009",
"0.50760245",
"0.50760245",
"0.50760245",
"0.50760245",
"0.5073763",
"0.5048962",
"0.5043401",
"0.50308055",
"0.50282186",
"0.5018631",
"0.50000995",
"0.49975523",
"0.49690577"
] | 0.61880827 | 0 |
Returns the variable index for a variable. Either the variable code can be specified or s can be set to True to return the salinity index. Otherwise, the temperature index is returned. 
if s:
code = 2
index = None
for i, var in enumerate(self.primary_header['variables']):
if var['Variable code'] == code:
assert index is None, 'Appears to be two sets of same data in profile'
index = i
return index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def indices_of_var(v):\n name = v.varName\n indices = name[2:].split(',')\n i, j = int(indices[0]), int(indices[1])\n return i, j",
"def index(self, varname):\n if not isinstance(varname, str):\n raise TypeError(\"argument must be str\")\n varname = self._find_vars(varname, empty_ok=False, single=True)[0]\n return self._varlist.index(varname)",
"def get_variable(self, variable_name):\n assert self.variable_name_to_index is not None\n return self.variable_name_to_index[variable_name]",
"def getSolRatioVarIndx( self, var ):\n \n self.updateAdb( )\n\n if var in self.solNames:\n return self.solNames[ var ]\n elif var in self.solNames.values():\n return var\n else:\n return -1",
"def s(self):\n index = self.var_index(s=True)\n return self.var_data(index)",
"def get_ivar(data, s):\n return data.ivar.value / (1 + s**2 * data.ivar.value)",
"def speciesIndex(self, species):\n nsp = self.nSpecies()\n if type(species) == types.ListType:\n s = []\n for sp in species:\n s.append(self.speciesIndex(sp))\n return s\n\n if type(species) == types.IntType or type(species) == types.FloatType:\n k = species\n else:\n k = _cantera.phase_speciesindex(self._phase_id,species)\n if k < 0 or k >= nsp:\n raise CanteraError(\"\"\"Species \"\"\"+`species`+\"\"\" not in set \"\"\"\n +`self.speciesNames()`)\n return k",
"def getvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)",
"def index(self, variables):\n return [self._variables.index(v) for v in variables]",
"def getResRatioVarIndx( self, var ):\n \n self.updateAdb( )\n\n if var in self.resNames:\n return self.resNames[ var ]\n elif var in self.resNames.values():\n return var\n else:\n return -1",
"def getvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value",
"def getLinIterVarIndx( self, var ):\n \n self.updateAdb( )\n\n if var in self.iterNames:\n return self.iterNames[ var ]\n elif var in self.iterNames.values():\n return var\n else:\n return -1",
"def _find_index(string):\n if string[0] == 'X':\n return 0\n elif string == 'D':\n return 1\n else:\n return np.where(sym == string)[0][0]",
"def state_index_for_symbol(self, symbol):\n for idx, state in enumerate(self):\n if state.symbol == symbol:\n return idx\n if value in self.symbol_synonyms:\n return self.index(self.symbol_synonyms[value])\n raise Exception(\"State with symbol of '%s' not defined\" % symbol)",
"def encode_var(variable: str) -> int:\n\tif variable == \"Y\":\n\t\treturn 1\n\tindex = int(variable[1:]) if len(variable) > 1 else 1\n\toffset = {\"X\": 0, \"Z\": 1}[variable[0]]\n\treturn index * 2 + offset",
"def getIndex(condition='', component=''):\n if component == 'IC2' or component == 'IC14':\n index = '.nii[0]'\n elif component == 'IC7' or component == 'IC29':\n index = '.nii[1]'\n elif component == 'IC25':\n index = '.nii[2]'\n elif component == 'IC31':\n index = '.nii[3]'\n elif component == 'IC39':\n index = '.nii[4]'\n else:\n index = '.nii'\n\n return index",
"def spark_index(n):\n return int(round((clamp(n) - minimum) * coefficient))",
"def getWeatherIndex(code, return_if_none=Constants.return_value_index_of_weather_not_found):\n # Start the index with 0\n index = 0\n for i in [100, 200, 300, 400]:\n for j in [0, 33, 66]:\n if inWeatherCodeRange(code, i + j, i + j + 33):\n return index\n index += 1\n return return_if_none",
"def lookup(self, var):\n\t\tsearched = self._search(self._root, var)\n\t\tif searched is not None:\n\t\t\treturn searched._value\n\t\telse:\n\t\t\tself._assign(var, 0)\t#creates the new variable to 0\n\t\t\treturn 0",
"def _var_key(var):\n\n # pylint: disable=protected-access\n # Get the distributed variable if it exists.\n if getattr(var, \"_distributed_container\", None) is not None:\n var = var._distributed_container()\n if var._in_graph_mode:\n return var._shared_name\n return var._unique_id",
"def indexInStateVector(self, s):\n try:\n idx = self.parameterNames.index(s)\n except ValueError:\n return None\n \n if idx < len(self.stateVector):\n return idx\n else:\n return None",
"def sosid(self):\r\n return self.word2idx.get(SOS, 0)",
"def getbarvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)",
"def getVarIndexOffset(self) -> Optional[int]:\n m = self.varIndexBasePlusOffsetRE.search(self.description)\n if not m:\n return None\n return int(m.group(1))",
"def getbarvarnameindex(self,somename_): # 3\n res,resargs = self.__obj.getbarvarnameindex(somename_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _asgn_return_value,_index_return_value = resargs\n return _asgn_return_value,_index_return_value",
"def findStressIdx(self, stress2find, cleanedData=True):\n if stress2find == 0:\n idx = 1\n elif stress2find > self.raw['stress'].max():\n idx = None\n else:\n data4finding = self.cleaned if cleanedData else self.raw\n idx = data4finding.query(f'stress >= {stress2find}').index[0]\n return idx",
"def compute_variable_indexes(path, overwrite=True, multiproc=False):\n if multiproc is True:\n tf.keras.backend.clear_session()\n set_cpu_option()\n\n gin_bindings = [\n \"evaluation.evaluation_fn = @variables_idx\",\n \"variables_idx.num_train = 10000\", \"evaluation.random_seed = 2051556033\",\n \"dataset.name='auto'\", \"evaluation.name = 'variables index'\"\n ]\n path = pathlib.Path(path)\n result_path = path.parent.parent / \"metrics\" / \"variance\" / \"filtered_variables\"\n logger.info(\"Computing variable indexes of {}\".format(path.parent.parent))\n gin_evaluation(path, result_path, overwrite, gin_bindings)",
"def run(data, var):\n try:\n nz = ma.array(data.variables[var][:]).nonzero()\n if not nz:\n #TODO log\n return ALLZERO\n except:\n pass\n \n return 0",
"def netInputIndex(s, current, x, y):\n # if the piece is an ally\n if s[0]:\n # if the piece is the one selected by Environment\n if current is not None and (x, y) == current:\n return 5 if s[1] else 4\n # if it's a normal piece\n else:\n return 1 if s[1] else 0\n else:\n return 3 if s[1] else 2",
"def get_stimulus_index(data, stim_name):\n for i_stim, stim_data in enumerate(data['stimuli']):\n if stim_name in stim_data['stim_path']:\n return i_stim\n\n raise KeyError('Stimulus with stim_name={} not found!'.format(stim_name))"
] | [
"0.6094319",
"0.5992637",
"0.5731462",
"0.5673416",
"0.56549484",
"0.5598223",
"0.5402393",
"0.5396196",
"0.5295413",
"0.52647763",
"0.5249398",
"0.52176803",
"0.5161581",
"0.509128",
"0.5044002",
"0.49797586",
"0.4964434",
"0.4964078",
"0.49553072",
"0.49425614",
"0.49309742",
"0.49285436",
"0.49020404",
"0.48870128",
"0.48696238",
"0.48618186",
"0.48525378",
"0.4811101",
"0.48028377",
"0.47999895"
] | 0.7424934 | 0 |
Returns the data values for a variable given the variable index. | def var_data(self, index):
data = np.ma.array(np.zeros(self.n_levels()), mask=True)
if index is not None:
for i in range(self.n_levels()):
if self.profile_data[i]['variables'][index]['Missing']: continue
data[i] = self.profile_data[i]['variables'][index]['Value']
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getLinIterValues( self, var, index = 0 ):\n\n values = self.getLinIterData( var, index )\n return values[2]",
"def get_variable_values(self, vars):\n raise NotImplementedError()",
"def get_data(self, index=0):\n if index is None:\n return [dd.data for dd in self._data_defs]\n\n return self._data_defs[index].data",
"def get_data(self, variable):\n return self.data.get(variable)",
"def t(self):\n index = self.var_index()\n return self.var_data(index)",
"def p(self):\n index = self.var_index(25)\n return self.var_data(index)",
"def variable(self, id):\n if isinstance(id, str):\n varname = self._find_vars(id, empty_ok=False, single=True)[0]\n col = self._varlist.index(varname)\n elif isinstance(id, int):\n if not -self._nvar <= id < self._nvar:\n raise ValueError(\"data variable index out of range\")\n col = id if id >= 0 else self._nvar + id\n else:\n raise TypeError(\"argument must be str name or int column index\")\n \n varvals = self._varvals\n return [row[col] for row in varvals]",
"def __getitem__(self, varName):\n # Static variables\n if varName in self.statVars:\n staticFV = StaticFileVariable(self, varName)\n return staticFV\n\n # Time variables\n elif varName in self.timeVars:\n timeVariables = TimeFileVariable(self, varName)\n return timeVariables",
"def getDataVariables(self, product):\r\n\r\n data_variable_names = self.getDataVariableNames(product)\r\n data_variables = [self.createDataVariable(product, n) for n in data_variable_names]\r\n data_variables = [self.editDataVariable(product, v) for v in data_variables]\r\n\r\n return data_variables",
"def var(self,i): # TODO: change to property to access (read only?) X?\n return Var(i,self.dims[i])",
"def silicate(self):\n index = self.var_index(6)\n return self.var_data(index)",
"def var_metadata(self, index):\n if index is not None:\n metadata = []\n for m in self.primary_header['variables'][index]['metadata']:\n meta = {\n 'value': m['Value'] / 10**m['Value precision'],\n 'code': m['Variable-specific code'],\n }\n if 'iMeta' in m:\n meta['iMeta'] = m['iMeta']\n else:\n meta['iMeta'] = 0\n metadata.append(meta)\n return metadata\n else:\n return None",
"def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)",
"def extract_var_data(self, var_names):\n variable_dict = {} # Declaring dictionary used to store key-val pairs, var_name as key and the array as the value\n try:\n for var in var_names:\n variable_dict[var] = self.dataset[var].values\n return variable_dict\n except Exception as e:\n print(\"An Error occured:\", e)\n raise e",
"def s(self):\n index = self.var_index(s=True)\n return self.var_data(index)",
"def __getitem__(self, index):\n\n if self._data_indices is not None:\n index = self._data_indices[index]\n data = self._dataset[index]\n return data",
"def _get_data(self, position):\n index = self._indexes[position]\n basename = self._waves[index].with_suffix(\".npy\").name\n return tuple(np.load(self._path / x / basename) for x in self._variables)",
"def __getitem__(self, index):\n return self.dataset[index]",
"def _read_var(self, time_idx, pressure_idx, var_idx, lat_idx, lng_idx):\n offset = self.item_size * (\n time_idx * self.t_idx + pressure_idx * self.p_idx +\n var_idx * self.v_idx + lat_idx * self.l_idx + lng_idx)\n self.mm.seek(offset)\n return self.unpacker.unpack(self.mm.read(self.item_size))[0]",
"def var_data_unc(self, index):\n data = np.ma.array(np.zeros(self.n_levels()), mask=True)\n if index is not None:\n for i in range(self.n_levels()):\n if self.profile_data[i]['variables'][index]['Missing'] or self.profile_data[i]['variables'][index]['Missing_unc']: continue\n data[i] = self.profile_data[i]['variables'][index]['Value_unc']\n return data",
"def get_step_tmp_vals_at_index(self, index):\n return self.routine_template.get_step_tmp_vals_at_index(index)",
"def indices_of_var(v):\n name = v.varName\n indices = name[2:].split(',')\n i, j = int(indices[0]), int(indices[1])\n return i, j",
"def get_value_by_index(self, index):\n return self['value'][index]",
"def getVariableInfo(self, variables, name):\r\n\r\n return [var.return_variable_dict() for var in variables if var.name == name][0]",
"def get_var(dataset, varname):\n \n import xarray as xr\n \n var = dataset[varname]\n time = dataset['TIME']\n lat = dataset['YDim']\n lon = dataset['XDim']\n\n da = xr.DataArray(var[:,:,:], coords=[time[:],lat[:],lon[:]], dims=['time','lat','lon'],\n attrs=var.attributes, name=varname)\n \n da['time'].attrs = time.attributes\n da['lat'].attrs = lat.attributes\n da['lon'].attrs = lon.attributes\n\n # Set _FillValue for coordinate arrays\n da.lat.encoding['_FillValue'] = 9.969209968386869e+36\n da.lon.encoding['_FillValue'] = 9.969209968386869e+36\n\n # To avoid conflicts between _FillValue and missing_value attributes when file is read\n da.attrs.pop('fmissing_value')\n da.attrs.pop('missing_value')\n \n return da",
"def __getitem__(self, key):\n return self.variables[key]",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def var(self, name):\n return self.get_ground_vector('!Var:{}'.format(name))",
"def getLinIterTimes( self, var, index = 0 ):\n\n values = self.getLinIterData( var, index )\n return values[1]"
] | [
"0.7106677",
"0.66892207",
"0.66811925",
"0.65154123",
"0.6507422",
"0.646576",
"0.6418683",
"0.63002825",
"0.617449",
"0.61602163",
"0.6159303",
"0.6150149",
"0.61370397",
"0.61308724",
"0.61231256",
"0.60778934",
"0.6051899",
"0.605164",
"0.6031409",
"0.6006571",
"0.60057646",
"0.5997694",
"0.59976816",
"0.5995979",
"0.5988981",
"0.5967296",
"0.5947718",
"0.5947718",
"0.5947718",
"0.59359324"
] | 0.72681504 | 0 |
Returns a list of dicts of metadata associated with a variable denoted by index | def var_metadata(self, index):
if index is not None:
metadata = []
for m in self.primary_header['variables'][index]['metadata']:
meta = {
'value': m['Value'] / 10**m['Value precision'],
'code': m['Variable-specific code'],
}
if 'iMeta' in m:
meta['iMeta'] = m['iMeta']
else:
meta['iMeta'] = 0
metadata.append(meta)
return metadata
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def t_metadata(self):\n index = self.var_index()\n return self.var_metadata(index)",
"def s_metadata(self):\n index = self.var_index(s=True)\n return self.var_metadata(index)",
"def getVar(inmeta):\n meta = AutoVivification()\n with open(inmeta) as fp:\n for line in fp:\n cols=line.split(',')\n varname=cols[0].strip()\n meta[varname]['agg'] = cols[1].strip()\n meta[varname]['dtyp'] = cols[2].strip()\n meta[varname]['long_name'] = cols[3].strip()\n meta[varname]['units'] = cols[4].strip()\n return meta",
"def get_metadata(self, variable):\n return self.dataset[variable]",
"def showmeta(self,\r\n index):\r\n\r\n return self.get_metadata_from_note(index)",
"def getVariableInfo(self, variables, name):\r\n\r\n return [var.return_variable_dict() for var in variables if var.name == name][0]",
"def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)",
"def summarize_metadata(self):\n meta_dict = {}\n for comp in self.dataset.data_vars:\n for mkey, mvalue in self.dataset[comp].attrs.items():\n meta_dict[f\"{comp}.{mkey}\"] = mvalue\n\n return meta_dict",
"def variable_dict(self,variable):\n return [x for x in self.variable_dicts() if x['variable']==variable][0]",
"def compile_metadata(inventory_dict):\n inventory_meta = {}\n #inventory_meta['InventoryDictionary'] = inventory_dict\n for source, year in inventory_dict.items():\n inventory_meta[source] = stewi.getMetadata(source, year)\n return inventory_meta",
"def get_metadata_from_note (self,\r\n index):\r\n\r\n if self.using_database:\r\n aprint('GET METADATA')\r\n value_tuple = (notebookname, str(index),)\r\n db_cursor.execute(\"SELECT user \"+\r\n \"FROM notes WHERE notebook=? \"+\r\n \"AND note_index=?;\",\r\n value_tuple)\r\n try:\r\n user = db_cursor.fetchone()[0]\r\n except:\r\n user = \"USER\"\r\n db_cursor.execute(\"SELECT size \"\r\n +\" FROM notes WHERE notebook=?\"\r\n +\" AND note_index=?;\",\r\n value_tuple)\r\n try:\r\n size = db_cursor.fetchone()[0]\r\n except:\r\n size = 60\r\n db_cursor.execute(\"SELECT timestamp\"\r\n +\" FROM timestamps WHERE notebook=? \"\r\n +\" AND note_index=?\"\r\n +\" ORDER BY timestamp\",\r\n value_tuple)\r\n dates = db_cursor.fetchall()\r\n try:\r\n date_list = [str(date[0]) for date in dates]\r\n except:\r\n date_list = [str(datetime.datetime.now())]\r\n\r\n metadata = {'user':user,\r\n 'date':date_list,\r\n 'size':size}\r\n\r\n return metadata\r\n\r\n if str(index) in self.note_dict:\r\n\r\n return self.note_dict[str(index)].meta\r\n return {}",
"def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]",
"def variables(self):\n return {u for u in self if u.type == 'var'}",
"def get_variables_list(self):\n variables = self.variables.values()\n # handle reference variables\n for variable in variables:\n name = variable['name']\n if name in self.references:\n variable['data'] = self.references[name]\n return variables",
"def get_varmeta(self):\n\n if self.ref_ds is not None:\n ref_meta = (self.ref_ds.id, self.ref_ds._names_from_attrs('all'))\n else:\n ref_meta = None\n if self.other_dss is not None:\n dss_meta = [(ds.id, ds._names_from_attrs('all')) for ds in self.other_dss]\n else:\n dss_meta = None\n if self.metric_ds is not None:\n mds_meta = (self.metric_ds.id, self.metric_ds._names_from_attrs('all'))\n else:\n mds_meta = None\n\n return ref_meta, dss_meta, mds_meta",
"def index_stats(self):\r\n request = http.Request('GET', '/metadata/index_stats')\r\n return request, parsers.parse_json",
"def __getitem__(self, index):\n item_info = {\n \"ID\": self.ID[index], \n \"turn_id\": self.turn_id[index], \n \"turn_belief\": self.turn_belief[index], \n \"gating_label\": self.gating_label[index], \n \"context_plain\":self.dialog_history[index].split(), \n \"turn_uttr_plain\": self.turn_uttr[index], \n \"turn_domain\": self.turn_domain[index], \n \"generate_y\": [v.split() for v in self.generate_y[index]],\n \"slot_temp\": self.slot_temp\n }\n return item_info",
"def vars(cls):\n for key in dir(cls):\n if key.startswith('var_'):\n yield key[4:]",
"def get_meta(self, *, index=None):\n\n return self.metadata(index=index, exclude_applied=False)",
"def var_data(self, index):\n data = np.ma.array(np.zeros(self.n_levels()), mask=True)\n if index is not None:\n for i in range(self.n_levels()):\n if self.profile_data[i]['variables'][index]['Missing']: continue\n data[i] = self.profile_data[i]['variables'][index]['Value']\n return data",
"def index(self, variables):\n return [self._variables.index(v) for v in variables]",
"def __getitem__(self, idx):\n \n sample = {'num_atoms': self.num_atoms[idx],\\\n 'symbols': self.symbols[idx],\\\n 'charges': self.charges[idx],\\\n 'positions': self.positions[idx],\\\n 'data': self.data[int(np.floor(idx/2))]}\n\n return sample",
"def __getitem__(self, index):\n return (self.train_stats, self.preprocessed_data, self.output_directory)[index]",
"def get_dicom_info(paths, index_col=None, verbose=False):\n meta_info = []\n paths = tqdm_notebook(paths, leave=False) if verbose else paths\n for path in paths:\n first_slice = dicom.read_file(os.path.join(path, os.listdir(path)[0]))\n\n if hasattr(first_slice, 'PatientAge'):\n patient_age = str(first_slice.PatientAge)\n else:\n patient_age = ''\n\n if hasattr(first_slice, 'PatientSex'):\n patient_sex = str(first_slice.PatientSex)\n else:\n patient_sex = ''\n\n locations = []\n for name in os.listdir(path):\n slice_path = os.path.join(path, name)\n dicom_slice = dicom.read_file(slice_path, stop_before_pixels=True)\n locations.append(float(dicom_slice.SliceLocation))\n\n steps_z = np.diff(np.sort(np.array(locations)))\n spacing_z = np.min(steps_z)\n info_dict = {\n \"UniformSpacing\": np.allclose(steps_z, spacing_z),\n 'MinSpacingZ': np.min(steps_z),\n 'MaxSpacingZ': np.max(steps_z),\n 'SliceThickness': float(first_slice.SliceThickness),\n 'SpacingZ': spacing_z,\n 'SpacingY': float(first_slice.PixelSpacing[0]),\n 'SpacingX': float(first_slice.PixelSpacing[1]),\n 'StudyID': str(first_slice.StudyID),\n 'ConvolutionKernel': str(first_slice.ConvolutionKernel),\n 'FilterType': str(first_slice.FilterType),\n 'WindowWidth': str(first_slice.WindowWidth),\n 'WindowCenter': str(first_slice.WindowCenter),\n 'PatientAge': patient_age,\n 'PatientSex': patient_sex,\n 'AccessionNumber': str(first_slice.AccessionNumber),\n 'PatientID': str(first_slice.PatientID),\n 'Rows': int(first_slice.Rows),\n 'Columns': int(first_slice.Columns),\n 'NumSlices': len(os.listdir(path)),\n 'ScanID': os.path.basename(path),\n 'Index': str(first_slice.AccessionNumber) + '_' + os.path.basename(path),\n 'ScanPath': path\n }\n meta_info.append(info_dict)\n return pd.DataFrame(meta_info) if index_col is None else pd.DataFrame(meta_info).set_index(index_col)",
"def get_ch_metadata(self, index):\n\n tag = self.get_ch_tag(index)\n\n return getattr(self, f\"{tag.lower()}_metadata\")",
"def __getitem__(self, varName):\n # Static variables\n if varName in self.statVars:\n staticFV = StaticFileVariable(self, varName)\n return staticFV\n\n # Time variables\n elif varName in self.timeVars:\n timeVariables = TimeFileVariable(self, varName)\n return timeVariables",
"def get_variable_attributes(model_data, header, variable, variable_name):\n header.append('# {}_column: {}\\n'.format(variable, variable_name))\n for attr, value in vars(model_data.variables[variable]).items():\n if '_range' in attr:\n header.append('# {}_{}: {},{}\\n'.format(variable, attr, value[0], value[1]))\n else:\n header.append('# {}_{}: {}\\n'.format(variable, attr, value))\n return header",
"def indices_of_var(v):\n name = v.varName\n indices = name[2:].split(',')\n i, j = int(indices[0]), int(indices[1])\n return i, j",
"def getVariables(self):\n statVars = [self[vn] for vn in self.statVars]\n timeVars = [self[vn] for vn in self.timeVars]\n return statVars + timeVars",
"def __getitem__(self, idx):\n\n text, label = self.data[idx]\n ids = self.get_ids(text)\n\n return {\"ids\": ids, \"label\": label}"
] | [
"0.66395146",
"0.657106",
"0.61420757",
"0.61139995",
"0.607735",
"0.60406494",
"0.5940418",
"0.58379537",
"0.57568055",
"0.57276076",
"0.57131696",
"0.5711036",
"0.56675327",
"0.564926",
"0.5644001",
"0.5615673",
"0.5606115",
"0.5578653",
"0.55681884",
"0.55648166",
"0.55572945",
"0.55549246",
"0.55165446",
"0.54387045",
"0.54377335",
"0.54353935",
"0.542612",
"0.54164696",
"0.538067",
"0.53570247"
] | 0.7705391 | 0 |
return the salinity metadata, if available | def s_metadata(self):
index = self.var_index(s=True)
return self.var_metadata(index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metadata(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentMetadataPropertiesResponse']]:\n return pulumi.get(self, \"metadata\")",
"def metadata(self) -> pulumi.Output[Optional['outputs.SecurityAssessmentMetadataPropertiesResponse']]:\n return pulumi.get(self, \"metadata\")",
"def metadata(self) -> Optional[pulumi.Input['SecurityAssessmentMetadataPropertiesArgs']]:\n return pulumi.get(self, \"metadata\")",
"def metadata(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"metadata\")",
"def _getAllMeta(self):\n try:\n metadata = pyexiv2.ImageMetadata(self.imagePath)\n metadata.read()\n return metadata\n except:\n print 'error reading meta data'\n return None",
"def get_salinity(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[2]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'get_salinity error: {err}')\n return -1",
"def metadata(self) -> global___SummaryMetadata:",
"def get_metadata():\n meta_data = {}\n keys = ['ami-id', 'placement/availability-zone', 'instance-id',\n 'instance-type', 'local-hostname', 'local-ipv4',\n 'public-hostname', 'public-ipv4', 'security-groups', 'user-data']\n for key in keys:\n url = \"http://169.254.169.254/latest/meta-data/\" + key\n meta_data[key] = urllib.urlopen(url).read()\n meta_data['security-groups'] = meta_data['security-groups'].split('\\n')\n return meta_data",
"def get_server_metadata(self, name):\n raise NotImplementedError",
"def GetMetadata(self):\n return self.dict['meta']",
"def get_metadata (self, name):\n return self.metadata.get(name)",
"def metadata(self) -> pulumi.Output[Optional['outputs.SyntheticsPrivateLocationMetadata']]:\n return pulumi.get(self, \"metadata\")",
"def calc_meta(self):\n #todo: actually do this correctly\n return self.calc_mapping_hash({\"es_meta\": self.es_meta,\n \"mapping\": self.default_mapping})",
"def get_metadata(self):\n return gdal.Open(self.filename).GetMetadata()",
"def get_metadata(self):\n return self.manager.get_metadata(self)",
"def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")",
"def get_metadata(self):\n meta_data = {}\n if self.beam_energy is not None:\n meta_data['beam_energy'] = self.beam_energy\n if self.collection_angle is not None:\n meta_data['collection_angle'] = self.collection_angle\n return meta_data",
"def get_metadata(self):\n return self._metadata",
"def scene_metadata(self) -> Optional[Any]:\n return pulumi.get(self, \"scene_metadata\")",
"def metadata(self) -> Optional[pulumi.Input['SyntheticsPrivateLocationMetadataArgs']]:\n return pulumi.get(self, \"metadata\")",
"def metadata(self) -> Optional[pulumi.Input['SyntheticsPrivateLocationMetadataArgs']]:\n return pulumi.get(self, \"metadata\")",
"def get_cluster_info(self) -> Dict[str, Any]:\n pass",
"def get_metadata(self, tsid):\n return self._metadata.get(tsid)",
"def get_metadata(self):\n return {}",
"def get_sensor_summary_info(self):\n import statistics\n info_dict = dict()\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Find the scene count.\")\n vld_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Invalid == False).count()\n invld_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Invalid == True).count()\n dwn_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True).count()\n ard_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.ARDProduct == True).count()\n dcload_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.DCLoaded == True).count()\n arch_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Archived == True).count()\n info_dict['n_scenes'] = dict()\n info_dict['n_scenes']['n_valid_scenes'] = vld_scn_count\n info_dict['n_scenes']['n_invalid_scenes'] = invld_scn_count\n info_dict['n_scenes']['n_downloaded_scenes'] = dwn_scn_count\n info_dict['n_scenes']['n_ard_processed_scenes'] = ard_scn_count\n info_dict['n_scenes']['n_dc_loaded_scenes'] = dcload_scn_count\n info_dict['n_scenes']['n_archived_scenes'] = arch_scn_count\n logger.debug(\"Calculated the scene count.\")\n\n logger.debug(\"Find the scene file sizes.\")\n file_sizes = ses.query(EDDSentinel1ASF.Total_Size).filter(EDDSentinel1ASF.Invalid == False).all()\n if file_sizes is not None:\n if len(file_sizes) > 0:\n file_sizes_nums = list()\n for file_size in file_sizes:\n if file_size[0] is not None:\n file_sizes_nums.append(file_size[0])\n if len(file_sizes_nums) > 0:\n total_file_size = sum(file_sizes_nums)\n info_dict['file_size'] = dict()\n info_dict['file_size']['file_size_total'] = total_file_size\n if total_file_size > 0:\n info_dict['file_size']['file_size_mean'] = statistics.mean(file_sizes_nums)\n info_dict['file_size']['file_size_min'] = min(file_sizes_nums)\n info_dict['file_size']['file_size_max'] = max(file_sizes_nums)\n if len(file_sizes_nums) > 1:\n info_dict['file_size']['file_size_stdev'] = statistics.stdev(file_sizes_nums)\n info_dict['file_size']['file_size_median'] = statistics.median(file_sizes_nums)\n if (len(file_sizes_nums) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['file_size']['file_size_quartiles'] = statistics.quantiles(file_sizes_nums)\n logger.debug(\"Calculated the scene file sizes.\")\n\n logger.debug(\"Find download and processing time stats.\")\n download_times = []\n ard_process_times = []\n scns = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True)\n for scn in scns:\n download_times.append((scn.Download_End_Date - scn.Download_Start_Date).total_seconds())\n if scn.ARDProduct:\n ard_process_times.append((scn.ARDProduct_End_Date - scn.ARDProduct_Start_Date).total_seconds())\n\n if len(download_times) > 0:\n info_dict['download_time'] = dict()\n info_dict['download_time']['download_time_mean_secs'] = statistics.mean(download_times)\n info_dict['download_time']['download_time_min_secs'] = min(download_times)\n info_dict['download_time']['download_time_max_secs'] = max(download_times)\n if len(download_times) > 1:\n info_dict['download_time']['download_time_stdev_secs'] = statistics.stdev(download_times)\n info_dict['download_time']['download_time_median_secs'] = statistics.median(download_times)\n if (len(download_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n 
info_dict['download_time']['download_time_quartiles_secs'] = statistics.quantiles(download_times)\n\n if len(ard_process_times) > 0:\n info_dict['ard_process_time'] = dict()\n info_dict['ard_process_time']['ard_process_time_mean_secs'] = statistics.mean(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_min_secs'] = min(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_max_secs'] = max(ard_process_times)\n if len(ard_process_times) > 1:\n info_dict['ard_process_time']['ard_process_time_stdev_secs'] = statistics.stdev(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_median_secs'] = statistics.median(ard_process_times)\n if (len(ard_process_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['ard_process_time']['ard_process_time_quartiles_secs'] = statistics.quantiles(\n ard_process_times)\n logger.debug(\"Calculated the download and processing time stats.\")\n\n if self.calc_scn_usr_analysis():\n plgin_lst = self.get_usr_analysis_keys()\n info_dict['usr_plugins'] = dict()\n for plgin_key in plgin_lst:\n info_dict['usr_plugins'][plgin_key] = dict()\n scns = ses.query(EDDSentinel1ASFPlugins).filter(EDDSentinel1ASFPlugins.PlugInName == plgin_key).all()\n n_err_scns = 0\n n_complete_scns = 0\n n_success_scns = 0\n plugin_times = []\n for scn in scns:\n if scn.Completed:\n plugin_times.append((scn.End_Date - scn.Start_Date).total_seconds())\n n_complete_scns += 1\n if scn.Success:\n n_success_scns += 1\n if scn.Error:\n n_err_scns += 1\n info_dict['usr_plugins'][plgin_key]['n_success'] = n_success_scns\n info_dict['usr_plugins'][plgin_key]['n_completed'] = n_complete_scns\n info_dict['usr_plugins'][plgin_key]['n_error'] = n_err_scns\n if len(plugin_times) > 0:\n info_dict['usr_plugins'][plgin_key]['processing'] = dict()\n info_dict['usr_plugins'][plgin_key]['processing']['time_mean_secs'] = statistics.mean(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_min_secs'] = min(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_max_secs'] = max(plugin_times)\n if len(plugin_times) > 1:\n info_dict['usr_plugins'][plgin_key]['processing']['time_stdev_secs'] = statistics.stdev(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_median_secs'] = statistics.median(plugin_times)\n if (len(plugin_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['usr_plugins'][plgin_key]['processing']['time_quartiles_secs'] = statistics.quantiles(plugin_times)\n ses.close()\n return info_dict",
"def get_analytics_metadata(self) -> Dict[str, Any]:\n return {\n \"pinned\": self.pinned,\n \"item_count\": self.tiles.exclude(insight=None).count(),\n \"is_shared\": self.is_sharing_enabled,\n \"created_at\": self.created_at,\n \"has_description\": self.description != \"\",\n \"tags_count\": self.tagged_items.count(),\n }",
"def metadata(self):\n return parse_metadata(self.metadata_path())",
"def generated_scene_metadata(self) -> Optional[Any]:\n return pulumi.get(self, \"generated_scene_metadata\")",
"def _getMetadata(self):\n anno_key = 'collective.sendaspdf'\n annotations = IAnnotations(self)\n\n metadata = annotations.get(anno_key,\n None)\n if metadata is None:\n annotations[anno_key] = PersistentDict()\n metadata = annotations[anno_key]\n\n return metadata",
"def metadata(self):\r\n return self._metadata"
] | [
"0.59349567",
"0.59349567",
"0.588296",
"0.58103037",
"0.5779363",
"0.5764766",
"0.5734396",
"0.5719792",
"0.5718881",
"0.57090056",
"0.568534",
"0.567004",
"0.56124866",
"0.558504",
"0.5567398",
"0.55636936",
"0.55431485",
"0.5529838",
"0.55256444",
"0.5514203",
"0.5514203",
"0.5483789",
"0.5478391",
"0.54553354",
"0.54437035",
"0.5438741",
"0.5426102",
"0.54187113",
"0.5410478",
"0.5408415"
] | 0.60768515 | 0 |
Returns level data as a pandas data frame. Profile metadata is recorded as custom attributes on the dataframe. 
# populate dataframe with level data
columns = {
"z": self.z(),
"z_level_qc": self.z_level_qc(),
"z_unc": self.z_unc(),
"t": self.t(),
"t_level_qc": self.t_level_qc(),
"t_unc": self.t_unc(),
"s": self.s(),
"s_level_qc": self.s_level_qc(),
"s_unc": self.s_unc(),
"oxygen": self.oxygen(),
"phosphate": self.phosphate(),
"silicate": self.silicate(),
"pH": self.pH(),
"p": self.p()
}
df = pd.DataFrame(columns)
# record profile data in a metadata object on the dataframe
df.attrs["latitude"] = self.latitude()
df.attrs["latitude_unc"] = self.latitude_unc()
df.attrs["longitude"] = self.longitude()
df.attrs["longitude_unc"] = self.longitude_unc()
df.attrs["uid"] = self.uid()
df.attrs["n_levels"] = self.n_levels()
df.attrs["year"] = self.year()
df.attrs["month"] = self.month()
df.attrs["day"] = self.day()
df.attrs["time"] = self.time()
df.attrs["cruise"] = self.cruise()
df.attrs["probe_type"] = self.probe_type()
df.attrs["originator_flag_type"] = self.originator_flag_type()
df.attrs["PIs"] = self.PIs()
df.attrs["originator_station"] = self.originator_station()
df.attrs["originator_cruise"] = self.originator_cruise()
df.attrs["t_metadata"] = self.t_metadata()
df.attrs["s_metadata"] = self.s_metadata()
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_reactome_hierarchy_df() -> pd.DataFrame:\n return pd.read_csv(REACTOME_HIERARCHICAL_MAPPINGS_PATH, sep='\\t')",
"def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)",
"def to_df(self):\r\n return pd.DataFrame([dict(self)])",
"def return_data_as_pandas_df(self):\n if not self.response:\n return None\n\n data = self.response['data'][self.data_type.value]\n\n # flatten data dictionary by joining property and subproperty names\n data_flat = {}\n for i, entry in enumerate(data):\n id = self.id[i]\n curr_dict = {}\n for key, values in entry.items():\n if isinstance(values, list):\n v = values[0]\n else:\n v = values\n if isinstance(v, str):\n new_key = f\"{key}\"\n curr_dict[new_key] = v\n else:\n for subprop, val in v.items():\n new_key = f\"{key}.{subprop}\"\n curr_dict[new_key] = val\n data_flat[id] = curr_dict\n\n return pd.DataFrame.from_dict(data_flat, orient='index')",
"def data(self):\n return self.as_named_DataFrame()",
"def get_data(self)->pd.DataFrame:\n pass",
"def to_df(self):\n return pd.DataFrame([dict(self)])",
"def to_df(self):\n return pd.DataFrame([dict(self)])",
"def to_df(self):\n return pd.DataFrame([dict(self)])",
"def to_df(self):\n return pd.DataFrame([dict(self)])",
"def to_df(self):\n return pd.DataFrame([dict(self)])",
"def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)",
"def dataframe(self):\n df = pd.DataFrame({'x':self.x, 'y':self.y, 'd':self.d})\n\n if self.z is not None:\n for k, v in self.z.items():\n df[k] = v\n\n return df",
"def process_data(self):\n structure_data = self.parse_root(self.root)\n\n dict_data = {}\n for d in structure_data:\n dict_data = {**dict_data, **d}\n df = pd.DataFrame(data=list(dict_data.values()), index=dict_data.keys()).T\n\n return df",
"def pd(self, *args, **kwargs):\n return pd.DataFrame.from_records(self.aslist(), *args, **kwargs)",
"def construct_data_frame(self) -> pd.DataFrame:\n data_frame = self.base_data_frame[\n [self.name_col, self.description_col]\n ].reset_index()\n data_frame.columns = [\"label_encoder\", \"name\", \"description\"]\n\n return data_frame.set_index(\"label_encoder\")",
"def dataframe(self):\n if not self.all_records:\n print('No rows cached.')\n return\n dict_list = [row.as_dict() for row in self.all_records]\n columns = self.all_records[0].keys\n dataframe = pd.DataFrame(dict_list, columns=columns)\n return dataframe",
"def df(self) -> \"pandas.DataFrame\":\n titles = []\n comments = []\n alternative_codes = []\n children = []\n for cat in self.values():\n titles.append(cat.title)\n comments.append(cat.comment)\n alternative_codes.append(cat.codes[1:])\n children.append(\n tuple(tuple(sorted(c.codes[0] for c in cs)) for cs in cat.children)\n )\n return pandas.DataFrame(\n index=self.keys(),\n data={\n \"title\": titles,\n \"comment\": comments,\n \"alternative_codes\": alternative_codes,\n \"children\": children,\n },\n )",
"def depth_t(self) -> pd.DataFrame:\n return self._load_fetch(self.DEPTH_T)",
"def create_dataframe(data: dict) -> pd.DataFrame:\n df = pd.DataFrame(data)\n df.index.name = 'coin_name'\n return df",
"def level_data(self):\n self.level(self.data)",
"def to_frame(self) -> pd.DataFrame:\n df = pd.DataFrame(data={\n 'Name': [p.name for p in self],\n 'Description': [p.desc for p in self],\n 'Value': [p.value for p in self],\n 'Hyper-Space': [p.hyper_space for p in self]\n }, columns=['Name', 'Description', 'Value', 'Hyper-Space'])\n return df",
"def dataframe(self):\n dictionary = OrderedDict(zip(self.keys, [[value] for value in self.values]))\n dataframe = pd.DataFrame(dictionary)\n return dataframe",
"def pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df",
"def construct_df():\n iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]\n index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])\n return pd.DataFrame(columns=index)",
"def dataframe(self):\n\t\treturn self._dataframe",
"def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())",
"def to_pandas(self):\n self.meta = pd.DataFrame(self.meta)\n return",
"def stack(self, level, dropna):\n return DataFrameDefault.register(pandas.DataFrame.stack)(\n self, level=level, dropna=dropna\n )",
"def _setup_dataframe(self, serie, metadata=None):\n header = self.get_data_header(serie, dataset='cnv')\n df = self.get_data_in_frame(serie, header, dataset='cnv')\n df = self.df_handler.map_column_names_of_dataframe(df)\n\n return df"
] | [
"0.61326367",
"0.60237515",
"0.60217154",
"0.59482",
"0.592096",
"0.5915685",
"0.5874256",
"0.5874256",
"0.5874256",
"0.5874256",
"0.5874256",
"0.5869173",
"0.5836051",
"0.58339447",
"0.58162373",
"0.58150154",
"0.5766248",
"0.57555896",
"0.57363296",
"0.5733595",
"0.57188636",
"0.5710547",
"0.5696295",
"0.5679369",
"0.5675831",
"0.5670463",
"0.56486493",
"0.5584405",
"0.55651045",
"0.55558264"
] | 0.7697623 | 0 |
Returns a data series containing the primary header of the current profile 
data = {}
data['latitude'] = self.latitude()
data['latitude_unc'] = self.latitude_unc()
data['longitude'] = self.longitude()
data['longitude_unc'] = self.longitude_unc()
data['uid'] = self.uid()
data['n_levels'] = self.n_levels()
data['year'] = self.year()
data['month'] = self.month()
data['day'] = self.day()
data['time'] = self.time()
data['cruise'] = self.cruise()
data['probe_type'] = self.probe_type()
header = pd.Series(data)
return header | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _horizontal_header(self):\n return self.header()",
"def _horizontal_header(self):\n return self.header()",
"def header(self):\n return self[0]",
"def header(self) -> List:\n return self.rows[0]",
"def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"",
"def tsv_header(self):\n return self.tsv_lines[0]",
"def headerData(self, section, orientation, role):\r\n if role == Qt.DisplayRole:\r\n if orientation == Qt.Horizontal:\r\n return [\"Title\", \"Website\"][section]\r\n if orientation == Qt.Vertical:\r\n return None",
"def header(self) -> list:\n cols = self.data.columns.tolist()\n header = [\"index\"]\n for col_int in cols:\n header.append(col_int)\n return header",
"def _horizontal_header(self):\n return self.horizontalHeader()",
"def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header",
"def buildStatsTableHeader(self, table):\n heading = table.thead.tr\n heading.th('No')\n heading.th('Begin probe')\n heading.th('End probe')\n heading.th('Min')\n heading.th('Max')\n heading.th('Median')\n heading.th('Mean')\n heading.th('{}%'.format(self.percentile1))\n heading.th('{}%'.format(self.percentile2))\n heading.th('Standard Deviation')",
"def headerData(self, section, orientation, role):\n headers = [\"Constituancy\", \"Lab\", \"Con\", \"LD\"]\n\n if role == qc.Qt.DisplayRole and orientation == qc.Qt.Horizontal:\n return qc.QVariant(headers[section])\n\n return qc.QVariant()",
"def getMeasHeaders(self):\n headers = []\n for ii in range(self.rows):\n inst = self.instruments[self.stringInsts.index(self.selInsts[ii])]\n param = inst.getParam(self.selParams[ii])\n if type(param.comps) is not list:\n if param.type == 'cont':\n headers.append(sc.formatHeader(inst, param, param.units))\n else:\n headers.append(sc.formatHeader(inst, param))\n else:\n for ii,comp in enumerate(param.comps):\n if param.type == 'cont':\n headers.append(sc.formatHeader(inst, comp, param.units[ii]))\n else:\n headers.append(sc.formatHeader(inst, comp))\n return headers",
"def headerData(self, section:int, orientation:QtCore.Qt.Orientation, role:typing.Optional[int]=QtCore.Qt.DisplayRole) -> typing.Any:",
"def header(self):\r\n raise NotImplementedError",
"def headerData(self, sidx, orientation, role):\n res = None\n if role == qtc.Qt.DisplayRole:\n res = QNULL\n if orientation == qtc.Qt.Horizontal:\n res = self._headers[sidx]\n elif role == qtc.Qt.DecorationRole and orientation == qtc.Qt.Vertical:\n res = qtc.QVariant(GC.load_icon(\"wizards/remove.png\"))\n if res is None:\n res = AbsTableModel.headerData(self, sidx, orientation, role)\n return res",
"def header(self, **args):\n return self.pageConfig['header'] % self.pageConfig",
"def headerData(self, sidx, orientation, role):\n res = None\n if orientation == qtc.Qt.Vertical and role == qtc.Qt.DecorationRole and sidx == self._sel.give_conds_nb():\n res = qtc.QVariant(GC.load_icon(\"wizards/add.png\"))\n elif role == qtc.Qt.DisplayRole:\n res = QNULL\n if orientation == qtc.Qt.Horizontal:\n res = self._headers[sidx]\n elif role == qtc.Qt.DecorationRole and orientation == qtc.Qt.Vertical:\n res = qtc.QVariant(GC.load_icon(\"wizards/remove.png\"))\n if res is None:\n res = AbsTableModel.headerData(self, sidx, orientation, role)\n return res",
"def headerData(self, section, orientation, role=Qt.DisplayRole):\n if(orientation == Qt.Horizontal and role == Qt.DisplayRole):\n if(section == Columns.Date):\n return \"#\"\n elif(section == Columns.Code):\n return \"Code\"\n elif(section == Columns.User):\n return \"User\"\n elif(section == Columns.Tags):\n return \"Privileges\"\n elif(section == Columns.TimesRequested):\n return \"Times requested\"",
"def second_header():\n return \"\"\"\n<th>\n<th>start\n<th>start\n<th>end\n<th>(secs)\n<th>time\n<th>frames\n<th>\n<th>bin\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>\n<th>width\n<th>angle\n<th>\n<th>\n<th>\n<th>\n\"\"\"",
"def headerData(self, section, orientation, role = Qt.DisplayRole):\n\n if role != Qt.DisplayRole:\n return QVariant()\n\n if orientation == Qt.Vertical and role == Qt.DisplayRole:\n value = section + 1\n return QVariant(str(value))\n elif orientation == Qt.Horizontal and role == Qt.DisplayRole:\n value = ef.col_num_to_string(int(section)+1)\n return QVariant(str(value))\n else:\n return QVariant('')",
"def header(self):\n ...",
"def get_heading(self):\n return self.heading[0]",
"def show_header():\n return {};",
"def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):\n if role == QtCore.Qt.DisplayRole:\n if orientation == QtCore.Qt.Horizontal:\n if len(self.__header) > section:\n return self.__header[section]\n else:\n return '%.2d' % (section + 1)\n else:\n return None",
"def get_header():\n str_list = ['specimennumber','speciesid','group','family','genus','species','scientificname', \\\n 'commonname','country','state','county','locality','latitude','longitude', \\\n 'source','accuracy','drainagename','centroidtype','huc8name','huc8', \\\n 'huc10name','huc10','huc12name','huc12','date','year','month','day','status','comments', \\\n 'recordtype','disposal','museumcatnumber','freshmarineintro','references']\n return str_list",
"def CSVHeader(self):\n \t\n return ','.join('\"{}\"'.format(Statistics.attrs[i][1]) \n for i in sorted(Statistics.attrs.keys()))",
"def getHeader(self):\n return self.data.header",
"def headerData(self, section, orientation, role):\n headers = [\"Lab\", \"Con\", \"LD\"]\n\n if role == qc.Qt.DisplayRole:\n if orientation == qc.Qt.Vertical:\n return qc.QVariant(headers[section])\n\n return qc.QVariant(\"Vote (%)\")\n\n return qc.QVariant()",
"def headerData(self, i, orientation, role=myqt.Qt.ItemDataRole.DisplayRole):\n if (\n orientation == myqt.Qt.Orientation.Horizontal\n and role == myqt.Qt.ItemDataRole.DisplayRole\n ):\n return self.column[i]\n return None"
] | [
"0.64102864",
"0.64102864",
"0.6335023",
"0.62913215",
"0.60832685",
"0.6059192",
"0.6058631",
"0.6040735",
"0.6023438",
"0.60070777",
"0.5971422",
"0.59401476",
"0.59113216",
"0.5908255",
"0.59058255",
"0.5874509",
"0.58718234",
"0.5869801",
"0.58629376",
"0.5793363",
"0.5790896",
"0.5779564",
"0.57356364",
"0.56863034",
"0.56728464",
"0.56712687",
"0.56527644",
"0.5647497",
"0.5645894",
"0.5625349"
] | 0.7370823 | 0 |
Given a valid (IPv4) IP address, return a defanged version of that IP address. A defanged IP address replaces every period "." with "[.]". | def defangIPaddr(address):
address_as_list = list(address)
length_of_address = len(address_as_list)
for i in range(length_of_address):
if address_as_list[i] == ".":
address_as_list[i] = "[.]"
return "".join(address_as_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def safe_addr(ip_addr):\n return '.'.join(ip_addr.split('.')[:2] + ['xxx', 'xxx'])",
"def filter_ip_address(self, string):\n count = string.count('.')\n newstring = string\n if count < 3:\n # Not enough components to matter\n return newstring\n\n dot_split = string.split('.')\n\n # Count the number of components that convert to an integer\n int_count = 0\n for component in dot_split:\n try:\n # Note: _ is pythonic for unused variable\n _ = int(component)\n int_count = int_count + 1\n except ValueError:\n pass\n\n if int_count >= 4:\n # Replace everything\n newstring = string.replace('.', '-')\n\n return newstring",
"def _sanitize_ipv4_mapping(ip_str):\r\n if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):\r\n # not an ipv4 mapping\r\n return ip_str\r\n\r\n hextets = ip_str.split(':')\r\n\r\n if '.' in hextets[-1]:\r\n # already sanitized\r\n return ip_str\r\n\r\n ipv4_address = \"%d.%d.%d.%d\" % (\r\n int(hextets[6][0:2], 16),\r\n int(hextets[6][2:4], 16),\r\n int(hextets[7][0:2], 16),\r\n int(hextets[7][2:4], 16),\r\n )\r\n\r\n result = ':'.join(hextets[0:6])\r\n result += ':' + ipv4_address\r\n\r\n return result",
"def safe_ip_format(ip):\r\n try:\r\n if netaddr.IPAddress(ip).version == 6:\r\n return '[%s]' % ip\r\n except (TypeError, netaddr.AddrFormatError): # hostname\r\n pass\r\n # it's IPv4 or hostname\r\n return ip",
"def ip_address(addr):\n parts = addr.split('.')\n if len(parts) != 4:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n for part in parts:\n try:\n num = int(part)\n if num < 0 or num > 255:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n except ValueError:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n return addr",
"def overlay_ip(ip):\n return \"192.168.{}.{}\".format( *ip.split(\".\")[2:])",
"def reverse_dotted_decimals(ipaddress):\n return '.'.join(ipaddress.split('.')[::-1])",
"def format_ip(addr):\n return \\\n str(ord(addr[0])) + '.' + \\\n str(ord(addr[1])) + '.' + \\\n str(ord(addr[2])) + '.' + \\\n str(ord(addr[3]))",
"def reverse_lookup_zone(ipaddress):\n return reverse_dotted_decimals(ipaddress) + '.in-addr.arpa'",
"def format_ipv4(value, mask=None):\n value_ipv4 = \".\".join([str(int(x, 16)) for x in re.findall('..', \"{:08x}\".format(value))])\n if mask is None:\n return value_ipv4\n value_mask = \".\".join([str(int(x, 16)) for x in re.findall('..', \"{:08x}\".format(mask))])\n return \"{}/{}\".format(value_ipv4, value_mask)",
"def get_ip_pattern(ip):\n return re.compile(ip.replace('.', '[.]'))",
"def _parse_addr(self, addr_str):\n addr = [int(i) for i in addr_str.split('.')]\n if len(addr) != 4 or any([i < 0 for i in addr]) or any([i > 255 for i in addr]):\n raise ValueError('Invalid IP address: %s' % addr_str)\n val = 0\n for i in addr:\n val *= 255\n val += i\n return val",
"def test_clean_ip(self):\n\n raw_ip = 'client=mail-ed1-f51.google.com[209.85.208.51]'\n result = clean_ip(raw_ip)\n self.assertEqual(result, '209.85.208.51')",
"def normalize_ip4(self):\n\n ip = str(self.ip4)\n # Let's normalize the ip list first\n ip_list = list(\n map(\n lambda v: ipaddress.IPv4Network(v),\n filter(\n lambda v: self.try_convert(v, None, ipaddress.IPv4Network),\n map(\n lambda v: v.split('|')[1].split('/')[0].strip()\n if '|' in v else\n v.split('/')[0].strip(),\n ip.split(',')\n )\n )\n )\n )\n\n if ip_list:\n ip_list.sort()\n ip = tuple(\n int(c)\n for c in str(ip_list[0]).split('/')[0].split('.')\n )\n else:\n ip = (9999, ip)\n\n self.ip4 = ip",
"def get_single_net_client_addr (ip_addr, octetListDict = {'3' : 1}, ip_type = 'ipv4'):\n if ip_type == 'ipv4':\n ip_lst = ip_addr.split('.')\n\n for octet,increment in octetListDict.iteritems():\n int_octet = int(octet)\n if ((int_octet < 0) or (int_octet > 3)):\n raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )\n else:\n if (int(ip_lst[int_octet]) + increment) < 255:\n ip_lst[int_octet] = str(int(ip_lst[int_octet]) + increment)\n else:\n raise ValueError('the requested increment exceeds 255 client address limit')\n\n return '.'.join(ip_lst)\n\n else: # this is a ipv6 address, handle accordingly\n ip_lst = ip_addr.split(':')\n\n for octet,increment in octetListDict.iteritems():\n int_octet = int(octet)\n if ((int_octet < 0) or (int_octet > 7)):\n raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )\n else:\n if (int(ip_lst[int_octet]) + increment) < 65535:\n ip_lst[int_octet] = format( int(ip_lst[int_octet], 16) + increment, 'X')\n else:\n raise ValueError('the requested increment exceeds 65535 client address limit')\n\n return ':'.join(ip_lst)",
"def _explode_shorthand_ip_string(ip_str):\r\n if not _is_shorthand_ip(ip_str):\r\n # We've already got a longhand ip_str.\r\n return ip_str\r\n\r\n new_ip = []\r\n hextet = ip_str.split('::')\r\n\r\n # If there is a ::, we need to expand it with zeroes\r\n # to get to 8 hextets - unless there is a dot in the last hextet,\r\n # meaning we're doing v4-mapping\r\n if '.' in ip_str.split(':')[-1]:\r\n fill_to = 7\r\n else:\r\n fill_to = 8\r\n\r\n if len(hextet) > 1:\r\n sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))\r\n new_ip = hextet[0].split(':')\r\n\r\n for _ in xrange(fill_to - sep):\r\n new_ip.append('0000')\r\n new_ip += hextet[1].split(':')\r\n\r\n else:\r\n new_ip = ip_str.split(':')\r\n\r\n # Now need to make sure every hextet is 4 lower case characters.\r\n # If a hextet is < 4 characters, we've got missing leading 0's.\r\n ret_ip = []\r\n for hextet in new_ip:\r\n ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())\r\n return ':'.join(ret_ip)",
"def ip_pad(ippart,fillupper=False):\n sp=ippart.split('.')\n fill='0'\n if fillupper:\n fill='255'\n \n quads=[]\n for part in sp:\n if part=='':\n continue\n quads.append(str(part))\n \n while len(quads)<4:\n quads.append(fill)\n \n return '.'.join(quads)",
"def isofy_ipv4(ip_string, prefix=\"\"):\n ipaddress.IPv4Address(ip_string) # fails for invalid IP\n\n if prefix != \"\":\n prefix_valid = bool(re.match(r\"^.{2}(\\..{4})*?$\", prefix))\n if not prefix_valid:\n raise ValueError(f\"{prefix} cannot be used as ISO prefix, please check formatting\")\n prefix += \".\"\n # IP: split and fill with 0s\n ip_parts = ip_string.split(\".\")\n padded = [p.zfill(3) for p in ip_parts]\n joined = \"\".join(padded)\n # IP: split to chunks à 4 chars\n chunksize = 4\n ip_chunks = [joined[i : i + chunksize] for i in range(0, len(joined), chunksize)]\n # combine\n iso_address = prefix + \".\".join(ip_chunks) + \".00\"\n return iso_address",
"def ip4range(iprange):\n assert not ('/' in iprange and '-' in iprange),'cidr and dash notation is not possible'\n if '/' in iprange:\n #cidr range\n ippart,mask=iprange.split('/',1)\n mask=int(mask)\n ip=ip_pad(ippart)\n lowerlong,upperlong=cidr2lowerupper(ip,mask)\n lowerip=long2ip(lowerlong)\n upperip=long2ip(upperlong)\n \n elif '-' in iprange:\n lpart,upart=iprange.split('-',1)\n lowerip=ip_pad(lpart)\n \n #upperip only one octet? fill last specified octed from lpart\n if '.' not in upart:\n sp=lpart.split('.')\n sp[-1]=upart\n upart='.'.join(sp)\n \n upperip=ip_pad(upart,True)\n else:\n lowerip=ip_pad(iprange)\n upperip=ip_pad(iprange,True)\n \n return lowerip,upperip",
"def test_IP_to_IPv4(self):\n self.assertEqual(helpers.IP_to_IPv4('00000000000000000000000000000000'), '0.0.0.0')\n self.assertEqual(\n helpers.IPs_to_IPv4s(\n [\n '00000000000000000000000000000000',\n '10001000100110100011111010101001'\n ]\n ),\n ['0.0.0.0', '136.154.62.169']\n )",
"def resolve_ip(self, url):\n ext = tldextract.extract(url)\n if ext.subdomain:\n # ToDo: possibly check for exceptions\n return socket.gethostbyname(\n ext.subdomain + \".\" + ext.registered_domain\n )\n else:\n return socket.gethostbyname(ext.registered_domain)",
"def address_to_ip_prefix(address):\n return address.split('/')",
"def generateRandomIPv4():\n return \".\".join(map(str, (random.randint(0, 255) for _ in range(4))))",
"def test_IPv4_to_IP(self):\n self.assertEqual(helpers.IPv4_to_IP('0.0.0.0'), '00000000000000000000000000000000')\n self.assertEqual(\n helpers.IPv4s_to_IPs(['0.0.0.0', '136.154.62.169']),\n [\n '00000000000000000000000000000000',\n '10001000100110100011111010101001'\n ]\n )",
"def is_ipv4_address(ip): \n octet_range = range(256) \n octets = ip.split('.') \n\n if len(octets) != 4: \n return False \n elif any(not octet.isdigit() for octet in octets): \n return False \n elif any(int(octet) not in octet_range for octet in octets): \n return False \n\n return True",
"def test_ip4_cidr_syntax_internal_v6(self):\n \n test_ip = ip_address.IPAddress(\"192.168.0.1/24\")\n \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/16\") \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/8\")\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1\")\n assert test_ip.subnet == []",
"def check_ipv4_ipv6_fqdn(val):\n\n try:\n val = u'{0}'.format(val)\n ip = ip_network(val, strict=False)\n return ip.version\n except ValueError:\n return 0",
"def get_ip_dotted(self):\r\n return socket.inet_ntoa(struct.pack('>I', self.ip))",
"def fromV4(klass, ip):\n if not isinstance(ip, V4Address):\n ip = V4Address(str(ip))\n return klass(\"::ffff:{0!s}\".format(ip))",
"def format_url_address(address):\n try:\n addr = netaddr.IPAddress(address)\n if addr.version == constants.IPV6_FAMILY:\n return \"[%s]\" % address\n else:\n return str(address)\n except netaddr.AddrFormatError:\n return address"
] | [
"0.7697277",
"0.7319066",
"0.68061745",
"0.6796537",
"0.6741873",
"0.6540487",
"0.649368",
"0.63472986",
"0.6228145",
"0.59975916",
"0.59913415",
"0.5963269",
"0.59367293",
"0.59113383",
"0.5905575",
"0.5866596",
"0.58397835",
"0.582544",
"0.58252937",
"0.5804415",
"0.57601374",
"0.5739073",
"0.5738965",
"0.5738692",
"0.57243055",
"0.569266",
"0.56783783",
"0.56779927",
"0.5664518",
"0.5664266"
] | 0.73443043 | 1 |
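The accepted document above rewrites the address character by character before rejoining it. A minimal sketch of the same behaviour, relying only on the Python standard library, delegates the substitution to str.replace; the name defang_ip_addr is illustrative and not part of the dataset row.

def defang_ip_addr(address: str) -> str:
    # str.replace swaps every "." for "[.]" in one pass, matching the
    # list-based loop in the accepted document.
    return address.replace(".", "[.]")

assert defang_ip_addr("1.1.1.1") == "1[.]1[.]1[.]1"
assert defang_ip_addr("255.100.50.0") == "255[.]100[.]50[.]0"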
This view returns a list of all courses. | def list_all_courses(request):
courses = Course.objects.all()
courses = [dict(course_name = c.course_name, course_code = c.course_code, course_year = c.year,
course_url = '/course/%s/' % c.course_code.lower()) for c in courses]
response = {'courses': courses}
return render_to_response('all_courses.json', response, mimetype = 'application/json',
context_instance = RequestContext(request)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def course_listing(request):\r\n if GlobalStaff().has_user(request.user):\r\n # user has global access so no need to get courses from django groups\r\n courses = _accessible_courses_list(request)\r\n else:\r\n try:\r\n courses = _accessible_courses_list_from_groups(request)\r\n except ItemNotFoundError:\r\n # user have some old groups or there was some error getting courses from django groups\r\n # so fallback to iterating through all courses\r\n courses = _accessible_courses_list(request)\r\n\r\n def format_course_for_view(course):\r\n \"\"\"\r\n return tuple of the data which the view requires for each course\r\n \"\"\"\r\n return (\r\n course.display_name,\r\n reverse_course_url('course_handler', course.id),\r\n get_lms_link_for_item(course.location),\r\n course.display_org_with_default,\r\n course.display_number_with_default,\r\n course.location.name\r\n )\r\n\r\n return render_to_response('index.html', {\r\n 'courses': [format_course_for_view(c) for c in courses if not isinstance(c, ErrorDescriptor)],\r\n 'user': request.user,\r\n 'request_course_creator_url': reverse('contentstore.views.request_course_creator'),\r\n 'course_creator_status': _get_course_creator_status(request.user),\r\n 'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False)\r\n })",
"def list_course(request, template=\"core/list_course.html\"):\n response = {\n 'morning': Course.objects.at_morning(),\n 'afternoon': Course.objects.at_afternoon(),\n }\n return direct_to_template(request, template, response)",
"def courses(request):\r\n courses = get_courses(request.user, request.META.get('HTTP_HOST'))\r\n courses = sort_by_announcement(courses)\r\n\r\n return render_to_response(\"courseware/courses.html\", {'courses': courses})",
"def getCoursesList(self, pageSize=100):\n results = self.service.courses().list(pageSize=pageSize).execute()\n self.courses = results.get('courses', [])\n if not self.courses:\n return []\n return self.courses # Might not have to return self.courses, but it's useful for now",
"def get_courses(self):\r\n\r\n return self.def_ms.get_courses()",
"def view_all_courses(request, username):\n if request.method == 'GET':\n\n # if user log in \n try:\n user = User.objects.get(username=username)\n if ensure_login(user) == False:\n return JsonResponse({'login': 'User must login'}, status=403) \n except:\n return JsonResponse({'login': 'User must login'}, status=403)\n\n if user.is_staff:\n courses = courseQuerySetSerializer(user.created_courses.all())\n else:\n courses = courseQuerySetSerializer(user.enrolled_courses.all())\n\n if courses is None:\n return JsonResponse({'error': 'No courses to view'}, status=404)\n \n return JsonResponse({'success': True, 'courses': courses}, status=200) # each course_code should be stored in data-course attribte inorder to grap it when perfoming actions on a speific course\n else:\n return JsonResponse({'error': 'Method not allowed'}, status=405)",
"def index(request):\n # TODO: Use django.views.generic.ListView\n courses = Course.objects.order_by('-name')\n context = {\n 'courses': courses,\n }\n return render(request, 'hall_pass/index.html', context)",
"def index():\n user, user_id = get_user()\n # Get this user's course with their roles\n my_courses = []\n if user:\n my_courses = user.get_courses()\n # Get all public courses\n public_courses = Course.get_public()\n\n return render_template('courses/index.html',\n user=user,\n my_courses=my_courses,\n public_courses=public_courses)",
"def get_courses(db: Session = Depends(get_db)): # , _: models.User = Depends(get_current_user))\n return crud.course.get_multi(db, skip=0, limit=100)",
"def get_courses(self):\n\n self.search([]).unlink()\n token = self.env['odoo.moodle'].search([('create_uid', '=', self.env.user.id)]).token\n domain = \"http://localhost:8888\"\n webservice_url = \"/webservice/rest/server.php?\"\n parameters = {\n \"wstoken\":token,\n 'wsfunction': 'core_course_get_courses',\n 'moodlewsrestformat': 'json'\n }\n request = requests.get(url=domain+webservice_url, params=parameters)\n request = request.json()\n print(request)\n\n for req in request:\n try:\n if req['id']==1:\n pass\n else:\n self.create({\n 'course_id': req['id'], \n 'category':req['categoryid'],\n 'fullname':req['fullname'], \n 'shortname':req['shortname'],\n 'summary': req['summary']\n }\n )\n except Exception:\n print('Course not created')",
"def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)",
"def get_courses():\n courses = []\n courses_recs = Course._file.read_db()\n for course in courses_recs[\"courses\"]:\n courses.append(Course(**course))\n return courses",
"def show_courses_and_profs(request):\r\n\tcourses = course_views.all_courses(request)\r\n\treturn render(request, 'templates/browse.html', {'courses': courses})",
"def _accessible_courses_list(request):\r\n courses = modulestore('direct').get_courses()\r\n\r\n # filter out courses that we don't have access to\r\n def course_filter(course):\r\n \"\"\"\r\n Get courses to which this user has access\r\n \"\"\"\r\n if GlobalStaff().has_user(request.user):\r\n return course.location.course != 'templates'\r\n\r\n return (has_course_access(request.user, course.id)\r\n # pylint: disable=fixme\r\n # TODO remove this condition when templates purged from db\r\n and course.location.course != 'templates'\r\n )\r\n courses = filter(course_filter, courses)\r\n return courses",
"def extract_courses():\n if settings.XPRO_COURSES_API_URL:\n return requests.get(settings.XPRO_COURSES_API_URL, timeout=20).json()\n return []",
"def search_courses():\n current_user = view_helpers.get_current_user()\n courses, has_more = m.Course.search(flask.request.values, current_user)\n\n course_dicts, user_course_dicts, _ = (\n m.Course.get_course_and_user_course_dicts(courses, current_user))\n\n return api_util.jsonify({\n 'courses': course_dicts,\n 'user_courses': user_course_dicts,\n 'has_more': has_more,\n })",
"def get_courses(self, *args):\n courses = []\n user = self.context['user']\n modules = user.profile.purchased_modules.all()\n for module in modules:\n course_id = self.course_in_courses(module.course.mnemo, courses)\n if course_id:\n courses[course_id[0]]['modules'].append({'mnemo': module.mnemo})\n else:\n courses.append({\n 'mnemo': module.course.mnemo,\n 'modules': [{'mnemo': module.mnemo}]\n })\n return courses",
"def get_courses_for_wiki(self, wiki_slug):\r\n courses = []\r\n return courses",
"def getUserCoursesList(self, chat_id):\n\t\tcommand = \"SELECT ID, name, description FROM courses WHERE author_id=?;\"\n\t\tparams = (chat_id,)\n\n\t\tdata = self._run_command(command, params)\n\n\t\tif not data:\n\t\t\treturn None\n\n\t\tresult = []\n\t\tfor i in data:\n\t\t\tresult.append({\"ID\": i[0], \n\t\t\t\t\"name\": i[1],\n\t\t\t\t\"description\": i[2] if i[2] else \"\",\n\t\t\t\t})\n\n\t\treturn result",
"def get(self, request):\r\n\r\n if not request.user.is_staff:\r\n raise Http404\r\n data = []\r\n\r\n for course in self.get_courses(): # pylint: disable=unused-variable\r\n datum = [course.display_name, course.id]\r\n datum += [CourseEnrollment.objects.filter(\r\n course_id=course.id).count()]\r\n datum += [CourseStaffRole(course.id).users_with_role().count()]\r\n datum += [','.join([x.username for x in CourseInstructorRole(\r\n course.id).users_with_role()])]\r\n data.append(datum)\r\n\r\n datatable = dict(header=[_('Course Name'), _('course_id'),\r\n _('# enrolled'), _('# staff'),\r\n _('instructors')],\r\n title=_('Enrollment information for all courses'),\r\n data=data)\r\n context = {\r\n 'datatable': datatable,\r\n 'msg': self.msg,\r\n 'djangopid': os.getpid(),\r\n 'modeflag': {'staffing': 'active-section'},\r\n 'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),\r\n }\r\n return render_to_response(self.template_name, context)",
"def get_courses(self, depth=0):\r\n return self.courses.values()",
"def retrieve_courses(self) -> pd.DataFrame:\n if self.courses_df is None:\n self.courses_df = pd.read_sql_query('SELECT * FROM courses', con=self.connection())\n\n return self.courses_df",
"def test_get_course_list(self):\r\n request = self.factory.get('/course/')\r\n request.user = self.user\r\n\r\n course_location = SlashSeparatedCourseKey('Org1', 'Course1', 'Run1')\r\n self._create_course_with_access_groups(course_location, self.user)\r\n\r\n # get courses through iterating all courses\r\n courses_list = _accessible_courses_list(request)\r\n self.assertEqual(len(courses_list), 1)\r\n\r\n # get courses by reversing group name formats\r\n courses_list_by_groups = _accessible_courses_list_from_groups(request)\r\n self.assertEqual(len(courses_list_by_groups), 1)\r\n # check both course lists have same courses\r\n self.assertEqual(courses_list, courses_list_by_groups)",
"def course_index(request, course_key):\r\n course_module = _get_course_module(course_key, request.user, depth=3)\r\n lms_link = get_lms_link_for_item(course_module.location)\r\n sections = course_module.get_children()\r\n\r\n\r\n return render_to_response('overview.html', {\r\n 'context_course': course_module,\r\n 'lms_link': lms_link,\r\n 'sections': sections,\r\n 'course_graders': json.dumps(\r\n CourseGradingModel.fetch(course_key).graders\r\n ),\r\n 'new_section_category': 'chapter',\r\n 'new_subsection_category': 'sequential',\r\n 'new_unit_category': 'vertical',\r\n 'category': 'vertical'\r\n })",
"def get_courses_info(url, headers):\n dash = get_page_contents(url, headers)\n soup = BeautifulSoup(dash)\n courses_soup = soup.find_all('article', 'course')\n courses = []\n for course_soup in courses_soup:\n course_id = None\n course_name = course_soup.h3.text.strip()\n course_url = None\n course_state = 'Not yet'\n try:\n # started courses include the course link in the href attribute\n course_url = BASE_URL + course_soup.a['href']\n if course_url.endswith('info') or course_url.endswith('info/'):\n course_state = 'Started'\n # The id of a course in edX is composed by the path\n # {organization}/{course_number}/{course_run]\n course_id = course_soup.a['href'][9:-5]\n except KeyError:\n pass\n courses.append(Course(id=course_id,\n name=course_name,\n url=course_url,\n state=course_state))\n return courses",
"def available_courses(self):\r\n def _get_course_name(el):\r\n # The first component in the link text is the course number\r\n _, course_name = el.text.split(' ', 1)\r\n return course_name\r\n\r\n return self.q(css='section.info > hgroup > h3 > a').map(_get_course_name).results",
"def index(request, extra_context={}, user=AnonymousUser()):\r\n\r\n # The course selection work is done in courseware.courses.\r\n domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False\r\n # do explicit check, because domain=None is valid\r\n if domain is False:\r\n domain = request.META.get('HTTP_HOST')\r\n\r\n courses = get_courses(user, domain=domain)\r\n courses = sort_by_announcement(courses)\r\n\r\n context = {'courses': courses}\r\n\r\n context.update(extra_context)\r\n return render_to_response('index.html', context)",
"def get_courses(self):\n if not self.is_course_based_activity():\n raise IllegalState()\n else:\n raise Unimplemented()",
"def get_courses_html():\r\n r = requests.get(URL_CS_ALL_REQ)\r\n if r.status_code == 200:\r\n return r.text\r\n else:\r\n return None",
"def GetCourses(firebase: firebase) -> None:\n\n global courses\n obj_key_list = []\n\n result = firebase.get('/course', None)\n\n if result is None:\n return\n\n for i in result.keys():\n obj_key_list.append(i)\n\n for i in obj_key_list:\n course = Course()\n course.setId(i)\n course.setKnowledgeAreaId(result[i]['knowledgeareaid'])\n course.setCatalogId(result[i]['catalogid'])\n course.setTitle(result[i]['name'])\n course.setDescription(result[i]['description'])\n course.setInstructor(result[i]['instructor'])\n course.setFee(result[i]['fee'])\n courses.append(course)"
] | [
"0.81252503",
"0.7970566",
"0.7938835",
"0.7740521",
"0.75059247",
"0.74730605",
"0.747094",
"0.7467073",
"0.7453179",
"0.7405879",
"0.7360561",
"0.72404325",
"0.70759976",
"0.7071681",
"0.70332766",
"0.70184034",
"0.6910547",
"0.6848491",
"0.682735",
"0.6808897",
"0.6771157",
"0.6758471",
"0.67448324",
"0.6678707",
"0.6677386",
"0.6662227",
"0.66555995",
"0.6651526",
"0.6618716",
"0.66128343"
] | 0.83171123 | 0 |
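The accepted document renders the payload through a JSON template with render_to_response, which dates the snippet to older Django releases. A hedged sketch of the same view on current Django, assuming the same Course model fields (course_name, course_code, year) and an assumed import location for the model, returns the payload directly with JsonResponse.

from django.http import JsonResponse

from .models import Course  # assumed location of the Course model


def list_all_courses(request):
    # Build a plain dict per course; field names follow the accepted document.
    courses = [
        {
            "course_name": c.course_name,
            "course_code": c.course_code,
            "course_year": c.year,
            "course_url": "/course/%s/" % c.course_code.lower(),
        }
        for c in Course.objects.all()
    ]
    return JsonResponse({"courses": courses})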
Get all user ids based on the names of the folders under "public_dataset/". | def get_user_ids() -> List[str]:
Get all user ids based on the names of the folders under "public_dataset/". | def get_user_ids() -> List[str]:
listOfFiles = os.listdir('public_dataset')
listOfFiles.remove('data_description.pdf')
try:
listOfFiles.remove('.DS_Store')
except:
pass
return listOfFiles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_session_ids(user_id: str) -> List[str]:\n listOfSessions = os.listdir('public_dataset/'+user_id)\n try:\n listOfSessions.remove('.DS_Store')\n except:\n pass\n return listOfSessions",
"def local_user_ids(steam):\n if steam is None:\n return None\n # The userdata directory, at the top level, just contains a single\n # subdirectory for every user which has logged into this system (and\n # therefore that Steam has data for)\n return os.listdir(steam.userdata_directory)",
"def get_user_session_ids_for_task(user_id: str, task_name: str) -> List[str]:\n listOfSessions = os.listdir('Plots/Research/'+user_id+'/'+task_name)\n try:\n listOfSessions.remove('.DS_Store')\n except:\n pass\n return listOfSessions",
"def user_ids(self):\r\n raise NotImplementedError",
"def get_uids():\n DB_NAME = 'cloud_storage.db'\n DB_DIRECTORY = 'server_side_storage/'\n db = sqlite3.connect('{}{}'.format(DB_DIRECTORY, DB_NAME))\n cursor = db.cursor()\n cursor.execute(\"SELECT uid FROM user_ids\")\n all_uids = cursor.fetchall()\n db.commit()\n cursor.close()\n db.close()\n all_uids = list(itertools.chain(*all_uids))\n return all_uids",
"def get_clusters() -> List[List[str]]:\n all_users = get_user_ids()\n pass",
"def get_user_folders_dict(user_id):\n return { folder['full_name'] : folder['id'] for folder in canvas_api.pull_folders(user_id) }",
"def get_dataset_ids(clean_folder):\n files = os.listdir(clean_folder)\n\n datasets = list(set([i.split('.')[0] for i in files]))\n\n return [d for d in datasets if d + '.otu_table.clean.feather' in files and d + '.metadata.clean.feather' in files]",
"def user_ids(self):\n return list(self.get_users())",
"def get_user_ids(session, access_token):\n endpoint = \"https://graph.microsoft.com/v1.0/users?$select=id\"\n r = session.get(endpoint, headers={\"Authorization\": \"Bearer \" + access_token})\n response = json.loads(r.text)\n return response[\"value\"]",
"def list_users_in_pool():\n files = []\n USERS_DIR = os.path.join(UPLOAD_DIRECTORY, \"users\")\n for filename in os.listdir(USERS_DIR):\n path = os.path.join(USERS_DIR, filename)\n if os.path.isdir(path):\n files.append(filename)\n return jsonify(files)",
"def _get_ids_from_name_public(self, name):\r\n results = self.list_public_images(name=name)\r\n return [result['id'] for result in results]",
"def get_all_users():",
"def getInterestedUsers():",
"def get_user_list(dataset):\n res = dataset\\\n .map(lambda x: x[0])\\\n .collect()\n return list(set(res))",
"def get_user_folders(user):\n folders = Folder.objects.filter(user=user)\n return folders",
"def getIds(self) -> List[int]:\n return list(self.users.keys())",
"def _user_in_subid(self, sub_file, wanted_user):\n subid_list = []\n if self.passwd_file:\n (user, dum1, dum2, dum3, dum4, dum5) = \\\n self._get_user_from_file(wanted_user)\n else:\n (user, dum1, dum2, dum3, dum4, dum5) = \\\n self._get_user_from_host(wanted_user)\n try:\n insub = open(sub_file)\n except (IOError, OSError):\n return []\n else:\n for line in insub:\n try:\n (subuser, subid, count) = line.strip().split(':')\n except ValueError:\n continue\n if subuser == user:\n subid_list.extend([(subid, count), ])\n insub.close()\n return subid_list",
"def user_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"user_ids\")",
"def get_id_users(self):\n return self.execute(TABELLE['id_users']['select']['all'])",
"def fusion_api_get_directory_users(self, body, api=None, headers=None):\n return self.logindomain.users(body, api, headers)",
"def _get_all_dir_ids(site_id):\n result = None\n querystring = 'select id from {} where site_id = %s;'.format(TABLES[2])\n res = execute_query(querystring, (site_id,))\n if res:\n result = [x[0] for x in res]\n return result",
"def _get_ids_from_name_private(self, name):\r\n results = self.list_private_images(name=name)\r\n return [result['id'] for result in results]",
"def get_ids(self) -> List[str]:",
"def get_shared_users(self, dataset_id: str) -> List[str]:\n\n access_configs: List[\n SharedAccessConfigData\n ] = self._collaboration_api.get_shared_access_configs_by_dataset_id(\n dataset_id=dataset_id\n )\n user_emails = []\n\n # iterate through configs and find first WRITE config\n # we use the same hard rule in the frontend to communicate with the API\n # as we currently only support WRITE access\n for access_config in access_configs:\n if access_config.access_type == SharedAccessType.WRITE:\n user_emails.extend(access_config.users)\n break\n\n return user_emails",
"def getUserIds(self):\n raise BorkedGetUserIds",
"def _get_users_list(self):\n return self.users['user_id'].tolist()",
"def _get_user_ids(model):\n return model.objects.values_list(\"user\", flat=True).distinct(\"user\")",
"def list_users(self, stream_name:str, version:int=1)->List[str]:\n stream_path = self._get_storage_path(stream_name=stream_name, version=version)\n all_users = self._ls_dir(stream_name=stream_name, version=version)\n user_ids = []\n for usr in all_users:\n user_ids.append(usr.replace(stream_path,\"\").replace(\"user=\",\"\").replace(\"study=\"+self.study_name, \"\"))\n return user_ids",
"def get_user_ids():\n TOTAL_USERS = 50\n return list(numpy.random.choice(\n TOTAL_USERS, random.randint(1, TOTAL_USERS), replace=False\n ))"
] | [
"0.73738235",
"0.7166007",
"0.6606021",
"0.6443705",
"0.6379112",
"0.63501024",
"0.6274118",
"0.62501055",
"0.62358475",
"0.61600703",
"0.61323065",
"0.6101705",
"0.6071845",
"0.6023387",
"0.6020886",
"0.6019847",
"0.6011264",
"0.5994475",
"0.5981317",
"0.59687907",
"0.5893183",
"0.58896255",
"0.5881904",
"0.588013",
"0.58772665",
"0.5871288",
"0.58417124",
"0.58098656",
"0.5801267",
"0.57523715"
] | 0.763626 | 0 |
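The accepted document filters out two known files by name after listing the directory. A small sketch that keeps only sub-directories avoids hard-coding data_description.pdf and .DS_Store; the dataset_root default mirrors the docstring and is otherwise an assumption.

from pathlib import Path
from typing import List


def get_user_ids(dataset_root: str = "public_dataset") -> List[str]:
    # Only directories are user folders, so stray files are skipped implicitly.
    return sorted(p.name for p in Path(dataset_root).iterdir() if p.is_dir())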
Get all session ids for a specific user based on the folder structure, e.g. "public_dataset/100669/100669_session_13" has user_id=100669 and session_id=13. | def get_user_session_ids(user_id: str) -> List[str]:
listOfSessions = os.listdir('public_dataset/'+user_id)
try:
listOfSessions.remove('.DS_Store')
except:
pass
return listOfSessions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_session_ids_for_task(user_id: str, task_name: str) -> List[str]:\n listOfSessions = os.listdir('Plots/Research/'+user_id+'/'+task_name)\n try:\n listOfSessions.remove('.DS_Store')\n except:\n pass\n return listOfSessions",
"def local_user_ids(steam):\n if steam is None:\n return None\n # The userdata directory, at the top level, just contains a single\n # subdirectory for every user which has logged into this system (and\n # therefore that Steam has data for)\n return os.listdir(steam.userdata_directory)",
"def get_user_folders_dict(user_id):\n return { folder['full_name'] : folder['id'] for folder in canvas_api.pull_folders(user_id) }",
"def get_user_folders(user):\n folders = Folder.objects.filter(user=user)\n return folders",
"def get_user_ids() -> List[str]:\n listOfFiles = os.listdir('public_dataset')\n listOfFiles.remove('data_description.pdf')\n try:\n listOfFiles.remove('.DS_Store')\n except:\n pass\n return listOfFiles",
"def build_rawdata(user_id):\n path = 'wattbikesessions/'\n files = os.listdir(path+user_id+'/')\n rawdata = []\n for file in files:\n try:\n rawdata.append(pd.read_pickle(path + file))\n except Exception:\n print('Could not load:',file)\n continue\n return preprocess.load_session_data(rawdata)",
"def _user_in_subid(self, sub_file, wanted_user):\n subid_list = []\n if self.passwd_file:\n (user, dum1, dum2, dum3, dum4, dum5) = \\\n self._get_user_from_file(wanted_user)\n else:\n (user, dum1, dum2, dum3, dum4, dum5) = \\\n self._get_user_from_host(wanted_user)\n try:\n insub = open(sub_file)\n except (IOError, OSError):\n return []\n else:\n for line in insub:\n try:\n (subuser, subid, count) = line.strip().split(':')\n except ValueError:\n continue\n if subuser == user:\n subid_list.extend([(subid, count), ])\n insub.close()\n return subid_list",
"def get_user_ids(session, access_token):\n endpoint = \"https://graph.microsoft.com/v1.0/users?$select=id\"\n r = session.get(endpoint, headers={\"Authorization\": \"Bearer \" + access_token})\n response = json.loads(r.text)\n return response[\"value\"]",
"def readUserSession(datafile):\n for line in datafile:\n pages = line.split()\n total = len(pages)\n # Select user sessions with 2 or more pages\n if total < 2:\n continue\n\n # Exclude outliers by removing extreme long sessions\n if total > 500:\n continue\n\n return [PAGE_CATEGORIES[int(i) - 1] for i in pages]\n return []",
"def get_sub_folders(session, ds_browser, ds_path):\n search_task = session._call_method(\n session._get_vim(),\n \"SearchDatastore_Task\",\n ds_browser,\n datastorePath=ds_path)\n try:\n task_info = session._wait_for_task(search_task)\n except error_util.FileNotFoundException:\n return set()\n # populate the folder entries\n if hasattr(task_info.result, 'file'):\n return set([file.path for file in task_info.result.file])\n return set()",
"def user_ids(self):\r\n raise NotImplementedError",
"def get_user_session_data(user_id: str, user_session_id: str) -> DataFrame:\n activity_df = read_file(user_id, user_session_id, 'Activity.csv')\n accel_df = read_file(user_id, user_session_id, 'Accelerometer.csv')\n gyro_df = read_file(user_id, user_session_id, 'Gyroscope.csv')\n\n measurements_df = accel_df.join(gyro_df, lsuffix = '_accel', rsuffix = '_gyro')\n full_df = measurements_df.join(activity_df.set_index('ID'), on='ActivityID' + '_accel')\n full_df = full_df.dropna().reset_index(drop = True)\n\n return full_df",
"def wishlist_sessions(self, user):\n wishlist_key = self.get_wishlist_key(user)\n session_keys = [ndb.Key(urlsafe=wsck) for wsck in\n wishlist_key.get().sessionKeys]\n sessions = ndb.get_multi(session_keys)\n return sessions",
"def get_clusters() -> List[List[str]]:\n all_users = get_user_ids()\n pass",
"def _create_session_data(self, abs_path, sess_root):\n sess_path = os.path.join(abs_path, sess_root)\n if not os.path.exists(sess_path):\n os.makedirs(sess_path)\n sess_id = len(os.listdir(sess_path))\n sess_path = os.path.join(sess_path, str(sess_id))\n print(\"SESSION PATH:\", sess_path)\n print(\"SESSION ID:\", sess_id) \n return sess_id, sess_path",
"def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n q = Queue()\n q.put([userID])\n\n while not q.empty():\n user_path = q.get()\n user = user_path[-1]\n\n if user not in visited.keys():\n visited[user] = user_path\n for friend in self.friendships[user]:\n new_path = user_path[::]\n new_path.append(friend)\n q.put(new_path)\n\n # get average degree of separation per user\n degrees = 0\n for key, item in visited.items():\n degrees += len(item)\n \n print('average degree of separation', degrees/len(self.users))\n\n return visited",
"def get_user_sessions(base_url, group_id, token, user_id):\n url = base_url + route_user_sessions.format(user_id=user_id)\n response = requests.get(url, headers=headers(group_id, token))\n return response",
"def get_all_user_meter_ids(session):\n\n return [meter_id[0] for meter_id in session.query(User.meter_id).all()]",
"def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n print(f\"user ID {userID}\")\n\n for i in range(1, len(self.users)):\n visited[i] = self.bfs(userID, i)\n\n return visited",
"def write_all_users(folder_name: str, label: bool):\n make_directory(folder_name)\n for user in get_user_ids():\n print(\"Analysis of user: \" + user)\n subfolder_name = folder_name + \"/\" + user\n make_directory(subfolder_name)\n for session in get_user_session_ids(user):\n print(\"Session: \" + session)\n file_name = subfolder_name + \"/\" + session + \".csv\"\n data = get_feature_vector(user, session)\n if data == None:\n continue\n if label:\n data = [labels] + data\n write_to_csv(data, file_name)",
"def transform_to_sessions(events, uid):\n \n class Sessions:\n values = {\n 'id': [], 'size_ts': [],\n 'interruptions': [], 'n_inte': [], 'n_events': [], 'start_time': [], 'end_time': [],\n 'user': [], 'edition': [], 'text_nav': [], 'high_nav': [], 'file': [], 'refactoring': [],\n 'clean_build': [], 'debug': [], 'tools': [], 'control': [], 'testing': [], 'search': []\n\n }\n\n lists = {\n 'l_interruptions': [], 'l_edition': [], 'l_text_nav': [], 'l_high_nav': [], 'l_file': [],\n 'l_refactoring': [], 'l_clean_build': [], 'l_debug': [], 'l_tools': [], 'l_control': [],\n 'l_testing': [], 'l_search': [], 'size_ts': [], 'n_inte': [], 'id': []\n }\n\n def __init__(self):\n pass\n\n def create_dataframe(self):\n \"\"\"\n Creates a dataframe with the sessions of the user\n \"\"\"\n sessions = pandas.DataFrame().from_dict(self.values)\n sessions_lists = pandas.DataFrame().from_dict(self.lists)\n return sessions, sessions_lists\n \n so = Sessions()\n\n s = set(events['session_id'])\n\n count = 0 # count of sessions\n\n # process all the sessions of the user\n for i in s:\n user_sessions = events[events['session_id'] == i]\n #user_sessions.to_csv(PATH_SESSIONS + '/' + str(uid) + '-' + str(count) + '.csv', index=False)\n\n split_session = [user_sessions]\n\n # if breaking points are found, the session splits into subsessions\n bp = break_points(user_sessions['interval'], user_sessions['minute'])\n if len(bp) > 0:\n bp.append(len(user_sessions))\n split_session = []\n index = 0\n for k in range(0,len(bp)):\n sub_session = user_sessions.iloc[index:(bp[k]+1)]\n split_session.append(sub_session)\n index = bp[k]+1\n\n count += 1\n\n for session in split_session:\n\n minutes = np.unique(np.array(session['minute']))\n l_edits = []\n l_text_nav = []\n l_high_nav = []\n l_refactoring = []\n l_debug = []\n l_file = []\n l_build = []\n l_tools = []\n l_control = []\n l_testing = []\n l_search = []\n l_inte = []\n dur = len(minutes)\n ni = 0\n\n for m in minutes:\n # for every minute of the session, count the number of edition and\n # selection events and the duration of the interruptions (among other types of events)\n subset = session[session['minute'] == m]\n l_edits.append(len(subset[subset['detailed_type'] == 'edit-text']))\n l_high_nav.append(len(subset[subset['detailed_type'] == 'high-nav']))\n l_text_nav.append(len(subset[subset['detailed_type'] == 'text-nav']))\n l_refactoring.append(len(subset[subset['detailed_type'] == 'refactoring']))\n l_debug.append(len(subset[subset['detailed_type'] == 'debug']))\n l_file.append(len(subset[subset['detailed_type'] == 'file']))\n l_build.append(len(subset[subset['detailed_type'] == 'clean-build']))\n l_tools.append(len(subset[subset['detailed_type'] == 'tools']))\n l_control.append(len(subset[subset['detailed_type'] == 'control']))\n l_testing.append(len(subset[subset['detailed_type'] == 'testing']))\n l_search.append(len(subset[subset['detailed_type'] == 'search']))\n\n if subset.iloc[len(subset)-1]['is_interruption']:\n l_inte.append(subset.iloc[len(subset)-1]['interval']//60)\n ni += 1\n else:\n l_inte.append(0)\n\n so.values['id'].append(str(uid) + '-' + str(count))\n so.lists['id'].append(str(uid) + '-' + str(count))\n # print (str(uid) + '-' + str(count))\n so.values['size_ts'].append(dur)\n so.lists['size_ts'].append(dur)\n\n # convert the lists to string\n str_edits = ' '.join(str(e) for e in l_edits)\n str_high_nav = ' '.join(str(e) for e in l_high_nav)\n str_text_nav = ' '.join(str(e) for e in l_text_nav)\n str_refactoring = ' '.join(str(e) for e in 
l_refactoring)\n str_debug = ' '.join(str(e) for e in l_debug)\n str_file = ' '.join(str(e) for e in l_file)\n str_build = ' '.join(str(e) for e in l_build)\n str_tools = ' '.join(str(e) for e in l_tools)\n str_control = ' '.join(str(e) for e in l_control)\n str_testing = ' '.join(str(e) for e in l_testing)\n str_search = ' '.join(str(e) for e in l_search)\n str_inte = ' '.join(str(e) for e in l_inte)\n\n so.values['edition'].append(str_edits)\n so.values['high_nav'].append(str_high_nav)\n so.values['text_nav'].append(str_text_nav)\n so.values['refactoring'].append(str_refactoring)\n so.values['debug'].append(str_debug)\n so.values['file'].append(str_file)\n so.values['clean_build'].append(str_build)\n so.values['tools'].append(str_tools)\n so.values['control'].append(str_control)\n so.values['testing'].append(str_testing)\n so.values['search'].append(str_search)\n so.values['interruptions'].append(str_inte)\n\n so.lists['l_interruptions'].append(l_inte)\n so.lists['l_edition'].append(l_edits)\n so.lists['l_high_nav'].append(l_high_nav)\n so.lists['l_text_nav'].append(l_text_nav)\n so.lists['l_refactoring'].append(l_refactoring)\n so.lists['l_debug'].append(l_debug)\n so.lists['l_file'].append(l_file)\n so.lists['l_clean_build'].append(l_build)\n so.lists['l_tools'].append(l_tools)\n so.lists['l_control'].append(l_control)\n so.lists['l_testing'].append(l_testing)\n so.lists['l_search'].append(l_search)\n so.lists['n_inte'].append(ni)\n \n so.values['n_inte'].append(ni)\n so.values['start_time'].append(session.iloc[0]['datetime'])\n so.values['end_time'].append(session.iloc[len(session) - 1]['datetime'])\n so.values['user'].append(session.iloc[0][0])\n so.values['n_events'].append(len(session[session['detailed_type'] != ' ']))\n\n result, lists = so.create_dataframe()\n return result, lists",
"def user_ids(self):\n return list(self.get_users())",
"def get_folder_ids(root_folder_id):\n creds = authenticate()\n service = build('drive', 'v3', credentials=creds)\n\n page_token = None\n response = service.files().list(q=\"mimeType = 'application/vnd.google-apps.folder' and '{}' in parents and trashed = false\".format(root_folder_id),\n pageSize=10,\n spaces=\"drive\",\n fields='nextPageToken, files(id, name, webViewLink)',\n pageToken=page_token,\n\n ).execute()\n found_ids = [response[\"files\"][i][\"id\"] for i in range(0, len(response[\"files\"]))] # extract all the IDs\n found_names = [response[\"files\"][i][\"name\"] for i in range(0, len(response[\"files\"]))]\n found_ids.append(root_folder_id) # if the Photos folder(as named on Google Drive) just contains folders, this is not needed\n\n return found_ids, found_names",
"def list_users(self, stream_name:str, version:int=1)->List[str]:\n stream_path = self._get_storage_path(stream_name=stream_name, version=version)\n all_users = self._ls_dir(stream_name=stream_name, version=version)\n user_ids = []\n for usr in all_users:\n user_ids.append(usr.replace(stream_path,\"\").replace(\"user=\",\"\").replace(\"study=\"+self.study_name, \"\"))\n return user_ids",
"def _get_images_and_labels(self, path: str, user_id: int):\n\n image_paths = [os.path.join(path, f) for f in os.listdir(path)]\n face_samples = []\n ids = []\n\n for imagePath in image_paths:\n\n pil_image = Image.open(imagePath).convert('L') # convert it to grayscale\n img_numpy = np.array(pil_image, 'uint8')\n\n faces = self.detector.detectMultiScale(img_numpy)\n\n for (x, y, w, h) in faces:\n face_samples.append(img_numpy[y:y + h, x:x + w])\n ids.append(user_id)\n\n return face_samples, ids",
"def getDashboardsForUser(user):\n dashboards = Dashboard.objects(Q(analystId=user.id) | Q(isPublic=True))\n parents = []\n userDashboards = []\n #get all id's of parent dashboards\n for dash in dashboards:\n if dash.parent:\n parents.append(dash.parent)\n #remove any parent from the list to prevent duplicate dashboards\n for dash in dashboards:\n if not dash.id in parents:\n userDashboards.append(dash)\n return userDashboards",
"async def get_storage_locations(user_id: UserID):",
"def getSessionByUsername(self, username):\n match = []\n for session in self.sessions:\n if (session.identifier[1] == username):\n match.append(session)\n return match",
"def _get_all_dir_ids(site_id):\n result = None\n querystring = 'select id from {} where site_id = %s;'.format(TABLES[2])\n res = execute_query(querystring, (site_id,))\n if res:\n result = [x[0] for x in res]\n return result",
"def load_session(self, user_id):\n ukey = self.r_key('session', user_id)\n return self.r_server.hgetall(ukey)"
] | [
"0.7234155",
"0.64891356",
"0.6317786",
"0.623503",
"0.60883653",
"0.6014238",
"0.59397954",
"0.5935082",
"0.57786095",
"0.5704666",
"0.5618625",
"0.56115544",
"0.55442506",
"0.553197",
"0.5504939",
"0.54119486",
"0.53995836",
"0.5365967",
"0.53602356",
"0.53527534",
"0.5340456",
"0.5330198",
"0.5288203",
"0.5286841",
"0.52862555",
"0.52782524",
"0.52702016",
"0.5263222",
"0.5248504",
"0.5244204"
] | 0.800213 | 0 |
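The docstring above describes folders named "<user_id>_session_<n>" but the accepted document returns the raw folder names. A sketch, assuming that naming convention holds for every entry under the user's directory, extracts just the numeric session id; the function and parameter names are illustrative.

import os
from typing import List


def get_session_numbers(user_id: str, root: str = "public_dataset") -> List[str]:
    # "100669_session_13" -> "13"; entries that do not match the pattern
    # (e.g. .DS_Store) are skipped by the prefix check.
    prefix = f"{user_id}_session_"
    return [
        name[len(prefix):]
        for name in os.listdir(os.path.join(root, user_id))
        if name.startswith(prefix)
    ]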
Get all session ids for a specific user and task based on the folder structure, e.g. "public_dataset/100669/100669_session_13" has user_id=100669 and session_id=13. | def get_user_session_ids_for_task(user_id: str, task_name: str) -> List[str]:
listOfSessions = os.listdir('Plots/Research/'+user_id+'/'+task_name)
try:
listOfSessions.remove('.DS_Store')
except:
pass
return listOfSessions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_user_session_ids(user_id: str) -> List[str]:\n listOfSessions = os.listdir('public_dataset/'+user_id)\n try:\n listOfSessions.remove('.DS_Store')\n except:\n pass\n return listOfSessions",
"def local_user_ids(steam):\n if steam is None:\n return None\n # The userdata directory, at the top level, just contains a single\n # subdirectory for every user which has logged into this system (and\n # therefore that Steam has data for)\n return os.listdir(steam.userdata_directory)",
"def get_sub_folders(session, ds_browser, ds_path):\n search_task = session._call_method(\n session._get_vim(),\n \"SearchDatastore_Task\",\n ds_browser,\n datastorePath=ds_path)\n try:\n task_info = session._wait_for_task(search_task)\n except error_util.FileNotFoundException:\n return set()\n # populate the folder entries\n if hasattr(task_info.result, 'file'):\n return set([file.path for file in task_info.result.file])\n return set()",
"def get_user_folders_dict(user_id):\n return { folder['full_name'] : folder['id'] for folder in canvas_api.pull_folders(user_id) }",
"def _create_session_data(self, abs_path, sess_root):\n sess_path = os.path.join(abs_path, sess_root)\n if not os.path.exists(sess_path):\n os.makedirs(sess_path)\n sess_id = len(os.listdir(sess_path))\n sess_path = os.path.join(sess_path, str(sess_id))\n print(\"SESSION PATH:\", sess_path)\n print(\"SESSION ID:\", sess_id) \n return sess_id, sess_path",
"def get_user_ids(session, access_token):\n endpoint = \"https://graph.microsoft.com/v1.0/users?$select=id\"\n r = session.get(endpoint, headers={\"Authorization\": \"Bearer \" + access_token})\n response = json.loads(r.text)\n return response[\"value\"]",
"def get_user_ids() -> List[str]:\n listOfFiles = os.listdir('public_dataset')\n listOfFiles.remove('data_description.pdf')\n try:\n listOfFiles.remove('.DS_Store')\n except:\n pass\n return listOfFiles",
"def build_rawdata(user_id):\n path = 'wattbikesessions/'\n files = os.listdir(path+user_id+'/')\n rawdata = []\n for file in files:\n try:\n rawdata.append(pd.read_pickle(path + file))\n except Exception:\n print('Could not load:',file)\n continue\n return preprocess.load_session_data(rawdata)",
"def transform_to_sessions(events, uid):\n \n class Sessions:\n values = {\n 'id': [], 'size_ts': [],\n 'interruptions': [], 'n_inte': [], 'n_events': [], 'start_time': [], 'end_time': [],\n 'user': [], 'edition': [], 'text_nav': [], 'high_nav': [], 'file': [], 'refactoring': [],\n 'clean_build': [], 'debug': [], 'tools': [], 'control': [], 'testing': [], 'search': []\n\n }\n\n lists = {\n 'l_interruptions': [], 'l_edition': [], 'l_text_nav': [], 'l_high_nav': [], 'l_file': [],\n 'l_refactoring': [], 'l_clean_build': [], 'l_debug': [], 'l_tools': [], 'l_control': [],\n 'l_testing': [], 'l_search': [], 'size_ts': [], 'n_inte': [], 'id': []\n }\n\n def __init__(self):\n pass\n\n def create_dataframe(self):\n \"\"\"\n Creates a dataframe with the sessions of the user\n \"\"\"\n sessions = pandas.DataFrame().from_dict(self.values)\n sessions_lists = pandas.DataFrame().from_dict(self.lists)\n return sessions, sessions_lists\n \n so = Sessions()\n\n s = set(events['session_id'])\n\n count = 0 # count of sessions\n\n # process all the sessions of the user\n for i in s:\n user_sessions = events[events['session_id'] == i]\n #user_sessions.to_csv(PATH_SESSIONS + '/' + str(uid) + '-' + str(count) + '.csv', index=False)\n\n split_session = [user_sessions]\n\n # if breaking points are found, the session splits into subsessions\n bp = break_points(user_sessions['interval'], user_sessions['minute'])\n if len(bp) > 0:\n bp.append(len(user_sessions))\n split_session = []\n index = 0\n for k in range(0,len(bp)):\n sub_session = user_sessions.iloc[index:(bp[k]+1)]\n split_session.append(sub_session)\n index = bp[k]+1\n\n count += 1\n\n for session in split_session:\n\n minutes = np.unique(np.array(session['minute']))\n l_edits = []\n l_text_nav = []\n l_high_nav = []\n l_refactoring = []\n l_debug = []\n l_file = []\n l_build = []\n l_tools = []\n l_control = []\n l_testing = []\n l_search = []\n l_inte = []\n dur = len(minutes)\n ni = 0\n\n for m in minutes:\n # for every minute of the session, count the number of edition and\n # selection events and the duration of the interruptions (among other types of events)\n subset = session[session['minute'] == m]\n l_edits.append(len(subset[subset['detailed_type'] == 'edit-text']))\n l_high_nav.append(len(subset[subset['detailed_type'] == 'high-nav']))\n l_text_nav.append(len(subset[subset['detailed_type'] == 'text-nav']))\n l_refactoring.append(len(subset[subset['detailed_type'] == 'refactoring']))\n l_debug.append(len(subset[subset['detailed_type'] == 'debug']))\n l_file.append(len(subset[subset['detailed_type'] == 'file']))\n l_build.append(len(subset[subset['detailed_type'] == 'clean-build']))\n l_tools.append(len(subset[subset['detailed_type'] == 'tools']))\n l_control.append(len(subset[subset['detailed_type'] == 'control']))\n l_testing.append(len(subset[subset['detailed_type'] == 'testing']))\n l_search.append(len(subset[subset['detailed_type'] == 'search']))\n\n if subset.iloc[len(subset)-1]['is_interruption']:\n l_inte.append(subset.iloc[len(subset)-1]['interval']//60)\n ni += 1\n else:\n l_inte.append(0)\n\n so.values['id'].append(str(uid) + '-' + str(count))\n so.lists['id'].append(str(uid) + '-' + str(count))\n # print (str(uid) + '-' + str(count))\n so.values['size_ts'].append(dur)\n so.lists['size_ts'].append(dur)\n\n # convert the lists to string\n str_edits = ' '.join(str(e) for e in l_edits)\n str_high_nav = ' '.join(str(e) for e in l_high_nav)\n str_text_nav = ' '.join(str(e) for e in l_text_nav)\n str_refactoring = ' '.join(str(e) for e in 
l_refactoring)\n str_debug = ' '.join(str(e) for e in l_debug)\n str_file = ' '.join(str(e) for e in l_file)\n str_build = ' '.join(str(e) for e in l_build)\n str_tools = ' '.join(str(e) for e in l_tools)\n str_control = ' '.join(str(e) for e in l_control)\n str_testing = ' '.join(str(e) for e in l_testing)\n str_search = ' '.join(str(e) for e in l_search)\n str_inte = ' '.join(str(e) for e in l_inte)\n\n so.values['edition'].append(str_edits)\n so.values['high_nav'].append(str_high_nav)\n so.values['text_nav'].append(str_text_nav)\n so.values['refactoring'].append(str_refactoring)\n so.values['debug'].append(str_debug)\n so.values['file'].append(str_file)\n so.values['clean_build'].append(str_build)\n so.values['tools'].append(str_tools)\n so.values['control'].append(str_control)\n so.values['testing'].append(str_testing)\n so.values['search'].append(str_search)\n so.values['interruptions'].append(str_inte)\n\n so.lists['l_interruptions'].append(l_inte)\n so.lists['l_edition'].append(l_edits)\n so.lists['l_high_nav'].append(l_high_nav)\n so.lists['l_text_nav'].append(l_text_nav)\n so.lists['l_refactoring'].append(l_refactoring)\n so.lists['l_debug'].append(l_debug)\n so.lists['l_file'].append(l_file)\n so.lists['l_clean_build'].append(l_build)\n so.lists['l_tools'].append(l_tools)\n so.lists['l_control'].append(l_control)\n so.lists['l_testing'].append(l_testing)\n so.lists['l_search'].append(l_search)\n so.lists['n_inte'].append(ni)\n \n so.values['n_inte'].append(ni)\n so.values['start_time'].append(session.iloc[0]['datetime'])\n so.values['end_time'].append(session.iloc[len(session) - 1]['datetime'])\n so.values['user'].append(session.iloc[0][0])\n so.values['n_events'].append(len(session[session['detailed_type'] != ' ']))\n\n result, lists = so.create_dataframe()\n return result, lists",
"def _user_in_subid(self, sub_file, wanted_user):\n subid_list = []\n if self.passwd_file:\n (user, dum1, dum2, dum3, dum4, dum5) = \\\n self._get_user_from_file(wanted_user)\n else:\n (user, dum1, dum2, dum3, dum4, dum5) = \\\n self._get_user_from_host(wanted_user)\n try:\n insub = open(sub_file)\n except (IOError, OSError):\n return []\n else:\n for line in insub:\n try:\n (subuser, subid, count) = line.strip().split(':')\n except ValueError:\n continue\n if subuser == user:\n subid_list.extend([(subid, count), ])\n insub.close()\n return subid_list",
"def get_user_folders(user):\n folders = Folder.objects.filter(user=user)\n return folders",
"def gen_static_session(self, set_choice, session_len, num_sessions):\r\n return_seq = []\r\n chosen_set = self.split_data[set_choice]\r\n\r\n for user_index, group in chosen_set.groupby('user_index'):\r\n group = group.reset_index(drop=True)\r\n start_i = group[(group.iloc[0]['datetime'] + pd.Timedelta(hours=session_len * num_sessions)) < group['datetime']]\r\n if start_i.shape[0] > 0:\r\n start_i = start_i.index[0]\r\n for i in range(start_i, group.shape[0]):\r\n session_diff = np.floor((group.iloc[i-1]['datetime'] - group['datetime']).apply(lambda x: x.total_seconds() / 60 / 60 / session_len))\r\n sessions = [group[session_diff == h] for h in range(num_sessions - 1, -1, -1)] # (num_sessions)\r\n return_seq.append([user_index,\r\n [session['poi_index'].to_list() for session in sessions],\r\n [session['timestamp'].to_list() for session in sessions],\r\n group.iloc[i]['poi_index'],\r\n len(sessions)])\r\n return return_seq",
"def gen_static_session(self, set_choice, session_len, num_sessions):\n return_seq = []\n chosen_set = self.split_data[set_choice]\n\n for user_index, group in chosen_set.groupby('user_index'):\n group = group.reset_index(drop=True)\n start_i = group[(group.iloc[0]['datetime'] + pd.Timedelta(hours=session_len * num_sessions)) < group['datetime']]\n if start_i.shape[0] > 0:\n start_i = start_i.index[0]\n for i in range(start_i, group.shape[0]):\n session_diff = np.floor((group.iloc[i-1]['datetime'] - group['datetime']).apply(lambda x: x.total_seconds() / 60 / 60 / session_len))\n sessions = [group[session_diff == h] for h in range(num_sessions - 1, -1, -1)] # (num_sessions)\n return_seq.append([user_index,\n [session['poi_index'].to_list() for session in sessions],\n [session['timestamp'].to_list() for session in sessions],\n group.iloc[i]['poi_index'],\n len(sessions)])\n return return_seq",
"def get_user_session_data(user_id: str, user_session_id: str) -> DataFrame:\n activity_df = read_file(user_id, user_session_id, 'Activity.csv')\n accel_df = read_file(user_id, user_session_id, 'Accelerometer.csv')\n gyro_df = read_file(user_id, user_session_id, 'Gyroscope.csv')\n\n measurements_df = accel_df.join(gyro_df, lsuffix = '_accel', rsuffix = '_gyro')\n full_df = measurements_df.join(activity_df.set_index('ID'), on='ActivityID' + '_accel')\n full_df = full_df.dropna().reset_index(drop = True)\n\n return full_df",
"def readUserSession(datafile):\n for line in datafile:\n pages = line.split()\n total = len(pages)\n # Select user sessions with 2 or more pages\n if total < 2:\n continue\n\n # Exclude outliers by removing extreme long sessions\n if total > 500:\n continue\n\n return [PAGE_CATEGORIES[int(i) - 1] for i in pages]\n return []",
"def get_session_ids(self):\n with self._sessions_lock:\n session_ids = self.sessions.keys()\n\n return session_ids",
"def get_user_sessions(base_url, group_id, token, user_id):\n url = base_url + route_user_sessions.format(user_id=user_id)\n response = requests.get(url, headers=headers(group_id, token))\n return response",
"def _input_data_sessions(self):\n subj_files = self._get_subject_files()\n\n # remove the numbers after the comma in this list\n files = np.array([f.split(',')[0] for f in subj_files])\n\n # make a dict that assign a number to each unique file\n file_int = {f: i for i, f in enumerate(np.unique(files))}\n return np.array([file_int[f] for f in files])",
"def session_path(cls, project, session):\n return google.api_core.path_template.expand(\n 'projects/{project}/agent/sessions/{session}',\n project=project,\n session=session, )",
"def get_num_unique_programs_and_users(path, scenarios):\n unique_programs = set()\n valid_u_ids = set()\n for scenario in scenarios:\n valid_u_ids.add(scenario[0][\"UniqueId\"])\n\n for filename in glob.iglob(path + 'PythonTutor_Input_Data_Sessions_20*/**/*.py', recursive=True):\n print(filename)\n x = filename[filename.rfind('/') + 1:filename.rfind('_')]\n if x not in valid_u_ids: continue\n with open(filename, 'r') as thisfile:\n blob = thisfile.read()\n unique_programs.add(blob)\n\n valid_ips = set()\n not_valid_ips = set()\n count = 0\n for filename in glob.iglob(path + 'PythonTutor_Input_Data_Sessions_20*/**/*.json', recursive=True):\n x = filename[filename.rfind('/') + 1:filename.rfind('_a')]\n if x not in valid_u_ids: continue\n with open(filename, 'r') as thisfile:\n blob = json.load(thisfile)\n if blob[\"ip\"] in not_valid_ips:\n pass\n elif blob[\"ip\"] in valid_ips:\n valid_ips.remove(blob[\"ip\"])\n count += 1\n not_valid_ips.add(blob[\"ip\"])\n else:\n valid_ips.add(blob[\"ip\"])\n \n print(count)\n print(len(valid_ips))\n return unique_programs",
"def get_splitted_IDs(sessionID):\n \n # Debug assertions\n assert (sessionID is not None), \"sessionID is invalid.\"\n assert (isinstance(sessionID, str)), \"sessionID is no string.\"\n \n # Split session ID\n splitted = sessionID.split(':')\n \n # Get scene ID\n sceneID = splitted[0];\n \n # Get avatar ID if available\n if len(splitted) > 1: \n avatarID = splitted[1]\n else:\n avatarID = \"0\";\n \n return sceneID, avatarID",
"def write_all_users(folder_name: str, label: bool):\n make_directory(folder_name)\n for user in get_user_ids():\n print(\"Analysis of user: \" + user)\n subfolder_name = folder_name + \"/\" + user\n make_directory(subfolder_name)\n for session in get_user_session_ids(user):\n print(\"Session: \" + session)\n file_name = subfolder_name + \"/\" + session + \".csv\"\n data = get_feature_vector(user, session)\n if data == None:\n continue\n if label:\n data = [labels] + data\n write_to_csv(data, file_name)",
"def wishlist_sessions(self, user):\n wishlist_key = self.get_wishlist_key(user)\n session_keys = [ndb.Key(urlsafe=wsck) for wsck in\n wishlist_key.get().sessionKeys]\n sessions = ndb.get_multi(session_keys)\n return sessions",
"def get_clusters() -> List[List[str]]:\n all_users = get_user_ids()\n pass",
"def sessions(self, path:str):\n\n if not os.path.exists(path):\n print(path + ' doesn\\'t exist!')\n return\n\n cur_session = None\n cur_time = 0\n last_time = 0\n num_entries = 0\n\n def pop_session():\n delta = last_time - cur_time\n time_str = str(datetime.timedelta(seconds=delta)).split('.')[0]\n print('Session % 3d: % 8d entries | %s elapsed' % (cur_session, num_entries, time_str))\n\n with open(path, 'r') as f:\n for line in f:\n line = line.strip()\n if len(line) > 0:\n js = json.loads(line)\n if js['type'] == 'session':\n if cur_session is not None:\n pop_session()\n cur_time = js['time']\n cur_session = js['session']\n num_entries = 0\n last_time = js['time']\n num_entries += 1\n \n pop_session()",
"def user_ids(self):\r\n raise NotImplementedError",
"def gen_data(data_root, val_ratio = 0.1):\n session_list = os.listdir(data_root)",
"def itersessions(self):\n for x in np.unique(self.sessions):\n yield x, self.loc[self.sessions == x, :]",
"def pipeline(events, directory, sessions_file_name, chunks_file_name):\n start = time.time()\n \n # sort it by user and datetime\n events = events.sort(['user', 'datetime'], ascending=[1, 1])\n \n # group by user\n events = events.groupby([\"user\"])\n \n # Create a file per user\n store_grouped_data(events, directory + 'users')\n \n cores = multiprocessing.cpu_count()\n \n # Split the files into n groups\n files = os.listdir(directory + 'users')\n files = np.array_split(files, cores)\n \n # Transform the data to sessions in parallel\n pool = Pool()\n r = []\n res = DataFrame()\n lists = DataFrame()\n \n for i in range(0, cores):\n r1 = pool.apply_async(pipe_trans_events, [directory + 'users', files[i]])\n r.append(r1)\n\n for i in range(0, cores):\n r1, r2 = r[i].get()\n if len(res) == 0:\n res = r1\n lists = r2\n else:\n res = res.append(r1)\n lists = lists.append(r2)\n \n pool.close()\n pool.terminate()\n end = time.time()\n print 'Parallel execution finished'\n\n # keep sessions with at least 30 minutes of productive time and remove\n # sessions with high proportion of interruptions\n res['prop_inte'] = res['n_inte']/res['size_ts']\n lists['prop_inte'] = lists['n_inte']/lists['size_ts']\n\n res = res[res['size_ts'] >= 30] \n res = res[res['prop_inte'] < 0.5]\n lists = lists[lists['size_ts'] >= 30]\n lists = lists[lists['prop_inte'] < 0.5]\n\n res['session_id'] = range(0,len(res))\n\n res.to_csv(directory + '/' + sessions_file_name, index=False)\n\n # decompose the sessions into chunks of productive time of at least n minutes\n decomposed_sessions = decompose_sessions(res, lists, 4)\n decomposed_sessions.to_csv(directory + '/' + chunks_file_name, index=False)\n\n end = time.time()\n print end-start",
"def load_sessions_raw(data_folder, dataset_id, subject):\r\n runs = []\r\n labels = []\r\n sessions_path = []\r\n bad_sessions_path = []\r\n\r\n # the following verification will be required for handling several datasets\r\n if dataset_id in [\"raw_clean_32\"]:\r\n # stack all subject's sessions\r\n for root, dirs, files in os.walk(os.path.join(data_folder, dataset_id)):\r\n for file in files:\r\n if _isvalid_file(file):\r\n _, subj = _sparse_info_from_file(file.split(\".\")[0])\r\n if subj == subject:\r\n filepath = os.path.join(root, file)\r\n try:\r\n runs.append(mne.io.read_raw_fif(filepath, verbose=\"ERROR\")) # stacks raw\r\n # events=mne.read_events(filepath) # might be usefull for some dataset\r\n\r\n # WARNING: hardcoded label position could be troublesome in some dataset, check carefully\r\n labels.append(file.split('_')[1]) # stacks session name\r\n sessions_path.append(filepath)\r\n subject_data_found = True\r\n except:\r\n print(\"Couldn't load subject \" + subject + \" session \" + file.split('_')[1] + \" at \" + filepath)\r\n bad_sessions_path.append(filepath)\r\n elif dataset_id in [\"Distress2010\",\"NormativeDB\",\"Tinnitus_EEG\"]:\r\n # stack all subject's sessions\r\n for root, dirs, files in os.walk(os.path.join(data_folder, dataset_id)):\r\n for file in files: # TODO: problem here\r\n if _isvalid_file(file):\r\n _, subj = _sparse_info_from_file(file.split(\".\")[0])\r\n if subj == subject:\r\n filepath = os.path.join(root, file)\r\n try:\r\n data = _txt_to_numpy(filepath)\r\n if data.shape[0] is not 19:\r\n print(data.shape)\r\n print(\"WARNING\")\r\n raw = _CreateRaw_T(data)\r\n print(raw)\r\n runs.append(raw) # stacks raw\r\n\r\n # WARNING: hardcoded label position could be troublesome in some dataset, check carefully\r\n labels.append(\"rest\") # stacks session name\r\n\r\n sessions_path.append(filepath)\r\n subject_data_found = True\r\n except:\r\n print(\"Couldn't load subject \" + subject + \" at \" + filepath)\r\n bad_sessions_path.append(filepath)\r\n if len(runs) == 0:\r\n print(\"Couldn't load any session of subject \" + subject + \" in dataset \" + dataset_id)\r\n return runs, labels, sessions_path, bad_sessions_path"
] | [
"0.7328738",
"0.60029215",
"0.5648203",
"0.5603645",
"0.5575271",
"0.55561566",
"0.55468357",
"0.55285466",
"0.5505926",
"0.5423922",
"0.5409763",
"0.5390888",
"0.53891826",
"0.53674144",
"0.53345513",
"0.5258402",
"0.5182635",
"0.5168376",
"0.51601136",
"0.513915",
"0.51321954",
"0.5129717",
"0.512747",
"0.5118621",
"0.5112312",
"0.510498",
"0.5040548",
"0.50302833",
"0.5028525",
"0.5023133"
] | 0.7566754 | 0 |
Combine accelerometer, gyroscope, and activity labels for a specific session of a user | def get_user_session_data(user_id: str, user_session_id: str) -> DataFrame:
activity_df = read_file(user_id, user_session_id, 'Activity.csv')
accel_df = read_file(user_id, user_session_id, 'Accelerometer.csv')
gyro_df = read_file(user_id, user_session_id, 'Gyroscope.csv')
measurements_df = accel_df.join(gyro_df, lsuffix = '_accel', rsuffix = '_gyro')
full_df = measurements_df.join(activity_df.set_index('ID'), on='ActivityID' + '_accel')
full_df = full_df.dropna().reset_index(drop = True)
return full_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def feature_list(user_id: str, session: str, tap_feature: str, task_name: str, window: DataFrame):\n if window.shape[0] == 0:\n return None\n #Add user ID, session, task name\n features = [user_id, session, task_name]\n\n #Add orientation\n orientation = mode(window['Phone_orientation_accel'])\n features.append(orientation)\n\n #Add tap type\n features.append(tap_feature)\n\n lead_file = 'Accelerometer.csv'\n\n time_col = x_columns[lead_file]\n\n before_start = window[window[tap_feature] == 4].index[0]\n during_start = window[window[tap_feature] == 2].index[0]\n after_start = window[window[tap_feature] == 3].index[0] + 1\n after_end = window[window[tap_feature] == 5].index[0]\n\n before = window.loc[before_start : during_start]\n during = window.loc[during_start : after_start]\n after = window.loc[after_start : after_end + 1]\n\n if during.shape[0] < 2:\n # If there were none or one measurements during the tap,\n # add the closest ones\n during = window[during_start - 1 : after_start + 1]\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n # Feature 1: Mean during\n mean_during = mean(during[y])\n\n # Feature 2: SD during\n sd_during = sd(during[y])\n\n # Feature 3: Difference before/after\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n difference_before_after = mean_after - mean_before\n\n # Feature 4: Net change from tap\n net_change_due_to_tap = mean_during - mean_before\n\n # Feature 5: Maximal change from tap\n max_tap = max(during[y])\n max_change = max_tap - mean_before\n\n # Feature 6: Restoration time\n avgDiffs = []\n for j in range(after[y].shape[0]):\n subsequentValues = after[y].iloc[j:]\n subsequentDistances = subsequentValues.map(lambda x: abs(x - mean_before))\n averageDistance = mean(subsequentDistances)\n avgDiffs.append(averageDistance)\n time_of_earliest_restoration = min(avgDiffs)\n restoration_time = time_of_earliest_restoration - during[time_col].iloc[-1]\n\n # Feature 7: Normalized duration\n t_before_center = (before[time_col].iloc[0] + before[time_col].iloc[-1]) / 2 \n t_after_center = (after[time_col].iloc[0] + after[time_col].iloc[-1]) / 2\n normalized_duration = (t_after_center - t_before_center) / (mean_after - mean_before)\n \n # Feature 8: Ndormalized duration max\n t_max_in_tap = during[during[y] == max_tap][time_col].iloc[0]\n normalized_duration_max = (t_after_center - t_max_in_tap) / (mean_after - max_tap)\n\n\n features += [mean_during, sd_during, difference_before_after,\n net_change_due_to_tap, max_change, restoration_time,\n normalized_duration, normalized_duration_max]\n\n if random.choice(range(100))== 0:\n plot_tap('Plots/Project/' + session, before, during, after, time_col)\n \n return features",
"def GenerateMotionProfile(motion_profile_name, file_name, trajectory,\r\n position_units, velocity_units):\r\n # Grab the position, velocity, and duration\r\n path = []\r\n output = open(os.path.join(FILE_OUTPUT_PATH, file_name+\".txt\"), \"w\")\r\n output.write(\"position, velocity, acceration, dt\\n\")\r\n for i in range(len(trajectory)):\r\n path.append([trajectory[i].position * position_units,\r\n trajectory[i].velocity * velocity_units,\r\n 0.0, # No heading is used for single-axis\r\n int(trajectory[i].dt * 1000)])\r\n\r\n output.write(\"%3.4f, %3.4f, %3.4f, %1.3f\\n\" %\r\n (trajectory[i].position, trajectory[i].velocity,\r\n trajectory[i].acceleration, trajectory[i].dt))\r\n\r\n output.close()\r\n\r\n # Dump the path into a pickle file which will be read up later by the RoboRIO robot code\r\n with open(os.path.join(motion_profile_name, file_name+\".pickle\"), \"wb\") as fp:\r\n pickle.dump(path, fp)\r\n\r\n # Plot the data for review\r\n plt.figure()\r\n plt.title(\"Trajectory(Native Units)\")\r\n plt.plot([segment.y * position_units for segment in trajectory],\r\n [segment.x * position_units for segment in trajectory],\r\n marker='.', color='b')\r\n x = list(i * (trajectory[i].dt) for i, _ in enumerate(trajectory))\r\n\r\n # Plot the velocity and acceleration and look for any discontinuities\r\n plt.figure()\r\n plt.subplot(2, 1, 1)\r\n plt.title(\"Velocity\")\r\n plt.plot(x, [segment.velocity for segment in trajectory], marker='.', color='r',\r\n label='velocity')\r\n plt.grid()\r\n plt.subplot(2, 1, 2)\r\n plt.title(\"Acceleration\")\r\n plt.plot(x, [segment.acceleration for segment in trajectory], marker='.', color='b',\r\n label='acceration')\r\n plt.grid()\r\n plt.tight_layout()\r\n plt.show()",
"def read_values(self):\n temp, acc, gyro = self.read_ag_data()\n tempc = lsm9ds1.TEMPC_0 + temp * lsm9ds1.TEMP_SENSOR_SCALE\n tempf = (tempc * 9/5) + 32\n acc = [c * lsm9ds1.ACC_SENSOR_SCALE for c in acc]\n gyro = [g * lsm9ds1.DPS_SENSOR_SCALE for g in gyro]\n return tempf, acc, gyro",
"def action_session_user_stats(args, config, db, wdb):\n\n wdb.execute('''CREATE OR REPLACE VIEW analysis_session_users AS\n (SELECT DISTINCT\n analysis_session_requests.session_id as session_id,\n analysis_requestlog_combined.user_sid as user_sid\n FROM analysis_requestlog_combined, analysis_session_requests\n WHERE analysis_requestlog_combined.id = analysis_session_requests.request_id\n )\n ''')\n wdb.commit()\n\n # How many sessions did each user have?\n wdb.execute('''CREATE OR REPLACE VIEW analysis_session_count_per_user AS (\n SELECT\n analysis_session_users.user_sid,\n count(analysis_session_users.session_id) as session_count\n FROM analysis_session_users, user\n WHERE analysis_session_users.user_sid = user.user_name\n GROUP BY analysis_session_users.user_sid\n );''')\n wdb.commit()\n\n user_ids = db.simple_query('SELECT user_sid FROM analysis_session_users')\n sessions_per_user = collections.Counter(user_ids)\n sessions_per_user['anonymous'] = sessions_per_user[None]\n del sessions_per_user[None]\n\n write_data('user_session_counts', {\n 'data': dict(sessions_per_user.most_common()),\n })\n reverse_counts = collections.Counter(\n sessions_per_user.values()).most_common()\n write_data('user_session_counts_reverse', {\n 'data': list(reverse_counts),\n })",
"def read_ag_data(self):\n data = self.ag.read_bytes(Register.OUT_TEMP_L, 14)\n temp = lsm9ds1.to_int16(data[0:2])\n gyro = lsm9ds1.to_vector_left_to_right_hand_rule(data[2:8])\n acc = lsm9ds1.to_vector_left_to_right_hand_rule(data[8:14])\n return temp, acc, gyro",
"def _log_session_header(self, session_data:dict):\n info = {}\n info['type'] = 'session'\n info['session'] = self.session\n\n info['data'] = session_data\n\n if self.log_gpu_stats:\n keys = ['idx', 'name', 'uuid', 'pwr_cap', 'mem_total']\n\n gpus = gpu_info()\n info['gpus'] = [{k: gpus[i][k] for k in keys} for i in self.visible_gpus]\n \n if self.log_time:\n info['time'] = time.time()\n\n out = json.dumps(info) + '\\n'\n\n with open(self.log_path, 'a') as f:\n f.write(out)",
"def update(self):\n \n self.accelerometer()\n self.magnetometer()",
"def display_metrics(self):\n metrics = client.user_metrics(self.user_name.get())\n messagebox.showinfo(\"Metrics\", metrics)",
"def add_sample(self, time_received, current_label, emg_list, accel_1, accel_2, accel_3, gyro_1, gyro_2,\n gyro_3, orient_w, orient_x, orient_y, orient_z):\n\n self.add_data_lock.lock()\n\n self.timestamps.append(time_received)\n self.labels.append(current_label)\n\n for i, emg_channel in enumerate(emg_list):\n self.emg[i].append(emg_channel)\n\n self.accel[0].append(accel_1 / MYOHW_ACCELEROMETER_SCALE)\n self.accel[1].append(accel_2 / MYOHW_ACCELEROMETER_SCALE)\n self.accel[2].append(accel_3 / MYOHW_ACCELEROMETER_SCALE)\n\n self.gyro[0].append(gyro_1 / MYOHW_GYROSCOPE_SCALE)\n self.gyro[1].append(gyro_2 / MYOHW_GYROSCOPE_SCALE)\n self.gyro[2].append(gyro_3 / MYOHW_GYROSCOPE_SCALE)\n\n self.orient[0].append(orient_w / MYOHW_ORIENTATION_SCALE)\n self.orient[1].append(orient_x / MYOHW_ORIENTATION_SCALE)\n self.orient[2].append(orient_y / MYOHW_ORIENTATION_SCALE)\n self.orient[3].append(orient_z / MYOHW_ORIENTATION_SCALE)\n\n self.sync_data(self.is_master)\n\n self.add_data_lock.unlock()",
"def system_session(self):\n self.user['kernel'] = self.packages['kernel'][self.user['kernel']]\n\n # Set cpu parameters\n if 'intel' in self.system['cpu'].lower():\n self.user['cpu'] = {'name': self.system['cpu'],\n 'microcode': self.packages['microcode'][0]}\n elif 'AMD' in self.system['cpu']:\n self.user['cpu'] = {'name': self.system['cpu'],\n 'microcode': self.packages['microcode'][1]}\n else:\n self.user['cpu'] = {'name': self.system['cpu'], 'microcode': None}\n\n # Crypt and append passwords\n rootpasswd = crypt(self.user['root_passwd'], mksalt(METHOD_SHA512))\n userpasswd = crypt(self.user['user_passwd'], mksalt(METHOD_SHA512))\n self.user['passwords'] = {'root': rootpasswd, 'user': userpasswd}\n\n # Set keymap\n if 'keymap' not in self.system:\n self.user['keymap'] = self.user['language'].split('_')[0]\n else:\n self.user['keymap'] = self.system['keymap']\n\n # Append NTFS packages\n self.user['ntfs'] = self.system['ntfs']\n if self.system['ntfs'] is True:\n self.user['ntfs'] = self.packages['ntfs']\n\n # Set system firmware\n self.user['firmware'] = {'type': self.system['firmware'],\n 'version': self.system['efi'],\n 'driver': self.user['firmware']}\n\n # Append firmware packages\n if self.user['firmware']['driver'] is True:\n self.user['firmware']['driver'] = self.packages['firmware']\n\n # Set mirrorlist\n self.user['mirrorlist'] = self.system['mirrorlist']",
"def _get_observation(self, session):\n object_data = session[SESSION_OBJ_2D]\n sess_len = session[SESSION_LEN]\n # print (object_data)\n # print (sess_len)\n\n object_1_name, object_2_name = feature_utils.get_most_active_objects_interval(object_data, object_data.keys(), 0, sess_len)\n\n features = []\n\n for name in [object_1_name, object_2_name]:\n for frame in [-2, -1]:\n object_data[name][frame].transform.position\n features.append(object_data[name][frame].transform.get_feat())\n\n return np.concatenate( features ).flatten()",
"def session():\n \n # ind is a list of dictionaries for the actions. \n ind=[]\n for i in range(IND_INIT_SIZE):\n ind.append(action())\n ind.sort(key=lambda r: r[\"date\"]) # sorts the sequences by date of action\n \n beginning=ind[0]['date']\n feature_vect=creator.Individual()\n feature_vect.append(beginning.hour)\n for i in range(5):\n feature_vect.append(0)\n\n for act in ind:\n duration=act['date']-beginning\n if act['type']=='logon':\n feature_vect[2]+=1\n elif act['type']=='email' and act['activity']=='Send':\n feature_vect[3]+=1\n elif act['type']=='file' and (act[\"to_removable_media\"]==True or act[\"from_removable_media\"]==True):\n feature_vect[4]=+1\n elif act[\"type\"]==\"http\":\n feature_vect[5]+=1\n\n feature_vect[1]=duration.total_seconds()/60 # the duration is in minutes\n \n # Normalize the vector\n maxFV=max(feature_vect)\n for i in range(len(feature_vect)):\n feature_vect[i]/=maxFV\n \n return feature_vect",
"def analysis_dev_sec(self):\n #calc the date\n time_now = int(time.time())\n time_local = time.localtime(time_now)\n date = time.strftime(\"%Y-%m-%d\",time_local)\n sum_cpu_ratio = 0\n sum_gpu_mem_size = 0\n # key: time key\n key_re_time = \"[0-9]+ [0-9]+:[0-9]+:[0-9]+ 20[12][][0-9]\"\n # key: temperature key\n key_re_temper = \"[0-9]+C\"\n # key: gpu percent key\n key_re_percent = \"[0-9]+%\"\n # key: gpu mem key\n key_re_mem = \"%s\" % self.pid\n key_re_mem_null = \"No running processes found\"\n # key: line ending key\n key_ending = \"====ending====\"\n\n new_gpu_data_count = 0\n sum_gpu_usage_percent_all = 0\n for line in self.file_gpu.readlines():\n if re.search(key_re_time, line):\n # time own unit\n # 1. colect the gpu time info\n final_time = date + \" \" + line.split()[3]\n self.gpu_pertime.append(final_time)\n elif re.search(key_re_temper, line) and re.search(key_re_percent, line):\n #print \"2222, data_line: %s\" % line\n # 2. colect the gpu temperature info\n # 3. colect the gpu usage percentage info\n temper = float(line.split()[2].rstrip(\"C\"))\n gpu_usage = float(line.split()[12].rstrip(\"%\"))\n if new_gpu_data_count == 0:\n self.gpu_temper_1.append(temper)\n self.gpu_usage_percent_1.append(gpu_usage)\n elif new_gpu_data_count == 1:\n self.gpu_temper_2.append(temper)\n self.gpu_usage_percent_2.append(gpu_usage)\n elif new_gpu_data_count == 2:\n self.gpu_temper_3.append(temper)\n self.gpu_usage_percent_3.append(gpu_usage)\n elif new_gpu_data_count == 3:\n self.gpu_temper_4.append(temper)\n self.gpu_usage_percent_4.append(gpu_usage)\n new_gpu_data_count += 1\n elif re.search(key_re_mem, line) or re.search(key_re_mem_null, line):\n # 4. colect the gpu mem info\n this_gpu_num = line.split()[1]\n if \"MiB\" in line.split()[5]:\n this_gpu_mem = float(line.split()[5].strip(\"MiB\"))\n # TODO_this: if there have other unit\n\n if this_gpu_num == \"0\":\n self.gpu_mem_1.append(this_gpu_mem)\n elif this_gpu_num == \"1\":\n self.gpu_mem_2.append(this_gpu_mem)\n elif this_gpu_num == \"2\":\n self.gpu_mem_3.append(this_gpu_mem)\n elif this_gpu_num == \"3\":\n self.gpu_mem_4.append(this_gpu_mem)\n elif this_gpu_num == \"No\":\n self.gpu_mem_1.append(0)\n self.gpu_mem_2.append(0)\n self.gpu_mem_3.append(0)\n self.gpu_mem_4.append(0)\n \n elif re.search(key_ending, line):\n # control unit\n # 1.complete the gpu_mem list\n max_len_gpu_mem = max(len(self.gpu_mem_4), len(self.gpu_mem_3), len(self.gpu_mem_2), len(self.gpu_mem_1))\n min_len_gpu_mem = min(len(self.gpu_mem_4), len(self.gpu_mem_3), len(self.gpu_mem_2), len(self.gpu_mem_1))\n if max_len_gpu_mem != min_len_gpu_mem:\n if len(self.gpu_mem_1) != max_len_gpu_mem:\n self.gpu_mem_1.append(0)\n if len(self.gpu_mem_2) != max_len_gpu_mem:\n self.gpu_mem_2.append(0)\n if len(self.gpu_mem_3) != max_len_gpu_mem:\n self.gpu_mem_3.append(0)\n if len(self.gpu_mem_4) != max_len_gpu_mem:\n self.gpu_mem_4.append(0)\n new_gpu_data_count = 0\n\n # ! 
because all the list is equal\n for i in range(len(self.gpu_mem_1)):\n self.gpu_usage_percent_all.append(self.gpu_usage_percent_1[i] + self.gpu_usage_percent_2[i] + self.gpu_usage_percent_3[i] + self.gpu_usage_percent_4[i])\n\n #self.gpu_mem_all.append(self.gpu_mem_1[i] + self.gpu_mem_2[i] + self.gpu_mem_3[i] + self.gpu_mem_4[i])\n self.gpu_mem_all.append(self.gpu_mem_1[i] + self.gpu_mem_2[i] + self.gpu_mem_3[i] + self.gpu_mem_4[i])\n sum_gpu_mem_size += max(self.gpu_mem_1[i], self.gpu_mem_2[i], self.gpu_mem_3[i], self.gpu_mem_4[i])\n\n self.gpu_temper_max.append(max(self.gpu_temper_1[i] ,self.gpu_temper_2[i] ,self.gpu_temper_3[i] ,self.gpu_temper_4[i]))\n\n version_gpu_usage_percent_all = max(self.gpu_usage_percent_all)\n\n version_gpu_mem_all = max(self.gpu_mem_all)\n version_gpu_mem_avg = round(sum_gpu_mem_size/len(self.gpu_mem_all), 2)\n\n version_gpu_temper_max = max(self.gpu_temper_max)\n\n print \"version_gpu_usage_percent_all: %s\" % version_gpu_usage_percent_all\n print \"version_gpu_mem_all: %s\" % version_gpu_mem_all\n print \"version_gpu_mem_avg: %s\" % version_gpu_mem_avg\n print \"version_gpu_temper_max: %s\" % version_gpu_temper_max\n\n # insert into database: nvidia_list_1sec\n if self.db_onoff == \"on\":\n # insert into database: nvidia_list_1sec_avg\n self.mysql.insert_table_sql_nvidia_version(self.time_sql, version_gpu_usage_percent_all, version_gpu_mem_avg, version_gpu_temper_max)\n # insert into database: nvidia_list_1sec_max\n #self.mysql.insert_table_sql_nvidia_version(self.time_sql, version_gpu_usage_percent_all, version_gpu_mem_all, version_gpu_temper_max)",
"def read_track_user_annotations(self):\r\n\r\n # Check existence of tracking results\r\n\r\n if(len(self.tracked_faces) == 0):\r\n\r\n # Try to load YAML file\r\n if(os.path.exists(self.track_file_path)):\r\n\r\n print 'Loading YAML file with tracking results'\r\n\r\n with open(self.track_file_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n\r\n else:\r\n\r\n print 'Warning! No tracking results found!'\r\n\r\n return\r\n\r\n user_ann_path = os.path.join(\r\n self.track_path, ce.FACE_RECOGNITION_USER_ANNOTATIONS)\r\n\r\n # Create directory for user annotations\r\n\r\n if(not(os.path.exists(user_ann_path))):\r\n\r\n os.makedirs(user_ann_path)\r\n\r\n print '\\n\\n### User annotations ###\\n'\r\n\r\n raw_input(\"Press Enter when you are ready to order key frames...\")\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n raw_input(\"Order key frames, than press Enter to continue...\")\r\n\r\n auto_p_counter = 0\r\n\r\n user_rec_faces = []\r\n\r\n # Iterate through tracked faces\r\n for auto_p_dict in self.tracked_faces:\r\n\r\n auto_p_dict[c.ANN_TAG_KEY] = c.UNDEFINED_TAG\r\n\r\n found = False\r\n # Search person in directory with user annotations\r\n for user_tag in os.listdir(user_ann_path):\r\n\r\n user_p_path = os.path.join(user_ann_path, user_tag)\r\n\r\n # Iterate though all images in directory\r\n for user_p_image in os.listdir(user_p_path):\r\n\r\n user_p_counter = os.path.splitext(user_p_image)[0]\r\n\r\n formatted_auto_p_counter = '%07d' % auto_p_counter\r\n\r\n if(user_p_counter == formatted_auto_p_counter):\r\n\r\n auto_p_dict[c.ANN_TAG_KEY] = user_tag\r\n\r\n found = True\r\n\r\n break\r\n\r\n if(found):\r\n\r\n break\r\n\r\n user_rec_faces.append(auto_p_dict)\r\n\r\n auto_p_counter = auto_p_counter + 1\r\n\r\n self.tracked_faces = user_rec_faces\r\n\r\n # Save recognition result in YAML file\r\n utils.save_YAML_file(self.track_file_path, self.tracked_faces)\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for user annotation:', time_in_seconds, 's\\n'\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)",
"def read_user_annotations(self):\r\n\r\n # Check existence of clustering results\r\n\r\n if len(self.recognized_faces) == 0:\r\n\r\n # Try to load YAML files\r\n if os.path.exists(self.cluster_files_path):\r\n\r\n print 'Loading YAML files with clustering results'\r\n logger.debug('Loading YAML file with clustering results')\r\n\r\n self.recognized_faces = []\r\n for yaml_file in os.listdir(self.cluster_files_path):\r\n yaml_file_path = os.path.join(\r\n self.cluster_files_path, yaml_file)\r\n with open(yaml_file_path) as f:\r\n self.recognized_faces.append(yaml.load(f))\r\n\r\n print 'YAML file with clustering results loaded'\r\n logger.debug('YAML file with clustering results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! No clustering results found!'\r\n logger.warning('No clustering results found!')\r\n\r\n return\r\n\r\n user_ann_path = os.path.join(\r\n self.cluster_path, ce.FACE_RECOGNITION_USER_ANNOTATIONS)\r\n\r\n # Create directory for user annotations\r\n\r\n if(not(os.path.exists(user_ann_path))):\r\n\r\n os.makedirs(user_ann_path)\r\n\r\n print '\\n\\n### User annotations ###\\n'\r\n\r\n raw_input(\"Press Enter when you are ready to order key frames...\")\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n raw_input(\"Order key frames, than press Enter to continue...\")\r\n\r\n auto_p_counter = 0\r\n\r\n user_rec_faces = []\r\n\r\n # Iterate through automatic recognized faces\r\n for auto_p_dict in self.recognized_faces:\r\n\r\n auto_p_dict[c.ASSIGNED_TAG_KEY] = c.UNDEFINED_TAG\r\n\r\n found = False\r\n # Search person in directory with user annotations\r\n for user_tag in os.listdir(user_ann_path):\r\n\r\n user_p_path = os.path.join(user_ann_path, user_tag)\r\n\r\n # Iterate though all images in directory\r\n for user_p_image in os.listdir(user_p_path):\r\n\r\n user_p_counter = os.path.splitext(user_p_image)[0]\r\n\r\n formatted_auto_p_counter = '%07d' % auto_p_counter\r\n\r\n if(user_p_counter == formatted_auto_p_counter):\r\n\r\n auto_p_dict[c.ASSIGNED_TAG_KEY] = user_tag\r\n\r\n found = True\r\n\r\n break\r\n\r\n if(found):\r\n\r\n break\r\n\r\n user_rec_faces.append(auto_p_dict)\r\n\r\n auto_p_counter = auto_p_counter + 1\r\n\r\n self.recognized_faces = user_rec_faces\r\n\r\n # Save clustering result in YAML files\r\n\r\n # Remove previous files\r\n if os.path.exists(self.cluster_files_path):\r\n shutil.rmtree(self.cluster_files_path)\r\n # Create directory for people clustering results\r\n os.makedirs(self.cluster_files_path)\r\n\r\n counter = 0\r\n for person_dict in self.recognized_faces:\r\n yaml_file_name = str(counter) + '.YAML'\r\n yaml_file_path = os.path.join(self.cluster_files_path, yaml_file_name)\r\n utils.save_YAML_file(yaml_file_path, person_dict)\r\n counter += 1\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for user annotation:', time_in_seconds, 's\\n'\r\n\r\n self.anal_times[ce.USER_ANNOTATION_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)",
"def print_stats(session, feature_batch, label_batch, cost, accuracy):\n # TODO: Implement Function\n cst = session.run(cost, feed_dict={x:feature_batch, y:label_batch, keep_prob:1.0})\n acc = session.run(accuracy, feed_dict={x:valid_features, y:valid_labels, keep_prob:1.0}) \n print(\"cost:{:.3f};accuracy:{:.3f} <br>\".format(cst,acc))",
"def get_sentence_frame_acc(intent_preds,intent_labels,slot_preds,slot_labels):\n # Get the intent comparison result\n intent_result = (intent_pres == intent_labels)\n\n # Get the slot comparision result\n slot_result = []\n for preds, labels in zip(slot_preds, slot_labels):\n assert len(preds) == len(labels)\n one_sent_result = True\n for p,l in zip(preds,labels):\n if p != l:\n one_sent_result = False \n break\n slot_result.append(one_sent_result)\n slot_result = np.array(slot_result)\n\n sementic_acc = np.multiply(intent_result,slot_result).mean()\n return {\n \"sementic_frame_acc\": sementic_acc\n }",
"def get_profile_data(self, transceiver, placement):",
"def annotationlabel(request,action=None):\n\n username = request.session['username']\n mode1 = request.session['mode']\n auto_required = request.GET.get('ns_id', None)\n mode = NameSpace.objects.get(ns_id=mode1)\n\n # print('mode',mode1)\n usecase = request.session['usecase']\n # language = request.GET.get('language',request.session['language'])\n type = 'labels'\n\n if request.method == 'GET' and action.lower() == 'user_labels':\n\n \"\"\"GET request: given the report, the labels annotated by the user are returned\"\"\"\n\n language = request.GET.get('language', request.session['language'])\n user_get = request.GET.get('username',username)\n report_id = request.GET.get('report_id')\n report1 = Report.objects.get(id_report = report_id,language = language)\n # if auto_required == 'Robot':\n # mode = NameSpace.objects.get(ns_id=auto_required)\n if auto_required is not None:\n mode_1 = NameSpace.objects.get(ns_id=auto_required)\n else:\n mode_1 = mode\n json_dict = get_user_gt(user_get,mode_1,report1,language,'labels')\n return JsonResponse(json_dict,safe=False)\n\n elif request.method == 'GET' and action.lower() == 'all_labels':\n\n \"\"\" GET request: given the use case, all the labels associated to that usecase are returned. \"\"\"\n\n labels = AnnotationLabel.objects.filter(name=usecase).values('seq_number','label','annotation_mode')\n print(labels)\n json_dict = {}\n if len(labels) > 0:\n\n if mode1 == 'Human' or auto_required == 'Human':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Manual' in el['annotation_mode']:\n # if int(el['seq_number']) > count: # i primi 20 sono inseriti automaticamente\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n if mode1 == 'Robot' or auto_required == 'Robot':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Automatic' in el['annotation_mode']:\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n\n else:\n json_dict['labels'] = []\n\n json_dict['labels'] = sorted(json_dict['labels'], key=lambda json: json['seq_number'])\n print(json_dict)\n return JsonResponse(json_dict)\n\n elif request.method == 'POST' and action.lower() == 'delete':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are removed together with the\n associated groundtruth.\"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting parameters.'}\n return json_response\n to_del = Associate.objects.filter(username=user, ns_id=mode, id_report=report1, language=language)\n if mode1 == 'Human':\n try:\n with transaction.atomic():\n\n if to_del.exists():\n json_response = delete_all_annotation(to_del, user, report1,language, type,mode)\n\n else:\n json_response = {'msg':'nothing to do'}\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred saving the ground_truth and the labels'}\n return JsonResponse(json_response)\n else:\n return JsonResponse(json_response)\n else:\n json_response = restore_robot_annotation(report1, 'labels', user)\n return JsonResponse(json_response)\n\n\n if request.method == 'POST' and action.lower() == 
'insert':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are added in the database and a new \n JSON groundtruth is created. \"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting the parameters.'}\n return JsonResponse(json_response)\n\n labels_to_save = request_body_json['labels']\n # In this case the user manually deletes all the labels (NOT WITH CLEAR BUTTON) and saves.\n if len(labels_to_save) == 0 and mode1 == 'Human':\n\n \"\"\"If there are not labels to save, if there is a ground truth saved in the database, this is removed,\n otherwise no action is performed. \"\"\"\n\n rows = Associate.objects.filter(username = user,ns_id=mode, id_report = report1, language = language)\n if rows.exists():\n try:\n with transaction.atomic():\n json_response = delete_all_annotation(rows,user,report1,language,type,mode)\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred.'}\n return JsonResponse(json_response, status=500)\n else:\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'Nothing to save.'}\n return JsonResponse(json_response)\n\n if len(labels_to_save) == 0 and mode1 == 'Robot':\n\n \"\"\" If there are not labels to save and the name space is Robot no action is performed and the already \n existing ground-truth is kept \"\"\"\n to_del = Associate.objects.filter(id_report=report1, language=language, username=user, ns_id=mode)\n # print('RESTORE')\n json_response = restore_robot_annotation(report1, 'labels',user)\n return JsonResponse(json_response)\n\n update = True\n\n \"\"\" Check if the user's labels she inserted are as many as the rows already present in the db: \n if they are not: update the annotation: the old annotation is replaced with the new one\n if they are: check if the labels existing are those inserted, in this case nothing is done, otherwise \n the current groundtruth is updated. \"\"\"\n\n existing_rows = Associate.objects.filter(username = user,ns_id=mode, id_report =report1,language =language)\n if existing_rows.exists():\n if existing_rows.count() == len(labels_to_save):\n for label in labels_to_save:\n label1 = AnnotationLabel.objects.get(name=usecase, label=label['label'], seq_number=label['seq_number'])\n if not Associate.objects.filter(username=user,ns_id=mode, seq_number=label1.seq_number, label=label1,\n id_report=report1, language=language).exists():\n update = True\n break\n else:\n update = False\n if update == True:\n try:\n with transaction.atomic():\n # Remove all the existing labels inserted by the user for that report. 
The existing ground truth is kept untile the deletion is successful\n to_del = Associate.objects.filter(username=user,ns_id=mode, id_report=report1,language = language)\n delete_all_annotation(to_del,user,report1,language,type,mode)\n\n json_resp_labels = update_annotation_labels(labels_to_save,usecase,user,report1,language,mode)\n\n jsonDict = serialize_gt(type, usecase, username, report_id,language,mode)\n GroundTruthLogFile.objects.create(username=user,ns_id=mode, id_report=report1, language = language,\n gt_json=jsonDict, gt_type=type,insertion_time=Now())\n\n except (Exception) as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred saving the ground_truth '\n 'and the labels, the transaction rolledback'}\n return JsonResponse(json_response)\n\n else:\n return JsonResponse(json_resp_labels)\n else:\n if mode1 == 'Human':\n if not GroundTruthLogFile.objects.filter(gt_type='labels', username=user, ns_id=mode, id_report=report1,\n language=language).exists():\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n json_response = {'message': 'ok'}\n else:\n json_response = {'message': 'no changes detected'}\n return JsonResponse(json_response)\n\n elif mode1 == 'Robot':\n\n \"\"\" In this section the name space Robot is handled: If the user is in the AUTOMATIC MODE and the labels\n she inserts are those annotated by the algorithm, this means that she agrees with the annotation of the \n Robot user. 
The annotation does not change, only the insertion time is changed.\"\"\"\n\n try:\n with transaction.atomic():\n # in questa sezione solo se la gt è uguale a prima, l'utente acconsente alla gt della macchina\n user_robot = User.objects.get(username='Robot_user', ns_id=mode)\n gt_robot = GroundTruthLogFile.objects.filter(username=user_robot, ns_id=mode,\n id_report=report1, language=language,\n gt_type='labels')\n\n gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels')\n if gt_robot.count() == 1 and not gt.exists():\n # if gt_robot[0].insertion_time == gt[0].insertion_time:\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels').delete()\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n except Exception as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred updating labels dates'}\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'dates updated'}\n return JsonResponse(json_response)",
"def get_sensor_data(self):\n # Initialize ROS msgs\n imu_raw_msg = Imu()\n imu_msg = Imu()\n mag_msg = MagneticField()\n temp_msg = Temperature()\n\n # read from sensor\n buf = self.con.receive(registers.BNO055_ACCEL_DATA_X_LSB_ADDR, 45)\n # Publish raw data\n imu_raw_msg.header.stamp = self.node.get_clock().now().to_msg()\n imu_raw_msg.header.frame_id = self.param.frame_id.value\n # TODO: do headers need sequence counters now?\n # imu_raw_msg.header.seq = seq\n\n # TODO: make this an option to publish?\n imu_raw_msg.orientation_covariance = [\n self.param.variance_orientation.value[0], 0.0, 0.0,\n 0.0, self.param.variance_orientation.value[1], 0.0,\n 0.0, 0.0, self.param.variance_orientation.value[2]\n ]\n\n imu_raw_msg.linear_acceleration.x = \\\n self.unpackBytesToFloat(buf[0], buf[1]) / self.param.acc_factor.value\n imu_raw_msg.linear_acceleration.y = \\\n self.unpackBytesToFloat(buf[2], buf[3]) / self.param.acc_factor.value\n imu_raw_msg.linear_acceleration.z = \\\n self.unpackBytesToFloat(buf[4], buf[5]) / self.param.acc_factor.value\n imu_raw_msg.linear_acceleration_covariance = [\n self.param.variance_acc.value[0], 0.0, 0.0,\n 0.0, self.param.variance_acc.value[1], 0.0,\n 0.0, 0.0, self.param.variance_acc.value[2]\n ]\n imu_raw_msg.angular_velocity.x = \\\n self.unpackBytesToFloat(buf[12], buf[13]) / self.param.gyr_factor.value\n imu_raw_msg.angular_velocity.y = \\\n self.unpackBytesToFloat(buf[14], buf[15]) / self.param.gyr_factor.value\n imu_raw_msg.angular_velocity.z = \\\n self.unpackBytesToFloat(buf[16], buf[17]) / self.param.gyr_factor.value\n imu_raw_msg.angular_velocity_covariance = [\n self.param.variance_angular_vel.value[0], 0.0, 0.0,\n 0.0, self.param.variance_angular_vel.value[1], 0.0,\n 0.0, 0.0, self.param.variance_angular_vel.value[2]\n ]\n # node.get_logger().info('Publishing imu message')\n self.pub_imu_raw.publish(imu_raw_msg)\n\n # TODO: make this an option to publish?\n # Publish filtered data\n imu_msg.header.stamp = self.node.get_clock().now().to_msg()\n imu_msg.header.frame_id = self.param.frame_id.value\n\n q = Quaternion()\n # imu_msg.header.seq = seq\n q.w = self.unpackBytesToFloat(buf[24], buf[25])\n q.x = self.unpackBytesToFloat(buf[26], buf[27])\n q.y = self.unpackBytesToFloat(buf[28], buf[29])\n q.z = self.unpackBytesToFloat(buf[30], buf[31])\n # TODO(flynneva): replace with standard normalize() function\n # normalize\n norm = sqrt(q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w)\n imu_msg.orientation.x = q.x / norm\n imu_msg.orientation.y = q.y / norm\n imu_msg.orientation.z = q.z / norm\n imu_msg.orientation.w = q.w / norm\n\n imu_msg.orientation_covariance = imu_raw_msg.orientation_covariance\n\n imu_msg.linear_acceleration.x = \\\n self.unpackBytesToFloat(buf[32], buf[33]) / self.param.acc_factor.value\n imu_msg.linear_acceleration.y = \\\n self.unpackBytesToFloat(buf[34], buf[35]) / self.param.acc_factor.value\n imu_msg.linear_acceleration.z = \\\n self.unpackBytesToFloat(buf[36], buf[37]) / self.param.acc_factor.value\n imu_msg.linear_acceleration_covariance = imu_raw_msg.linear_acceleration_covariance\n imu_msg.angular_velocity.x = \\\n self.unpackBytesToFloat(buf[12], buf[13]) / self.param.gyr_factor.value\n imu_msg.angular_velocity.y = \\\n self.unpackBytesToFloat(buf[14], buf[15]) / self.param.gyr_factor.value\n imu_msg.angular_velocity.z = \\\n self.unpackBytesToFloat(buf[16], buf[17]) / self.param.gyr_factor.value\n imu_msg.angular_velocity_covariance = imu_raw_msg.angular_velocity_covariance\n self.pub_imu.publish(imu_msg)\n\n # Publish 
magnetometer data\n mag_msg.header.stamp = self.node.get_clock().now().to_msg()\n mag_msg.header.frame_id = self.param.frame_id.value\n # mag_msg.header.seq = seq\n mag_msg.magnetic_field.x = \\\n self.unpackBytesToFloat(buf[6], buf[7]) / self.param.mag_factor.value\n mag_msg.magnetic_field.y = \\\n self.unpackBytesToFloat(buf[8], buf[9]) / self.param.mag_factor.value\n mag_msg.magnetic_field.z = \\\n self.unpackBytesToFloat(buf[10], buf[11]) / self.param.mag_factor.value\n mag_msg.magnetic_field_covariance = [\n self.param.variance_mag.value[0], 0.0, 0.0,\n 0.0, self.param.variance_mag.value[1], 0.0,\n 0.0, 0.0, self.param.variance_mag.value[2]\n ]\n self.pub_mag.publish(mag_msg)\n\n # Publish temperature\n temp_msg.header.stamp = self.node.get_clock().now().to_msg()\n temp_msg.header.frame_id = self.param.frame_id.value\n # temp_msg.header.seq = seq\n temp_msg.temperature = float(buf[44])\n self.pub_temp.publish(temp_msg)",
"def vga_session(self):\n gpu_driver = None\n if self.user['gpu_driver'] is True:\n\n # NVIDIA controller - append packages\n if 'nvidia' in self.user['vga_controller'].lower():\n\n if self.user['gpu_proprietary'] is True:\n hardvideo = self.packages['hardvideo'][3]\n\n if self.user['kernel'] == 'linux':\n gpu_driver = self.packages['gpu_driver'][3]\n\n elif self.user['kernel'] == 'linux-lts':\n gpu_driver = self.packages['gpu_driver'][4]\n\n else:\n gpu_driver = self.packages['gpu_driver'][5]\n\n else:\n gpu_driver = self.packages['gpu_driver'][2]\n hardvideo = self.packages['hardvideo'][2]\n\n # AMD Controller - append packages\n elif ('ATI' in self.user['vga_controller']) or \\\n ('AMD' in self.user['vga_controller']):\n\n gpu_driver = self.packages['gpu_driver'][1]\n hardvideo = self.packages['hardvideo'][1]\n\n # Intel controller - append packages\n elif 'intel' in self.user['vga_controller'].lower():\n gpu_driver = self.packages['gpu_driver'][0]\n hardvideo = self.packages['hardvideo'][0]\n\n # Unreconized controller - append packages\n else:\n gpu_driver = self.packages['gpu_driver'][6]\n hardvideo = self.packages['hardvideo'][4]\n\n # Set model with corresponding driver\n self.user['gpu'] = {'model': self.user['vga_controller'],\n 'driver': gpu_driver,\n 'hardvideo': self.user['hardvideo']}\n\n # Set hardware video acceleration\n if self.user['hardvideo'] is True:\n self.user['gpu']['hardvideo'] = hardvideo",
"def regimes(self):\n coupling = self.coupling()\n quantum_theta = self.quantum_theta()\n\n if coupling <= 0.01:\n coupling_str = f\"Weakly coupled regime: Gamma = {coupling}.\"\n elif coupling >= 100:\n coupling_str = f\"Strongly coupled regime: Gamma = {coupling}.\"\n else:\n coupling_str = f\"Intermediate coupling regime: Gamma = {coupling}.\"\n\n if quantum_theta <= 0.01:\n quantum_theta_str = (\n f\"Fermi quantum energy dominant: Theta = {quantum_theta}\"\n )\n elif quantum_theta >= 100:\n quantum_theta_str = (\n f\"Thermal kinetic energy dominant: Theta = {quantum_theta}\"\n )\n else:\n quantum_theta_str = (\n f\"Both Fermi and thermal energy important: Theta = {quantum_theta}\"\n )\n\n return [coupling_str, quantum_theta_str]",
"def updateHUD(self, x, y, pDeg, vMag, vDeg, aMag, aDeg, components, fps):\n self._xPosition = x\n self._yPosition = y\n self._positionDegree = pDeg\n self._velocityMag = vMag\n self._velocityDegree = vDeg\n self._accelerationMag = aMag\n self._accelerationDegree = aDeg\n self.thrusters = filter(lambda c: isinstance(c, Thruster), components)\n self.SASmodules = filter(lambda c: isinstance(c, SAS), components)\n\n graph.drawText((10, 10), \"X Position: \"\n + str(\"{:10.4f}\".format(self._xPosition))\n + \" m\", self._font, (255, 0, 0))\n graph.drawText((10, 30), \"Y Position: \"\n + str(\"{:10.4f}\".format(self._yPosition))\n + \" m\", self._font, (255, 0, 0))\n graph.drawText((10, 50), \"Nose Degree: \"\n + str(\"{:10.4f}\".format(self._positionDegree))\n + \" degrees\", self._font, (255, 0, 0))\n graph.drawText((10, 70), \"Velocity Magnitude: \"\n + str(\"{:10.4f}\".format(self._velocityMag))\n + \" m/s\", self._font, (255, 0, 0))\n graph.drawText((10, 90), \"Velocity Degree: \"\n + str(\"{:10.4f}\".format(self._velocityDegree))\n + \" degrees\", self._font, (255, 0, 0))\n graph.drawText((10, 110), \"Acceleration Magnitude: \"\n + str(\"{:10.4f}\".format(self._accelerationMag))\n + \" m/s^2\", self._font, (255, 0, 0))\n graph.drawText((10, 130), \"Acceleration Degree: \"\n + str(\"{:10.4f}\".format(self._accelerationDegree))\n + \" degrees\", self._font, (255, 0, 0))\n\n numThruster = 0\n for thruster in self.thrusters:\n numThruster = numThruster + 1\n graph.drawText((10, 130 + numThruster*20), \"Thruster Module \"\n + str(numThruster) + \" Fuel Remaining: \"\n + str(\"{:10.0f}\".format(thruster.fuel))\n + \" Liters\", self._font, (255, 0, 0))\n\n numSAS = 0\n for sas in self.SASmodules:\n numSAS = numSAS + 1\n graph.drawText((10, 130 + numThruster*20 + numSAS*20),\n \"SAS Module \" + str(numSAS) + \" Fuel Remaining: \"\n + str(\"{:10.0f}\".format(sas.fuel))\n + \" Liters\", self._font, (255, 0, 0))\n\n graph.drawText((10, 150 + numThruster*20 + numSAS*20),\n \"FPS: \"\n + \"{:0.3f}\".format(fps), self._font, (255, 0, 0))",
"def get_accel_data(self, g = False):\n\t\tx = self.read_i2c_word(self.ACCEL_XOUT0)\n\t\ty = self.read_i2c_word(self.ACCEL_YOUT0)\n\t\tz = self.read_i2c_word(self.ACCEL_ZOUT0)\n\n\t\taccel_scale_modifier = None\n\t\taccel_range = self.read_accel_range(True)\n\n\t\tif accel_range == self.ACCEL_RANGE_2G:\n\t\t\taccel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n\t\telif accel_range == self.ACCEL_RANGE_4G:\n\t\t\taccel_scale_modifier = self.ACCEL_SCALE_MODIFIER_4G\n\t\telif accel_range == self.ACCEL_RANGE_8G:\n\t\t\taccel_scale_modifier = self.ACCEL_SCALE_MODIFIER_8G\n\t\telif accel_range == self.ACCEL_RANGE_16G:\n\t\t\taccel_scale_modifier = self.ACCEL_SCALE_MODIFIER_16G\n\t\telse:\n\t\t\tprint(\"Unkown range - accel_scale_modifier set to self.ACCEL_SCALE_MODIFIER_2G\")\n\t\t\taccel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n\n\t\tx = x / accel_scale_modifier\n\t\ty = y / accel_scale_modifier\n\t\tz = z / accel_scale_modifier\n\n\t\tif g is True:\n\t\t\treturn {'x': x, 'y': y, 'z': z}\n\t\telif g is False:\n\t\t\tx = x * self.GRAVITIY_MS2\n\t\t\ty = y * self.GRAVITIY_MS2\n\t\t\tz = z * self.GRAVITIY_MS2\n\t\t\treturn {'x': x, 'y': y, 'z': z}",
"def run(self):\n\n \"\"\" Detects labels given a GCS path. \"\"\"\n video_client = videointelligence.VideoIntelligenceServiceClient()\n features = [videointelligence.enums.Feature.LABEL_DETECTION]\n operation = video_client.annotate_video(self.input()[0].path, \n features=features)\n print('\\nProcessing video for label annotations:\\n')\n \n result = operation.result(timeout=900)\n \n print(result)\n print('\\nFinished processing.')\n \n segment_labels = result.annotation_results[0].shot_label_annotations\n \n output_csv = \"\"\n for i, segment_label in enumerate(segment_labels):\n print('Video label description: {}'.format(\n segment_label.entity.description))\n for category_entity in segment_label.category_entities:\n print('\\tLabel category description: {}'.format(\n category_entity.description))\n \n for i, segment in enumerate(segment_label.segments):\n start_time = (segment.segment.start_time_offset.seconds +\n segment.segment.start_time_offset.nanos / 1e9)\n end_time = (segment.segment.end_time_offset.seconds +\n segment.segment.end_time_offset.nanos / 1e9)\n positions = '{}s to {}s'.format(start_time, end_time)\n confidence = segment.confidence\n print('\\tSegment {}: {}'.format(i, positions))\n print('\\tConfidence: {}'.format(confidence))\n \n output_csv_line = '{},{},{},{}\\n'.format(\n segment_label.entity.description, \n category_entity.description,\n start_time, \n end_time)\n output_csv = output_csv + output_csv_line\n print(output_csv_line)\n print('\\n')\n print('\\n\\n-------\\n') \n print(output_csv) \n \n # output data\n f = self.output().open('w')\n f.write(output_csv)\n f.close()",
"def AddSessionUtilization(asg_name, arn_scalein, arn_scaleout):\n logger.info('Creating Session Utilization CloudWatch alarm for ASG: ' + asg_name)\n alarmname= asg_name + '-cw-su'\n return common_alarm_func_add(asg_name, \"panSessionUtilization\", lib.get_cw_name_space(stackname, asg_name), arn_scalein, arn_scaleout,\n\t\t\talarmname, \"Session Utilization\", 'Percent')",
"def state(self,session):\n return \"{'kind': '%s', %s}\" \\\n % (self.plugin, \",\".join(\"%r:%r\" % (key,session[key])\n for key in session if key.startswith('tpg.')))",
"def print_global_statistics(stats):\n\n print('Final Results')\n print('LED: {} WED: {}'.format(stats.global_letter_edit_distance,stats.global_word_edit_distance))",
"def getAccelerometer(self):\n cmd = 'A'\n acc = [-1,-1,-1]\n out = self.getData(cmd)\n out = str(out, 'utf-8')\n if self.debug:\n print(out)\n isStart = False\n if out[0] == 'a':\n j = 0\n for i in range(len(out)):\n if isStart:\n if out[i] == ',':\n acc[j] = int(data)\n j = j + 1\n isStart = False\n else:\n data=data+out[i]\n if out[i] == ',':\n isStart = True\n data = ''\n acc[j] = int(data)\n return acc",
"def get_feature_names():\n return ['UserID', 'SessionID', 'TaskName', 'Orientation', 'TapType'] + get_numerical_feature_names()"
] | [
"0.5068103",
"0.48550618",
"0.47635156",
"0.4737972",
"0.47350296",
"0.4701941",
"0.46496907",
"0.46420303",
"0.46216983",
"0.461062",
"0.45927304",
"0.4569354",
"0.4546855",
"0.45429146",
"0.45310783",
"0.4522676",
"0.45213246",
"0.4519421",
"0.45122227",
"0.45062613",
"0.4498076",
"0.44904843",
"0.44878724",
"0.44852453",
"0.44849595",
"0.44689867",
"0.44644788",
"0.44620273",
"0.4451899",
"0.44437993"
] | 0.5247065 | 0 |
Adds the Euclidean norm of the accelerometer and gyroscope data | def add_magnitude_columns(data: DataFrame):
data['M_accel'] = data[['X_accel','Y_accel','Z_accel']].apply(np.linalg.norm, axis = 1)
data['M_gyro'] = data[['X_gyro','Y_gyro','Z_gyro']].apply(np.linalg.norm, axis = 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def norm(self):",
"def norm(self):\n return np.sqrt(np.dot(self._data, self._data))",
"def norm(self):\n raise NotImplementedError",
"def norm(self):\n # TODO: implement\n return",
"def norm(self):\n return math.sqrt(sum([x*x for x in self.mV]))",
"def norm(self):\n\t\treturn np.sqrt(self.normSq())",
"def norm(self):\n\t\treturn math.sqrt(self.norm2())",
"def norm(self, X, G):\n raise NotImplementedError",
"def norm(self):\r\n old_origin = np.array(self.origin)\r\n self.origin = [0, 0, 0]\r\n old_origin[0] = old_origin[0] / self.x[0]\r\n old_origin[1] = old_origin[1] / self.y[1]\r\n old_origin[2] = old_origin[2] / self.z[2]\r\n self.data = ndimage.shift(self.data, -old_origin, mode='wrap')",
"def norm(self):\n return np.linalg.norm(self.values)",
"def norm(self):\n return numpy.linalg.norm(self.values)",
"def normE(self):\n\n # Get the magnitude of E and add it to our data\n E_mag = np.zeros_like(self.data['Ex'], dtype=np.float64)\n for comp in ('Ex', 'Ey', 'Ez'):\n E_mag += np.absolute(self.data[comp])**2\n self.extend_data('normE', np.sqrt(E_mag))\n return np.sqrt(E_mag)",
"def normSq(self):\n\t\treturn self.x*self.x+self.y*self.y",
"def norm( self):\n return self._norm",
"def mag(self):\n return np.linalg.norm(self._vals)",
"def norm(self) -> \"Vector\":\n self.values = tuple(self/self.mag())\n return self",
"def norm2d(self) -> float:\n\n return self.v2ddict.norm2d()",
"def norm(self):\n return np.linalg.norm(self.ravel())",
"def _compute_raw_image_norm(self):\n return np.sum(self._data, dtype=float)",
"def norm(self) -> float:\n return self.squared_norm()**0.5",
"def norm2(self):\n\t\treturn self.x ** 2 + self.y ** 2 + self.z ** 2",
"def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))",
"def norm(self):\n return self.pixels.norm",
"def normsq(self):\n return sum(x**2 for x in self.data)",
"def numeric(self, values):\n return np.linalg.norm(\n values[0] - self.a) / np.linalg.norm(values[0] - self.b)",
"def norm(self):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n if self._dtype == complex:\n def __map(m):\n return m[2].real ** 2 + m[2].imag ** 2\n else:\n def __map(m):\n return m[2] ** 2\n\n n = self._data.map(\n __map\n ).reduce(\n lambda a, b: a + b\n )\n\n return math.sqrt(n)",
"def norm(self):\n return sqrt(self.dot(self))",
"def norm_bound(self, input_mags):\n return np.sum(input_mags)",
"def normalize(self):\n self.vector /= np.linalg.norm(self.vector)",
"def norm(self):\n return math.sqrt(self.dotProduct(self))"
] | [
"0.65737027",
"0.62713337",
"0.61886215",
"0.61312336",
"0.6103076",
"0.6101492",
"0.599665",
"0.5944298",
"0.5938198",
"0.591555",
"0.5907996",
"0.59041804",
"0.5890714",
"0.58686596",
"0.5850779",
"0.58359885",
"0.5789611",
"0.5770915",
"0.57617897",
"0.57581675",
"0.5749528",
"0.5725333",
"0.5692695",
"0.5676956",
"0.567657",
"0.5667801",
"0.56526595",
"0.5648284",
"0.56396353",
"0.5634631"
] | 0.6359628 | 1 |
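A minimal illustrative sketch (not part of the dataset record above): it shows how the row-wise np.linalg.norm call in add_magnitude_columns yields the Euclidean magnitude per sample. The column names follow the record; the numeric values are invented for illustration only.

import numpy as np
import pandas as pd

sample = pd.DataFrame({
    'X_accel': [3.0, 0.0],
    'Y_accel': [4.0, 1.0],
    'Z_accel': [0.0, 2.0],
})

# Applying np.linalg.norm along axis=1 computes sqrt(x^2 + y^2 + z^2) for each row.
sample['M_accel'] = sample[['X_accel', 'Y_accel', 'Z_accel']].apply(np.linalg.norm, axis=1)
print(sample['M_accel'].tolist())  # [5.0, 2.236...]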
Return a dataframe of the relative times at which tap events occur. | def get_tap_events(user_id: str, user_session_id: str) -> DataFrame:
full_df = pd.DataFrame()
for tap_file in tap_file_names:
columns = tap_file_important_columns[tap_file]
data = read_file(user_id, user_session_id, tap_file)
time_data = pd.DataFrame()
time_data['Start'] = data[columns[0]]
time_data['End'] = data[columns[-2]]
time_data['Type'] = tap_file_to_feature_name[tap_file]
full_df = pd.concat([full_df, time_data], ignore_index = True)
return full_df.dropna().sort_values(by = 'Start').reset_index(drop = True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_times(self):\n times = []\n for i in range(1, len(self.events)):\n times.append(self.events[i-1].elapsed_time(self.events[i]))\n return times",
"def getTimes( self ):\n\n pars\t= ( _EVENT_TIME, 0, 0, 0 )\n values = self.adbGetEvent( pars )\n return values[2]",
"def get_timings(self):\n exp=lib.is_Exposure_d8(self.hcam,7)*1E-3\n frame_rate=lib.is_SetFrameRate(self.hcam,0x8000)\n return self.AcqTimes(exp,1./frame_rate)",
"def time_stats(df):",
"def calculate_times(log):\n log['processing_time'] = 0\n log['multitasking'] = 0\n log = log.to_dict('records')\n log = sorted(log, key=lambda x: (x['source'], x['caseid']))\n for _, group in itertools.groupby(log, key=lambda x: (x['source'], x['caseid'])):\n events = list(group)\n events = sorted(events, key=itemgetter('start_timestamp'))\n for i in range(0, len(events)):\n # In one-timestamp approach the first activity of the trace\n # is taken as instantsince there is no previous timestamp\n # to find a range\n dur = (events[i]['end_timestamp'] -\n events[i]['start_timestamp']).total_seconds()\n if i == 0:\n wit = 0\n else:\n wit = (events[i]['start_timestamp'] -\n events[i-1]['end_timestamp']).total_seconds()\n events[i]['waiting_time'] = wit if wit >= 0 else 0\n events[i]['processing_time'] = dur\n return pd.DataFrame.from_dict(log)",
"def trip_duration_stats(df):",
"def getTimes():",
"def getTimes():",
"def getTimes():",
"def getElapseTimes( self ):\n\n pars\t= ( _EVENT_ELAPSED_TIME,0, 0, 0 )\n values = self.adbGetEvent( pars )\n return values[2]",
"def get_exposure_times(self):\n exposure_time = self.meta.exposure.exposure_time\n duration = self.meta.exposure.duration\n start_time = self.meta.exposure.start_time\n mid_time = self.meta.exposure.mid_time\n end_time = self.meta.exposure.end_time\n return (exposure_time, duration, start_time, mid_time, end_time)",
"def probe_times(self):\r\n probe_times = []\r\n for probe in self.__probes.values():\r\n if probe.complete():\r\n if probe.round_trip_time() > 20:\r\n \"Long probe: %s \" %self.__id\r\n probe_times.append(probe.round_trip_time())\r\n return probe_times",
"def get_timed_events(self):\n return self.dispatcher.timed_events",
"def timingColumns(self, results):\n \n pass",
"def frame(self):\n microseconds = np.array(self.results['times']) * 1e6\n return pd.DataFrame(self.results, index=microseconds)",
"def get_times():\n if f.root.stopped:\n return copy.deepcopy(f.root.times)\n else:\n t = timer()\n times = collapse.collapse_times()\n f.root.self_cut += timer() - t\n return times",
"def get_times(self):\n raise NotImplementedError(\"Abstract method not implemented.\")",
"def compute_tap_intervals(xtaps, t, threshold=20):\n import numpy as np\n\n if isinstance(xtaps, list):\n xtaps = np.asarray(xtaps)\n if isinstance(t, list):\n t = np.asarray(t)\n\n # Set time points:\n tap_times = t - t[0]\n\n # Calculate x offset:\n xtaps_offset = xtaps - np.mean(xtaps)\n\n # Find left/right finger \"press\" events:\n dx = xtaps_offset[1:] - xtaps_offset[:-1]\n ipress = np.where(np.abs(dx) > threshold)\n\n # Filter data:\n #xtaps = xtaps[ipress]\n tap_times = tap_times[ipress]\n\n # Find press event intervals:\n tap_intervals = tap_times[1:] - tap_times[:-1]\n\n return ipress, tap_intervals",
"def timings(self):\r\n return self._timings",
"def frameTimes(self):\n sr = self.sampleRate\n offset = self.activeOffset\n stride = self.activeStride\n nf = self.numFrames\n t = np.arange(nf) * (stride[0] / sr) + (offset / sr)\n return t",
"def transit_times(self):\n try:\n self._transit_times\n self.nt\n except:\n return np.array([], dtype=float)\n return self._transit_times[:self.nt]",
"def timings_in_context(samples):\n iso = isolate(samples)\n t = iso.groupby(axis=1, level=0).apply(_timing_in_context)\n t.columns = t.columns.droplevel(0)\n return t",
"def determineTimes():\r\n tm = getLocalTime()\r\n startFadeUpTime = utime.localtime(utime.mktime((tm[0], tm[1], tm[2], WAKEUP_TUPLE[0],\r\n WAKEUP_TUPLE[1] - FADE_TIME, tm[5], tm[6], tm[7])))\r\n startFadeDownTime = utime.localtime(utime.mktime((tm[0], tm[1], tm[2], WAKEUP_TUPLE[0],\r\n WAKEUP_TUPLE[1] + LIT_LENGTH, tm[5], tm[6], tm[7])))\r\n return [startFadeUpTime[3:5], startFadeDownTime[3:5]]",
"def get_time_table(self,day):\n output = []\n for link in self.data[day]:\n df = self.data[link][day]\n for row in df:\n output.append({'actualtime_arr_from':row[0],'acutaltime_arr_to':row[1],\\\n 'routeid':row[2],'link':route})\n from operator import itemgetter\n return sorted(output, key=itemgetter('actualtime_arr_from'))",
"def UTC_times(times, \n trace, \n diff_thres = 30.0):\n # set times values to seconds\n \n #AUTOMATE THIS SECTION!\n #CHECK THAT THIS IS CORRECT\n times = times / trace.stats.sampling_rate\n #remove unwanted parts of times numpy array \n times = times[:,0]\n \n #remove the first instance of time because it is \n #somehow always of the wrong format!\n #times = np.delete(times, 0) \n \n event_times = []\n event = [times[0]]\n \n start_time = trace.stats.starttime\n \n #for item in times:\n # print start_time + item\n\n for i in range(1, len(times)):\n \n # check if two events in times array have a difference < diff_thres, \n #if not, run average of those times, if so append that events to a \n #new events_times list\n \n #time_diff = times[i + 1] - times[i]\n \n time_diff = times[i] - times[i-1]\n\n #save info until events are far enough apart! \n if time_diff < diff_thres:\n\n event.append(times[i])\n \n \n #raise conditional for if events are far enough apart! \n else:\n\n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n\n event_times.append([event_start, event_end])\n \n event = [] \n \n event.append(times[i])\n\n #if event still contains something for any reason, add it to event times\n if len(event) > 0: \n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n event_times.append([event_start, event_end])\n event = [] \n \n\n\n #if len(event_times) == 0 and len(event) > 0 or time_diff > diff_thres and len(event) > 0:\n \n #event_times.append(sum(event) / len(event))\n \n # event_start = event[0] - 2 #minus 5 seconds\n # event_end = event[-1] + 2 #add 5 seconds\n \n # event_times.append([event_start, event_end])\n \n # event = []\n \n #event_times.append(times[i])\n \n # else:\n # event.append(times[i])\n \n\n UTC_events = []\n\n #earthquake length threshold is 10 seconds and above!\n eq_len = 0#5.0\n\n for i in event_times:\n estart = start_time + i[0]\n eend = start_time + i[1]\n \n if eend - estart > eq_len:\n UTC_events.append([estart, eend])\n \n #UTC_events = np.unique(np.asarray(UTC_events))\n\n \n return UTC_events",
"def get_reltriggertimes(self):\n return np.array(self.trtimes)-self.soundstarttime",
"def get_pump_times(self, start):\n pumps_dict = {}\n for pump in self.pumps:\n dataframe_ = pd.DataFrame()\n time = []\n command = []\n for i in range(len(pump.start_intervals)):\n t_on = pump.start_intervals[i].epanet_on_time\n t_off = pump.start_intervals[i].epanet_off_time\n time += [start + t_on * pd.Timedelta(\"1S\"),\n start + t_off * pd.Timedelta(\"1S\")]\n command += [1, 0]\n dataframe_['Time'] = time\n dataframe_[pump.link_id] = command\n pumps_dict[pump.link_id] = dataframe_\n return pumps_dict",
"def recorded_timestamps(self):\n return sorted(self.reception_records.keys())",
"def topairs(self):\n return list(zip(self._times, self._values))",
"def timestamps():\n timestamps = ( # Index\n 1459516622.1, # 0\n 1459516622.2, # 1\n 1459516622.3, # 2\n 1459516623.0, # 3\n 1459516623.1, # 4\n 1459516623.3, # 5\n 1459516624.0, # 6\n )\n return timestamps"
] | [
"0.6776355",
"0.64633656",
"0.6301473",
"0.610719",
"0.597576",
"0.59644234",
"0.59297544",
"0.59297544",
"0.59297544",
"0.589159",
"0.58347505",
"0.5790734",
"0.57504827",
"0.57383585",
"0.5738113",
"0.57378346",
"0.56966096",
"0.56897324",
"0.56846184",
"0.56791633",
"0.5653802",
"0.5645184",
"0.55838233",
"0.5583573",
"0.55813366",
"0.55731356",
"0.5568267",
"0.5564948",
"0.55638635",
"0.55275494"
] | 0.6853497 | 0 |
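A minimal illustrative sketch (not part of the dataset records): it mirrors the concat / dropna / sort_values / reset_index pattern used by get_tap_events above to build one chronologically ordered table of tap events. The tap types and start/end times are invented for illustration only.

import pandas as pd

swipes = pd.DataFrame({'Start': [12.0, 3.5], 'End': [12.4, 3.9], 'Type': 'Swipe'})
keys = pd.DataFrame({'Start': [7.1], 'End': [7.2], 'Type': 'Key'})

# Stack the per-file event tables, drop incomplete rows, and order them by start time.
events = pd.concat([swipes, keys], ignore_index=True)
events = events.dropna().sort_values(by='Start').reset_index(drop=True)
print(events)  # rows ordered by Start: 3.5 (Swipe), 7.1 (Key), 12.0 (Swipe)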
This is a step in processing the accelerometer/gyroscope data. It adds a new column for each kind of tap, indicating whether a tap of that kind is in progress. | def add_columns_for_taps(full_data: DataFrame, tap_data: DataFrame):
for tap_file in tap_file_names:
tap_type = tap_file_to_feature_name[tap_file]
data = tap_data[tap_data['Type'] == tap_type].reset_index(drop = True)
lead_file = 'Accelerometer.csv'
time_column_name = x_columns[lead_file]
data_times = full_data[time_column_name]
data_index = 0
new_column = []
for tap_index in range(data.shape[0]):
try:
while data_times[data_index] < (data['Start'][tap_index] * 1000000):
new_column.append(0) # Not in the midst of a tap
data_index += 1
if data_index >= full_data.shape[0]: break
if data_index >= full_data.shape[0]: break
new_column.append(1) # At least one value in the midst of the tap
data_index += 1
if data_index >= full_data.shape[0]: break
while data_times[data_index] < (data['End'][tap_index] * 1000000):
new_column.append(1)
data_index += 1
if data_index >= full_data.shape[0]: break
if data_index >= full_data.shape[0]: break
except KeyError:
print("Okay, here's that thing again")
return
while data_index < full_data.shape[0]:
new_column.append(0)
data_index += 1
full_data[tap_type] = new_column | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mark_tap_start_and_end(data: DataFrame, delta_in_ms: int):\n\n lead_file = 'Accelerometer.csv'\n time_col = x_columns[lead_file]\n\n delta = delta_in_ms * 1000000\n\n for tap_file in tap_file_names:\n tap_feature = tap_file_to_feature_name[tap_file]\n # Step 1: Put a 2 at the start and a 3 at the end of each event\n\n indices = data[data[tap_feature] == 1].index\n if len(indices) == 0:\n continue\n for i in range(len(indices)):\n if i == 0 or data[time_col][ indices[i] ] - data[time_col][ indices[i - 1] ] > delta:\n data[tap_feature].loc[ indices[i] ] = 2\n if i > 0:\n if data[tap_feature][ indices[i - 1] ] == 1:\n data[tap_feature].loc[ indices[i - 1] ] = 3\n elif indices[i - 1] + 1 < data.shape[0] and data[tap_feature][ indices[i - 1] + 1 ] == 0:\n # In this case, the tap lasted only one time step,\n # so we call the end of the last tap the reading after\n data[tap_feature].loc[ indices[i - 1] + 1 ] = 3\n else:\n #Hopefully this case will never occur, where two consecutive taps\n #are more than delta apart but with no readings in between\n print(\"Something seems off about this data...\")\n print(data[ indices[i] - 5 : indices[i] + 5][[time_col, tap_feature]])\n return\n\n if i == len(indices) - 1:\n # If we're at the end of the list, that must be the end of the last tap\n if data[tap_feature][ indices[i] ] == 1:\n data[tap_feature].loc[ indices[i] ] = 3\n elif indices[i] + 1 < data.shape[0]:\n data[tap_feature].loc[ indices[i] + 1] = 3\n else:\n data[tap_feature].loc[ indices[i] ] = 0 # Remove the miscreant\n print(\"There's an issue with a tap at the very last point of the data...\")\n\n if sum(data[data[tap_feature] == 2][tap_feature]) * 3 != sum(data[data[tap_feature] == 3][tap_feature]) * 2:\n print(\"Uh oh, we placed an unbalanced number of 2's and 3's. 
Thanos would be disappointed.\")\n \n\n # Step 2: Put a 4 at the start of the \"before\" window\n # and a 5 at the end of the \"after\" window\n\n start_indices = data[data[tap_feature] == 2].index\n end_indices = data[data[tap_feature] == 3].index\n if len(start_indices) != len(end_indices):\n print(\"Impossible.\")\n\n #We should be able to get a half_delta on either side of\n #each window\n half_delta = delta // 2\n\n\n for i in range(len(start_indices)):\n find_index_before = start_indices[i]\n range_min = data[time_col][ start_indices[i] ] - half_delta\n while find_index_before > 0 and data[time_col][find_index_before] > range_min \\\n and data[tap_feature][find_index_before - 1] < 2:\n find_index_before -= 1\n if data[tap_feature][find_index_before] == 0:\n data[tap_feature].loc[find_index_before] = 4\n elif data[tap_feature][find_index_before] == 5 and data[tap_feature][find_index_before + 1] == 0:\n # Keep our windows from overlapping - don't put the start of one on\n # top of the end of the previous\n data[tap_feature].loc[find_index_before + 1] = 4\n elif find_index_after == 0 and data[tap_feature][find_index_after + 1] == 0:\n # If we're at the start of the interval, shift what was there forward one\n data[tap_feature].loc[find_index_after + 1] = data[tap_feature].loc[find_index_after]\n data[tap_feature].loc[find_index_after] = 4\n elif find_index_before == start_indices[i] and data[tap_feature][find_index_before - 1] == 5 \\\n and find_index_before >= 2 and data[tap_feature][find_index_before - 2] < 2:\n data[tap_feature].loc[find_index_before - 2] = 5\n data[tap_feature].loc[find_index_before - 1] = 4\n else:\n # The most likely case is that we hit the beginning or end of the\n # interval, in which case we should probably just throw the point out\n print(\"Oh no, that's pretty weird: \", data[tap_feature][find_index_before], find_index_before, start_indices[i])\n \n\n find_index_after = end_indices[i]\n range_max = data[time_col][ end_indices[i] ] + half_delta\n while find_index_after + 1 < data.shape[0] and data[time_col][find_index_after] < range_max \\\n and data[tap_feature][find_index_after + 1] < 2:\n find_index_after += 1\n if data[tap_feature][find_index_after] == 0:\n data[tap_feature].loc[find_index_after] = 5\n elif find_index_after == data.shape[0] - 1 and data[tap_feature][find_index_after - 1] == 0:\n # If we're at the end of the interval, shift what was there back one\n data[tap_feature].loc[find_index_after - 1] = data[tap_feature].loc[find_index_after]\n data[tap_feature].loc[find_index_after] = 5\n elif find_index_after == end_indices[i] and data[tap_feature][find_index_after + 1] < 2:\n data[tap_feature].loc[find_index_before + 1] = 5\n else:\n # See above comment\n print(\"Oh no, that's REALLY weird\", find_index_after, data[tap_feature])",
"def feature_list(user_id: str, session: str, tap_feature: str, task_name: str, window: DataFrame):\n if window.shape[0] == 0:\n return None\n #Add user ID, session, task name\n features = [user_id, session, task_name]\n\n #Add orientation\n orientation = mode(window['Phone_orientation_accel'])\n features.append(orientation)\n\n #Add tap type\n features.append(tap_feature)\n\n lead_file = 'Accelerometer.csv'\n\n time_col = x_columns[lead_file]\n\n before_start = window[window[tap_feature] == 4].index[0]\n during_start = window[window[tap_feature] == 2].index[0]\n after_start = window[window[tap_feature] == 3].index[0] + 1\n after_end = window[window[tap_feature] == 5].index[0]\n\n before = window.loc[before_start : during_start]\n during = window.loc[during_start : after_start]\n after = window.loc[after_start : after_end + 1]\n\n if during.shape[0] < 2:\n # If there were none or one measurements during the tap,\n # add the closest ones\n during = window[during_start - 1 : after_start + 1]\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n # Feature 1: Mean during\n mean_during = mean(during[y])\n\n # Feature 2: SD during\n sd_during = sd(during[y])\n\n # Feature 3: Difference before/after\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n difference_before_after = mean_after - mean_before\n\n # Feature 4: Net change from tap\n net_change_due_to_tap = mean_during - mean_before\n\n # Feature 5: Maximal change from tap\n max_tap = max(during[y])\n max_change = max_tap - mean_before\n\n # Feature 6: Restoration time\n avgDiffs = []\n for j in range(after[y].shape[0]):\n subsequentValues = after[y].iloc[j:]\n subsequentDistances = subsequentValues.map(lambda x: abs(x - mean_before))\n averageDistance = mean(subsequentDistances)\n avgDiffs.append(averageDistance)\n time_of_earliest_restoration = min(avgDiffs)\n restoration_time = time_of_earliest_restoration - during[time_col].iloc[-1]\n\n # Feature 7: Normalized duration\n t_before_center = (before[time_col].iloc[0] + before[time_col].iloc[-1]) / 2 \n t_after_center = (after[time_col].iloc[0] + after[time_col].iloc[-1]) / 2\n normalized_duration = (t_after_center - t_before_center) / (mean_after - mean_before)\n \n # Feature 8: Ndormalized duration max\n t_max_in_tap = during[during[y] == max_tap][time_col].iloc[0]\n normalized_duration_max = (t_after_center - t_max_in_tap) / (mean_after - max_tap)\n\n\n features += [mean_during, sd_during, difference_before_after,\n net_change_due_to_tap, max_change, restoration_time,\n normalized_duration, normalized_duration_max]\n\n if random.choice(range(100))== 0:\n plot_tap('Plots/Project/' + session, before, during, after, time_col)\n \n return features",
"def _handleSensorDataAnalysis(self, data: SensorData):\n\t\tlogging.info(\"[CDA_CALLBACK]----->>>The _handleSensorDataAnalysis method is being called\")\n\t\t\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tif self.enableHandleTempChangeOnDevice and data.getSensorType() == SensorData.TEMP_SENSOR_TYPE:\n\n\t\t\tad = ActuatorData(actuatorType = ActuatorData.HVAC_ACTUATOR_TYPE)\n\t\t\tvalue = data.getValue()\n\t\t\tif value >= self.triggerHvacTempFloor and value <= self.triggerHvacTempCeiling:\n\t\t\t\tad.setCommand(ActuatorData.COMMAND_OFF)\n\t\t\telse:\n\t\t\t\tad.setCommand(ActuatorData.COMMAND_ON)\n\t\t\t\n\t\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)\n\t\t\t\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tif self.enableHandleSoilHumidityChangeOnDevice and data.getSensorType() == SensorData.SOIL_HUMIDITY_SENSOR_TYPE:\n\t\t\t\n\t\t\tad = ActuatorData(actuatorType = ActuatorData.SPRINKLER_ACTUATOR_TYPE)\n\t\t\tvalue = data.getValue()\n\t\t\tif value >= self.triggerWaterDeviceHumiCeiling: \n\t\t\t\tad.setCommand(ActuatorData.COMMAND_OFF)\n\t\t\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)\n\t\t\telif value <= self.triggerWaterDeviceHumiFloor:\n\t\t\t\tad.setCommand(ActuatorData.COMMAND_ON)\n\t\t\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)\n\t\t\t\tself.coapClient.sendGetRequest(ResourceNameEnum.CDA_ACTUATOR_CMD_RESOURCE, False, 5)\n\t\t\telse:\n\t\t\t\tself.coapClient.sendGetRequest(ResourceNameEnum.CDA_CLOUD_ACTUATOR_CMD_RESOURCE, False, 5)",
"def _store_rows(self):\n\n for value in self.values:\n self.counters.append(value['counter'])\n self.timestamps.append(value['timestamp'])\n self.acceleration.append(value['acceleration'])",
"def _on_packet_imu_accelerometer(self, packet):\n\n try:\n packet_dict = self.get_packet_dict(packet)\n if not packet_dict:\n return\n except (ValueError, TypeError) as e:\n return\n\n self.last_accelerometer = packet_dict\n\n self.received_imu = True",
"def compute_tap_features(xtaps, ytaps, t, threshold=20):\n import numpy as np\n\n from mhealthx.extractors.tapping import compute_drift, \\\n compute_tap_intervals, compute_intertap_gap\n from mhealthx.extractors.tapping import TapFeatures as T\n from mhealthx.signals import signal_features\n\n if isinstance(xtaps, list):\n xtaps = np.array(xtaps)\n if isinstance(ytaps, list):\n ytaps = np.array(ytaps)\n if isinstance(t, list):\n t = np.array(t)\n\n # Intertap intervals:\n ipress, intervals = compute_tap_intervals(xtaps, t, threshold)\n\n # Filter data:\n t = t[ipress]\n xtaps = xtaps[ipress]\n ytaps = ytaps[ipress]\n\n # Delta between fastest and slowest intertap intervals:\n T.intertap_gap10, T.intertap_gap25, \\\n T.intertap_gap50 = compute_intertap_gap(intervals)\n\n # Left and right taps and drift:\n mean_x = np.mean(xtaps)\n iL = np.where(xtaps < mean_x)\n iR = np.where(xtaps >= mean_x)\n xL = xtaps[iL]\n yL = ytaps[iL]\n xR = xtaps[iR]\n yR = ytaps[iR]\n driftL = compute_drift(xL, yL)\n driftR = compute_drift(xR, yR)\n\n # Number of taps:\n T.num_taps = xtaps.size\n T.num_taps_left = xL.size\n T.num_taps_right = xR.size\n\n # Time:\n T.time_rng = t[-1] - t[0]\n\n # Intertap interval statistics:\n T.intertap_num, T.intertap_min, T.intertap_max, T.intertap_rng, \\\n T.intertap_avg, T.intertap_std, T.intertap_med, T.intertap_mad, \\\n T.intertap_kurt, T.intertap_skew, T.intertap_cvar, T.intertap_lower25, \\\n T.intertap_upper25, T.intertap_inter50, T.intertap_rms, \\\n T.intertap_entropy, T.intertap_tk_energy = signal_features(intervals)\n\n # Tap statistics:\n T.xL_num, T.xL_min, T.xL_max, T.xL_rng, T.xL_avg, T.xL_std, \\\n T.xL_med, T.xL_mad, T.xL_kurt, T.xL_skew, T.xL_cvar, \\\n T.xL_lower25, T.xL_upper25, T.xL_inter50, T.xL_rms, \\\n T.xL_entropy, T.xL_tk_energy = signal_features(xL)\n\n T.xR_num, T.xR_min, T.xR_max, T.xR_rng, T.xR_avg, T.xR_std, \\\n T.xR_med, T.xR_mad, T.xR_kurt, T.xR_skew, T.xR_cvar, \\\n T.xR_lower25, T.xR_upper25, T.xR_inter50, T.xR_rms, \\\n T.xR_entropy, T.xR_tk_energy = signal_features(xR)\n\n # T.yL_num, T.yL_min, T.yL_max, T.yL_rng, T.yL_avg, T.yL_std, \\\n # T.yL_med, T.yL_mad, T.yL_kurt, T.yL_skew, T.yL_cvar, \\\n # T.yL_lower25, T.yL_upper25, T.yL_inter50, T.yL_rms, \\\n # T.yL_entropy, T.yL_tk_energy = signal_features(yL)\n\n # T.yR_num, T.yR_min, T.yR_max, T.yR_rng, T.yR_avg, T.yR_std, \\\n # T.yR_med, T.yR_mad, T.yR_kurt, T.yR_skew, T.yR_cvar, \\\n # T.yR_lower25, T.yR_upper25, T.yR_inter50, T.yR_rms, \\\n # T.yR_entropy, T.yR_tk_energy = signal_features(yR)\n\n # Drift statistics:\n T.driftL_num, T.driftL_min, T.driftL_max, T.driftL_rng, T.driftL_avg, \\\n T.driftL_std, T.driftL_med, T.driftL_mad, T.driftL_kurt, T.driftL_skew, \\\n T.driftL_cvar, T.driftL_lower25, T.driftL_upper25, T.driftL_inter50, \\\n T.driftL_rms, T.driftL_entropy, T.driftL_tk_energy = \\\n signal_features(driftL)\n\n T.driftR_num, T.driftR_min, T.driftR_max, T.driftR_rng, T.driftR_avg, \\\n T.driftR_std, T.driftR_med, T.driftR_mad, T.driftR_kurt, T.driftR_skew, \\\n T.driftR_cvar, T.driftR_lower25, T.driftR_upper25, T.driftR_inter50, \\\n T.driftR_rms, T.driftR_entropy, T.driftR_tk_energy = \\\n signal_features(driftR)\n\n return T",
"def check(self):\n basic_recognized = 1\n # Scan the array, in order to check if the primitives are recognized correctly\n for frame in self.data_array:\n hmm_name = frame.best_log_probability[0]\n\n if str(basic_recognized) in hmm_name or str(basic_recognized+1) in hmm_name:\n if str(basic_recognized+1) in hmm_name:\n basic_recognized+=1\n else:\n return False\n # Has been recognized the complete gesture? If yes return true else false\n if basic_recognized == self.n_primitives+1:\n return True",
"def _process_data(self):\r\n # Rename columns to match final feature class\r\n self._rename_columns()\r\n # Add point ID column\r\n self._add_pointid()\r\n # Sort rows by transect id and timestamp\r\n self._sort_rows()\r\n # Fill Null records with a value\r\n self._fill_nulls()\r\n # Set site_code to lower case\r\n self._lower_site_code()\r\n # Create survey_id\r\n self._calc_survey_id()\r\n # Calculate nativesg column if at least one of the veg columns is a Native seagrass type\r\n if set(self.veg_columns).intersection(set(NATIVESG_CODES)) > 0:\r\n self.nativesg_columns = list(set(self.veg_columns).intersection(set(NATIVESG_CODES)))\r\n self._calc_nativesg()\r\n #\r",
"def run_tapas(data, queries):\n tokenizer, model = load_model_and_tokenizer()\n table, inputs = prepare_inputs(data, queries, tokenizer)\n predicted_table_cell_coords, predicted_aggregation_operators = generate_predictions(inputs, model, tokenizer)\n aggregation_predictions_string, answers = postprocess_predictions(predicted_aggregation_operators,\n predicted_table_cell_coords, table)\n ans_list = show_answers(queries, answers, aggregation_predictions_string)\n\n print(ans_list)",
"def __process_health(self) -> None:\n status = self.metrics.get(\"Status\", None)\n if status:\n health = status.get(\"Health\", None)\n measurement = \"Health\"\n if health == \"Warning\":\n value = 1\n datapoint = self.__gen_datapoint(measurement, self.label, value)\n self.datapoints.append(datapoint)\n elif health == \"Critical\":\n value = 2\n datapoint = self.__gen_datapoint(measurement, self.label, value)\n self.datapoints.append(datapoint)\n return",
"def _add_READING(self, w2, row):\n assert self.variant_unit, \"Can't call add_READING if self.variant_unit is None\"\n row['READING'] = self.get_attestation(w2, self.variant_unit)\n return True",
"def timingColumns(self, results):\n \n pass",
"def tracking(self) -> None:\n dist, delta_angle, timestamp = self.vision.get_vision_data()\n # collect data only once per loop\n if timestamp is None:\n # self.next_state(\"searching\")\n # print(f\"tracking -> searching {self.vision.get_vision_data()}\")\n self.state = self.searching\n else:\n if abs(delta_angle) > self.find_allowable_angle(dist):\n # print(f\"Telling turret to slew by {delta_angle}\")\n self.turret.slew(delta_angle)\n if self.ready_to_spin():\n # self.next_state(\"firing\")\n # print(f\"tracking -> spining_up {self.vision.get_vision_data()}\")\n self.distance = dist\n self.state = self.spining_up",
"def _track_data_statistics(self, info_l, last_info, episode_len,\n all_stats, maxlen_stats):\n maxlen = get_max_episode_len(self.path)\n start = info_l[0]['extras']\n last_ex = last_info['extras']\n\n if 'cable-shape' in self.path or 'cable-line-notarget' in self.path:\n nb_sides = start['nb_sides']\n frac_beads = last_ex['nb_zone'] / last_ex['nb_beads']\n if episode_len == maxlen:\n maxlen_stats[f'done_{nb_sides}'].append( last_ex['task.done'] )\n maxlen_stats[f'frac_{nb_sides}'].append( frac_beads )\n all_stats[f'done_{nb_sides}'].append( last_ex['task.done'] )\n all_stats[f'frac_{nb_sides}'].append( frac_beads )\n all_stats[f'len_{nb_sides}'].append( episode_len )\n\n elif 'cable-ring' in self.path:\n delta = last_ex['fraction'] - start['fraction']\n percent = last_ex['convex_hull_area'] - start['convex_hull_area']\n percent = 100 * percent / start['convex_hull_area']\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['fraction'].append( last_ex['fraction'] )\n maxlen_stats['fraction_delta'].append( delta )\n maxlen_stats['percent_improve'].append( percent )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['fraction'].append( last_ex['fraction'] )\n all_stats['fraction_delta'].append( delta )\n all_stats['percent_improve'].append( percent )\n\n elif 'cloth-flat' in self.path:\n delta = last_ex['cloth_coverage'] - start['cloth_coverage']\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['coverage_delta'].append( delta )\n maxlen_stats['cloth_coverage'].append( last_ex['cloth_coverage'] )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['coverage_delta'].append( delta )\n all_stats['cloth_coverage'].append( last_ex['cloth_coverage'] )\n\n elif 'cloth-cover' in self.path:\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n\n elif 'bag-alone-open' in self.path:\n delta = last_ex['fraction'] - start['fraction']\n percent = last_ex['convex_hull_area'] - start['convex_hull_area']\n percent = 100 * percent / start['convex_hull_area']\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['fraction'].append( last_ex['fraction'] )\n maxlen_stats['fraction_delta'].append( delta )\n maxlen_stats['percent_improve'].append( percent )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['fraction'].append( last_ex['fraction'] )\n all_stats['fraction_delta'].append( delta )\n all_stats['percent_improve'].append( percent )\n\n elif 'bag-items-easy' in self.path or 'bag-items-hard' in self.path:\n # For this it'd be interesting to see what task stage we're at.\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['task_stage'].append( last_ex['task_stage'] )\n maxlen_stats['zone_items_rew'].append( last_ex['zone_items_rew'] )\n maxlen_stats['zone_beads_rew'].append( last_ex['zone_beads_rew'] )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['task_stage'].append( last_ex['task_stage'] )\n all_stats['zone_items_rew'].append( last_ex['zone_items_rew'] )\n all_stats['zone_beads_rew'].append( last_ex['zone_beads_rew'] )\n\n elif 'bag-color-goal' in self.path:\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['task_stage'].append( last_ex['task_stage'] )\n maxlen_stats['frac_in_target_bag'].append( last_ex['frac_in_target_bag'] )\n maxlen_stats['frac_in_distract_bag'].append( 
last_ex['frac_in_distract_bag'] )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['task_stage'].append( last_ex['task_stage'] )\n all_stats['frac_in_target_bag'].append( last_ex['frac_in_target_bag'] )\n all_stats['frac_in_distract_bag'].append( last_ex['frac_in_distract_bag'] )\n\n else:\n print(f'For: {self.path}, we are not tracking extra stats.')",
"def separate_activity_types(self):\n # Read in the CSV file and make a DataFrame.\n try :\n all_actsDF = pd.read_csv('strava-activities.csv', index_col=\"id\", parse_dates=[\"start_date\", \"start_date_local\"])\n except FileNotFoundError :\n print(\"separate_activity_types couldn't find strava-activities.csv.\")\n else :\n # We need to make sure that all_actsDF has all of the columns that are referenced\n # in the loop below. Otherwise, the code might throw a key error. For example, if someone\n # has no heart rate data at all, stava-activities.csv won't have a max_heartrate column,\n # causing the code to blow up when it looks for that column. So just add empty columns\n # as needed.\n necessary_columns = [\"distance\", \"total_elevation_gain\", \"elapsed_time\", \"moving_time\", \"max_speed(mph)\", \"max_speed(kph)\", \"start_date\", \"elevation_gain(ft)\", \"max_heartrate\"]\n for col in necessary_columns :\n if not col in all_actsDF.columns :\n all_actsDF[col] = np.nan\n\n # Get the list of unique activity types (Ride, Hike, Kayak, etc.)\n act_types = all_actsDF[\"type\"].unique()\n # Get the list of unique years in the data.\n # Extract each year out of the data and sort them.\n years = pd.Series(d.year for d in all_actsDF[\"start_date\"]).unique()\n years.sort()\n\n # Create a dataframe that will hold summary statistics for each activity.\n # The index or the set of rows is the activity types. The columns are the stats\n # we are interested in.\n stats = [\"Total Distance (miles)\", \"Total Distance (km)\", \"Total Elev. Gain (meters)\", \"Total Elev. Gain (ft)\", \"Total Elev. Gain (miles)\", \"Total Elev. Gain (km)\", \"Total Duration (hours)\", \"Total Duration (days)\", \"Average Duration (min)\", \"Total Moving Time (hours)\", \"Total Moving Time (days)\", \"Average Moving Time (min)\", \"Average Speed (mph)\", \"Average Speed (kph)\", \"Max Speed (mph)\", \"Max Speed (kph)\", \"Max Speed Date\", \"Max Elevation Gain(ft)\", \"Max Elevation Gain(m)\", \"Max Elevation Gain Date\", \"Max Heart Rate\", \"Max HR Date\"]\n summaryDF = pd.DataFrame(index=act_types, columns=stats)\n # Loop through all of the activity types and add info into the summary file.\n # Also create a csv for each activity that has the Strava info for that activity only.\n for act in act_types:\n actDF = all_actsDF[all_actsDF[\"type\"] == act]\n actDF.to_csv(act + \".csv\")\n # Add the summary stats\n summaryDF.loc[act, \"Total Distance (miles)\"] = actDF[\"distance\"].sum() * 0.000621371\n summaryDF.loc[act, \"Total Distance (km)\"] = actDF[\"distance\"].sum() / 1000\n summaryDF.loc[act, \"Total Elev. Gain (meters)\"] = actDF[\"total_elevation_gain\"].sum()\n summaryDF.loc[act, \"Total Elev. Gain (ft)\"] = actDF[\"total_elevation_gain\"].sum() * 3.28084\n summaryDF.loc[act, \"Total Elev. Gain (miles)\"] = actDF[\"total_elevation_gain\"].sum() * 3.28084/5280\n summaryDF.loc[act, \"Total Elev. 
Gain (km)\"] = actDF[\"total_elevation_gain\"].sum() / 1000\n summaryDF.loc[act, \"Total Duration (hours)\"] = actDF[\"elapsed_time\"].sum() / 3600\n summaryDF.loc[act, \"Total Duration (days)\"] = actDF[\"elapsed_time\"].sum() / (3600*24)\n summaryDF.loc[act, \"Average Duration (min)\"] = actDF[\"elapsed_time\"].mean() / 60\n summaryDF.loc[act, \"Total Moving Time (hours)\"] = actDF[\"moving_time\"].sum() / 3600\n summaryDF.loc[act, \"Total Moving Time (days)\"] = actDF[\"moving_time\"].sum() / (3600*24)\n summaryDF.loc[act, \"Average Moving Time (min)\"] = actDF[\"moving_time\"].mean() / 60\n summaryDF.loc[act, \"Average Speed (mph)\"] = (actDF[\"distance\"].sum() / actDF[\"moving_time\"].sum()) * 2.23694\n summaryDF.loc[act, \"Average Speed (kph)\"] = (actDF[\"distance\"].sum() / actDF[\"moving_time\"].sum()) * 3.6\n summaryDF.loc[act, \"Max Speed (mph)\"] = actDF[\"max_speed(mph)\"].max()\n summaryDF.loc[act, \"Max Speed (kph)\"] = actDF[\"max_speed(kph)\"].max()\n # We have to be careful anytime we want a specific date that something occured because\n # it may never have occurred and the result may be empty. That's why we do the following\n # five lines.\n s = actDF.loc[actDF[\"max_speed(mph)\"] == actDF[\"max_speed(mph)\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max Speed Date\"] = s.iloc[0].date()\n else :\n summaryDF.loc[act, \"Max Speed Date\"] = None\n summaryDF.loc[act, \"Max Elevation Gain(ft)\"] = actDF[\"elevation_gain(ft)\"].max()\n summaryDF.loc[act, \"Max Elevation Gain(m)\"] = actDF[\"total_elevation_gain\"].max()\n s = actDF.loc[actDF[\"elevation_gain(ft)\"] == actDF[\"elevation_gain(ft)\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max Elevation Gain Date\"] = s.iloc[0].date()\n else :\n summaryDF.loc[act, \"Max Elevation Gain Date\"] = None\n summaryDF.loc[act, \"Max Heart Rate\"] = actDF[\"max_heartrate\"].max()\n # We have to be careful with max heart rate because not all activities will have HR data.\n # The following code makes sure there is HR data before trying to access it.\n s = actDF.loc[actDF[\"max_heartrate\"] == actDF[\"max_heartrate\"].max(), \"start_date\"]\n if not s.empty :\n summaryDF.loc[act, \"Max HR Date\"] = s.iloc[0].date()\n else:\n summaryDF.loc[act, \"Max HR Date\"] = None\n\n # Summarize each activity by year\n act_summaryDF = pd.DataFrame(index=stats, columns = years)\n for y in years :\n subDF = actDF[(actDF[\"start_date\"] >= datetime.datetime(year = y, month = 1, day = 1, tzinfo=pytz.utc)) & (actDF[\"start_date\"] < datetime.datetime(year = y+1, month = 1, day = 1, tzinfo=pytz.utc))]\n # Need to check that we had any of this activity in the year.\n if not subDF.empty :\n act_summaryDF.loc[\"Total Distance (miles)\", y] = subDF[\"distance\"].sum() * 0.000621371\n act_summaryDF.loc[\"Total Distance (km)\", y] = subDF[\"distance\"].sum() / 1000\n act_summaryDF.loc[\"Total Elev. Gain (meters)\", y] = subDF[\"total_elevation_gain\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (ft)\", y] = subDF[\"total_elevation_gain\"].sum() * 3.28084\n act_summaryDF.loc[\"Total Elev. Gain (miles)\", y] = subDF[\"total_elevation_gain\"].sum() * 3.28084/5280\n act_summaryDF.loc[\"Total Elev. 
Gain (km)\", y] = subDF[\"total_elevation_gain\"].sum() / 1000\n act_summaryDF.loc[\"Total Duration (hours)\", y] = subDF[\"elapsed_time\"].sum() / 3600\n act_summaryDF.loc[\"Total Duration (days)\", y] = subDF[\"elapsed_time\"].sum() / (3600*24)\n act_summaryDF.loc[\"Average Duration (min)\", y] = subDF[\"elapsed_time\"].mean() / 60\n act_summaryDF.loc[\"Total Moving Time (hours)\", y] = subDF[\"moving_time\"].sum() / 3600\n act_summaryDF.loc[\"Total Moving Time (days)\", y] = subDF[\"moving_time\"].sum() / (3600*24)\n act_summaryDF.loc[\"Average Moving Time (min)\", y] = subDF[\"moving_time\"].mean() / 60\n act_summaryDF.loc[\"Average Speed (mph)\", y] = (subDF[\"distance\"].sum() / subDF[\"moving_time\"].sum()) * 2.23694\n act_summaryDF.loc[\"Average Speed (kph)\", y] = (subDF[\"distance\"].sum() / subDF[\"moving_time\"].sum()) * 3.6\n act_summaryDF.loc[\"Max Speed (mph)\", y] = subDF[\"max_speed(mph)\"].max()\n act_summaryDF.loc[\"Max Speed (kph)\", y] = subDF[\"max_speed(kph)\"].max()\n s = subDF.loc[subDF[\"max_speed(mph)\"] == subDF[\"max_speed(mph)\"].max(), \"start_date\"]\n if not s.empty:\n act_summaryDF.loc[\"Max Speed Date\", y] = s.iloc[0].date()\n else :\n act_summaryDF.loc[\"Max Speed Date\", y] = None\n\n act_summaryDF.loc[\"Max Elevation Gain(ft)\", y] = subDF[\"elevation_gain(ft)\"].max()\n act_summaryDF.loc[\"Max Elevation Gain(m)\", y] = subDF[\"total_elevation_gain\"].max()\n s = subDF.loc[subDF[\"elevation_gain(ft)\"] == subDF[\"elevation_gain(ft)\"].max(), \"start_date\"]\n if not s.empty :\n act_summaryDF.loc[\"Max Elevation Gain Date\", y] = s.iloc[0].date()\n else :\n act_summaryDF.loc[\"Max Elevation Gain Date\", y] = None\n act_summaryDF.loc[\"Max Heart Rate\", y] = subDF[\"max_heartrate\"].max()\n s = subDF.loc[subDF[\"max_heartrate\"] == subDF[\"max_heartrate\"].max(), \"start_date\"]\n if not s.empty :\n act_summaryDF.loc[\"Max HR Date\", y] = s.iloc[0].date()\n else:\n act_summaryDF.loc[\"Max HR Date\", y] = None\n # Add a few totals\n act_summaryDF.loc[\"Total Distance (miles)\", \"Total\"] = act_summaryDF.loc[\"Total Distance (miles)\"].sum()\n act_summaryDF.loc[\"Total Distance (km)\", \"Total\"] = act_summaryDF.loc[\"Total Distance (km)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (meters)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (meters)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (ft)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (ft)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (miles)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. Gain (miles)\"].sum()\n act_summaryDF.loc[\"Total Elev. Gain (km)\", \"Total\"] = act_summaryDF.loc[\"Total Elev. 
Gain (km)\"].sum()\n act_summaryDF.loc[\"Total Duration (hours)\", \"Total\"] = act_summaryDF.loc[\"Total Duration (hours)\"].sum()\n act_summaryDF.loc[\"Total Duration (days)\", \"Total\"] = act_summaryDF.loc[\"Total Duration (days)\"].sum()\n\n act_summaryDF.loc[\"Average Duration (min)\", \"Total\"] = summaryDF.loc[act, \"Average Duration (min)\"]\n act_summaryDF.loc[\"Total Moving Time (hours)\", \"Total\"] = act_summaryDF.loc[\"Total Moving Time (hours)\"].sum()\n act_summaryDF.loc[\"Total Moving Time (days)\", \"Total\"] = act_summaryDF.loc[\"Total Moving Time (days)\"].sum()\n act_summaryDF.loc[\"Average Moving Time (min)\", \"Total\"] = summaryDF.loc[act, \"Average Moving Time (min)\"]\n act_summaryDF.loc[\"Average Speed (mph)\", \"Total\"] = summaryDF.loc[act, \"Average Speed (mph)\"]\n act_summaryDF.loc[\"Average Speed (kph)\", \"Total\"] = summaryDF.loc[act, \"Average Speed (kph)\"]\n act_summaryDF.loc[\"Max Speed (mph)\", \"Total\"] = act_summaryDF.loc[\"Max Speed (mph)\"].max()\n act_summaryDF.loc[\"Max Speed (kph)\", \"Total\"] = act_summaryDF.loc[\"Max Speed (kph)\"].max()\n act_summaryDF.loc[\"Max Speed Date\", \"Total\"] = summaryDF.loc[act, \"Max Speed Date\"]\n act_summaryDF.loc[\"Max Elevation Gain(ft)\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain(ft)\"]\n act_summaryDF.loc[\"Max Elevation Gain(m)\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain(m)\"]\n act_summaryDF.loc[\"Max Elevation Gain Date\", \"Total\"] = summaryDF.loc[act, \"Max Elevation Gain Date\"]\n act_summaryDF.loc[\"Max Heart Rate\", \"Total\"] = summaryDF.loc[act, \"Max Heart Rate\"]\n act_summaryDF.loc[\"Max HR Date\", \"Total\"] = summaryDF.loc[act, \"Max HR Date\"]\n\n # Print the annual summary\n act_summaryDF.to_csv(act + \"-by-year.csv\")\n\n # Print the summary to a csv\n\n summaryDF.to_csv(\"strava-summary.csv\")",
"def _add_accelerations_to_df(self, route_df, a_prof):\n # print(route_df.head())\n accelerations = self._calculate_acceleration(route_df, a_prof)\n\n #Assign acceleration values to new row in route DataFrame.\n route_df = route_df.assign(\n acceleration=accelerations\n )\n\n return route_df",
"def extract_sensors_data(dataframe, ms_column='ms_ticker',\n time_column = 'Tstamp',\n ppg_columns=['led_1', 'led_2'],\n acc_columns=['acc_x', 'acc_y', 'acc_z']):\n\n sensors_dict = {}\n sensors = dataframe.loc[1:, 1:]\n sensors_columns = dataframe.head(1).values[0]\n sensors_columns = [i.replace(\" \", \"\") for i in sensors_columns if i.find('Index') == -1]\n sensors.columns = sensors_columns\n check_columns_exist(ppg_columns, sensors_columns)\n check_columns_exist(acc_columns, sensors_columns)\n check_columns_exist(ms_column, sensors_columns)\n check_columns_exist(time_column, sensors_columns)\n ppg = np.array(sensors[ppg_columns].values[1:, :], dtype=int)\n ms = np.array(sensors[ms_column].values[1:, ])\n ms_ints = np.array([int(str(i)[-3:]) for i in ms], dtype=float)\n ms_delta = [datetime.timedelta(milliseconds=i) for i in ms_ints]\n\n time = dataframe.loc[:,1].values[1:]\n time = np.array([pd.to_datetime(i) for i in time])\n time_with_ms = np.array(ms_delta) + time\n\n sensors_dict['PPG'] = ppg\n sensors_dict['time_sensors'] = time_with_ms.astype('datetime64[us]')\n sensors_dict['ms_ticker_sensors'] = ms\n acc = np.array(sensors[acc_columns].values[1:, :], dtype=float)\n sensors_dict['ACC'] = acc\n\n return sensors_dict",
"def plot_tap(file: str, before: DataFrame, during: DataFrame, after: DataFrame, time_col: str):\n\n print(\"Making plots at time \" + str(before[time_col].iloc[0]))\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n ax = before.plot(time_col, y, kind = 'scatter', color = 'blue', label = 'Before Tap')\n after.plot(time_col, y, kind = 'scatter', color = 'red', label = 'After Tap', ax = ax)\n during.plot(time_col, y, kind = 'scatter', color = 'black', label = 'During Tap', ax = ax)\n plt.axes(ax)\n plt.xlabel('Event Time')\n plt.ylabel(y)\n\n min_x = before[time_col].iloc[0] - (before[time_col].iloc[1] - before[time_col].iloc[0]) * 50\n min_y = min([min(during[y]), min(before[y]), min(after[y])])\n # Mark the mean during tap event (Feature 1)\n mean_during = mean(during[y])\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n plt.hlines(y = mean_during, xmin = min_x, xmax = during[time_col].iloc[-1], linestyle='dashed', \\\n color='black')\n plt.annotate(xy = (min_x, mean_during), s = 'avgDuringTap')\n # Mark the mean before\n plt.hlines(y = mean_before, xmin = min_x, xmax = before[time_col].iloc[-1], linestyle='dashed', \\\n color='blue')\n plt.annotate(xy = (min_x, mean_before), s = 'avg100msBefore')\n # Mark the mean after\n plt.hlines(y = mean_after, xmin = min_x, xmax = after[time_col].iloc[-1], linestyle='dashed', \\\n color='red')\n plt.annotate(xy = (min_x, mean_after), s = 'avg100msAfter')\n\n plt.legend()\n\n plt.savefig(file+'_'+y+'_time_'+str(before[time_col].iloc[0]) + '.png')\n\n plt.close()",
"def add_apple_data_to_activities(self):\n\n try:\n # apple data is loaded from csv rather than from json\n apple_data = self.load_apple_workouts()\n\n # filter out nike and strava data that has synced to apple, we are getting that from json source\n apple_data = apple_data[(apple_data.sourceName != \"Nike Run Club\") & (apple_data.sourceName != \"Strava\")]\n\n # set up 5 key metrics\n # note we're using enum values\n apple_data['source'] = ActivitySource.APPLE.value\n apple_data['activity_type'] = apple_data['workoutActivityType'].apply(lambda x: self.convert_apple_activity_type(x).value)\n apple_data['distance_in_km'] = apple_data['totalDistance']\n apple_data['duration_in_min'] = apple_data['duration']\n apple_data['start_timestamp'] = apple_data['startDate'].apply(lambda x: parse(x, tzinfos={\"America/Vancouver\"}))\n\n # filter out extraneous columns\n apple_data = apple_data.filter(self.data_frame_columns)\n self.all_activities = self.all_activities.append(apple_data, sort=True, ignore_index=True)\n\n logging.info(\"Done parsing Apple data.\")\n except Exception:\n logging.exception(\"Could not parse Apple data\")",
"def run():\n\n import matplotlib.pyplot as plt\n\n anomalies_t = []\n anomalies_v = []\n anomalies_c = []\n\n all_t = []\n all_v = []\n\n rows = []\n for i, row in dataSet.iterrows():\n\n inputData = row.to_dict()\n\n detectorValues = handleRecord(inputData)\n\n if (detectorValues[0] > 0.65):\n anomalies_t.append(inputData[\"timestamp\"])\n anomalies_v.append(inputData[\"value\"])\n anomalies_c.append(detectorValues[0])\n\n all_t.append(inputData[\"timestamp\"])\n all_v.append(inputData[\"value\"])\n\n outputRow = list(row) + list(detectorValues)\n\n rows.append(outputRow)\n\n # Progress report\n if (i % 1000) == 0:\n print \".\",\n sys.stdout.flush()\n\n fig, ax = plt.subplots()\n\n ax.plot(all_t, all_v)\n ax.plot(anomalies_t, anomalies_v, 'ro')\n\n plt.show()\n\n ans = pandas.DataFrame(rows)\n return ans",
"def cleanCsv(): \n\n count_neutral = 0\n count_sad = 0\n count_angry = 0\n count_happy = 0\n\n count_session_neutral = 0 \n\n for column_values in raw_data:\n\n if significant_data.fieldnames is None:\n dh = dict((h, h) for h in raw_data.fieldnames)\n significant_data.fieldnames = raw_data.fieldnames\n significant_data.writerow(dh)\n\n if column_values['AOI[Sad_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Left]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Neutral_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Right]Hit_0'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Left]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Happy_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_happy = count_happy + 1\n\n if column_values['AOI[Neutral_Right]Hit_1'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Sad_Left]Hit'] == '1':\n significant_data.writerow(column_values)\n count_sad = count_sad + 1\n\n if column_values['AOI[Neutral_Right]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n if column_values['AOI[Angry_Right]Hit'] == '1':\n significant_data.writerow(column_values)\n count_angry = count_angry + 1\n\n if column_values['AOI[Neutral_Left]Hit_2'] == '1':\n significant_data.writerow(column_values)\n count_neutral = count_neutral + 1\n\n return {\n 'count_neutral': count_neutral,\n 'count_sad': count_sad,\n 'count_angry': count_angry,\n 'count_happy': count_happy,\n }",
"def __flight_data_handler(self, event, sender, data):\n self.battery = data.battery_percentage\n self.fly_mode = data.fly_mode\n self.throw_fly_timer = data.throw_fly_timer\n self.throw_ongoing = data.throw_fly_timer > 0\n\n if self.prev_flight_data != str(data):\n print(data)\n self.prev_flight_data = str(data)\n self.flight_data = data\n\n if self.is_flying != data.em_sky:\n self.is_flying = data.em_sky\n log.debug(f\"FLYING : {self.is_flying}\")\n if not self.is_flying:\n self.reset()\n else:\n if self.tracking_after_takeoff:\n log.info(\"Tracking on after takeoff\")\n self.toggle_tracking(True)\n\n # if self.write_header_log:\n # self.write_header_log = False\n # self.log_file_log.write(f\"{data.format_cvs_header()}\\n\")\n # self.log_file_log.write(f\"{data.format_cvs(0)}\\n\")",
"def statistical_feature_extraction(window_size, signal, axis, device, subject_ID):\n\n start_running = timeit.default_timer()\n try:\n directory = f'data/row_data/{device}_{signal}/S{subject_ID}_{device}_{signal}.csv'\n sampling_rate = 20\n window_size = int(sampling_rate * window_size)\n # print(window_size)\n except:\n print('Error! Can not find such directory.')\n\n raw_signal = pd.read_csv(directory)\n win_count = 0\n total_win_count = 0\n features_for_all_windows_one_activity = []\n features_for_all_windows_all_activities = []\n column_title = f'{axis}_{device}_{signal}'\n for class_label in np.append(range(1, 14), range(15, 20)):\n activity_ID = chr(class_label + 64)\n raw_data_one_activity = np.array(raw_signal.loc[raw_signal['activity_ID'] == activity_ID, [column_title]])\n raw_data_one_activity = pd.DataFrame(raw_data_one_activity)\n\n for data_point in range(0, len(raw_data_one_activity), window_size):\n win_count += 1\n start = data_point\n end = start + window_size\n time_domain_window = raw_data_one_activity[start:end]\n\n time_mean = pd.Series(time_domain_window.mean()).rename(f'{axis}_{signal}_mean')\n time_min = pd.Series(time_domain_window.min()).rename(f'{axis}_{signal}_min')\n time_max = pd.Series(time_domain_window.max()).rename(f'{axis}_{signal}_max')\n time_std = pd.Series(time_domain_window.std()).rename(f'{axis}_{signal}_std')\n time_median = pd.Series(time_domain_window.median()).rename(f'{axis}_{signal}_median')\n time_variance = pd.Series(time_domain_window.var()).rename(f'{axis}_{signal}_variance')\n zero_crossing_rate = pd.Series(zero_crossing(time_domain_window)).rename(\n f'{axis}_{signal}_zero_crossing')\n mean_crossing = pd.Series(mean_crossing_rate(time_domain_window)).rename(\n f'{axis}_{signal}_mean_crossing')\n activity_id_ = pd.Series(activity_ID).rename('Activity_ID')\n\n features_for_one_window_one_activity = pd.concat(\n [time_mean, time_min, time_max, time_std, time_median, time_variance, zero_crossing_rate, mean_crossing,\n activity_id_], axis=1)\n features_for_all_windows_one_activity.append(features_for_one_window_one_activity)\n # print(features_for_all_windows)\n\n print('Window count', win_count)\n total_win_count += win_count\n win_count = 0\n features_for_all_windows_all_activities.append(features_for_all_windows_one_activity)\n features = pd.concat(features_for_all_windows_all_activities[0], ignore_index=False)\n print(features)\n save_as_directory = f'feature_label_tables/feature_{device}_{signal}/feature_S{subject_ID}_{axis}_{device}_{signal}.csv'\n features.to_csv(save_as_directory, encoding='utf-8', index=False)\n finish_running = timeit.default_timer()\n print('Total number of windows: ', total_win_count)\n print('Running time: ', finish_running - start_running)",
"def get_tap_events(user_id: str, user_session_id: str) -> DataFrame:\n full_df = pd.DataFrame()\n for tap_file in tap_file_names:\n columns = tap_file_important_columns[tap_file]\n data = read_file(user_id, user_session_id, tap_file)\n time_data = pd.DataFrame()\n time_data['Start'] = data[columns[0]]\n time_data['End'] = data[columns[-2]]\n time_data['Type'] = tap_file_to_feature_name[tap_file]\n full_df = pd.concat([full_df, time_data], ignore_index = True)\n return full_df.dropna().sort_values(by = 'Start').reset_index(drop = True)",
"def _alreadyProcessed(self, tiltseriesdata):\n\t\tseriesname = \"series%3d\" % (tiltseriesdata['number'])\n\t\tself._reloadDoneDict()\n\t\tif seriesname in self.donedict:\n\t\t\tif not self.stats['lastseries_skipped']:\n\t\t\t\tsys.stderr.write(\"skipping series\\n\")\n\t\t\telif self.stats['skipcount'] % 80 == 0:\n\t\t\t\tsys.stderr.write(\".\\n\")\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\".\")\n\t\t\tself.stats['lastseries_skipped'] = True\n\t\t\tself.stats['skipcount'] += 1\n\t\t\tself.stats['count'] += 1\n\t\t\treturn True\n\t\telse:\n\t\t\tself.stats['waittime'] = 0\n\t\t\tif self.stats['lastseries_skipped']:\n\t\t\t\tapDisplay.printMsg(\"\\nskipped\"+str(self.stats['skipcount'])+\" series so far\")\n\t\t\tself.stats['lastseries_skipped']=False\n\t\t\treturn False\n\t\treturn False",
"def trip_duration_stats(df):",
"def _array_hardware_status(self):\n data = self.fa.get_hardware_status()\n\n self.chassis_health = GaugeMetricFamily(\n 'purefa_hardware_chassis_health',\n 'FlashArray hardware chassis health status')\n self.controller_health = GaugeMetricFamily(\n 'purefa_hardware_controller_health',\n 'FlashArray hardware controller health status',\n labels=['controller'])\n self.component_health = GaugeMetricFamily(\n 'purefa_hardware_component_health',\n 'FlashArray hardware component health status',\n labels=['chassis', 'controller', 'component',\n 'index'])\n self.temperature = GaugeMetricFamily(\n 'purefa_hardware_temperature_celsius',\n 'FlashArray hardware temperature sensors',\n labels=['chassis', 'controller',\n 'sensor'])\n self.power = GaugeMetricFamily(\n 'purefa_hardware_power_volts',\n 'FlashArray hardware power supply voltage',\n labels=['chassis', 'power_supply'])\n\n re_chassis = re.compile(r\"^CH(\\d+)$\")\n re_controller = re.compile(r\"^CT(\\d+)$\")\n re_component = re.compile(r\"^(CH|CT)(\\d+)\\.([A-Z]+)([0-9]+)$\")\n\n for comp in data:\n if (comp['status'] == 'not_installed'):\n continue\n component_name = comp['name']\n component_state = 1 if (comp['status'] == 'ok') else 0\n\n # Chassis\n if re.match(r\"^CH\\d+$\", component_name):\n detail = re_chassis.match(component_name)\n c_index = detail.group(1)\n self.chassis_health.add_metric([c_index], component_state)\n continue\n # Controller\n elif re.match(r\"^CT\\d+$\", component_name):\n detail = re_controller.match(component_name)\n c_index = detail.group(1)\n self.controller_health.add_metric([c_index], component_state)\n continue\n # Components\n elif re.match(r\"^C(H|T)\\d+\\.[A-Z]+[0-9]+$\", component_name):\n detail = re_component.match(component_name)\n c_base = detail.group(1)\n c_base_index = detail.group(2)\n c_type = detail.group(3)\n c_index = detail.group(4)\n\n if c_base == 'CH':\n # Chassis-based\n labelset = [c_base_index, '', c_type, c_index]\n else:\n # Controller-based\n labelset = ['', c_base_index, c_type, c_index]\n\n # Component health status\n self.component_health.add_metric(\n labels=labelset, value=component_state)\n\n if c_type.lower() == 'tmp':\n # Additional metric for temperature\n if c_base == 'CH':\n self.temperature.add_metric(\n [c_base_index, '', c_index], float(comp['temperature']))\n else:\n self.temperature.add_metric(\n ['', c_base_index, c_index], float(comp['temperature']))\n elif c_type.lower() == 'pwr':\n # Additional metric for voltage level\n if comp['voltage'] is not None:\n self.power.add_metric([c_base_index, c_index],\n float(comp['voltage']))",
"def __insert_data_in_img(self):\n data_df = pd.read_csv(\n os.path.join(\n self.shap_logs_path,\n \"SHAP_summary_{}_{}_{}.csv\".format(\n self.classifier_name, \"PRESENT\", self.datetime\n ),\n ),\n index_col=0,\n )\n for feature_category in self.unique_feature_category_names:\n self.category_img_dict[feature_category][\"value\"] = int(\n data_df.loc[feature_category, :].sum()\n )\n\n for row_cnt, (feature_category_name, feature_data) in enumerate(\n self.category_img_dict.items()\n ):\n arrow_width = int(\n (self.baseline_scale_img.shape[1] / 100) * abs(feature_data[\"value\"])\n )\n if feature_data[\"value\"] > 0:\n arrow_end = (self.arrow_start[0] + arrow_width, self.arrow_start[1])\n arrow_middle = int(\n ((arrow_end[1] - self.arrow_start[1]) / 2) + self.arrow_start[1] - 7\n )\n for bracket_no, bracket in enumerate(self.ranges_lst):\n if abs(feature_data[\"value\"]) in bracket:\n color = (\n self.positive_arrow_colors[bracket_no][2],\n self.positive_arrow_colors[bracket_no][1],\n self.positive_arrow_colors[bracket_no][0],\n )\n cv2.arrowedLine(\n self.img, self.arrow_start, arrow_end, color, 5, tipLength=0.1\n )\n cv2.putText(\n self.img,\n \"+\" + str(abs(feature_data[\"value\"])) + \"%\",\n (arrow_end[0] - 7, arrow_middle - 15),\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n color,\n 2,\n )\n\n else:\n arrow_end = (self.arrow_start[0] - arrow_width, self.arrow_start[1])\n arrow_middle = int(\n ((self.arrow_start[1] - arrow_end[1]) / 2) + arrow_end[1] - 7\n )\n for bracket_no, bracket in enumerate(self.ranges_lst):\n if abs(feature_data[\"value\"]) in bracket:\n color = (\n self.negative_arrow_colors[bracket_no][2],\n self.negative_arrow_colors[bracket_no][1],\n self.negative_arrow_colors[bracket_no][0],\n )\n cv2.arrowedLine(\n self.img, self.arrow_start, arrow_end, color, 5, tipLength=0.1\n )\n cv2.putText(\n self.img,\n \"-\" + str(abs(feature_data[\"value\"])) + \"%\",\n (arrow_end[0] - 7, arrow_middle - 15),\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n color,\n 2,\n )\n\n if row_cnt != (len(list(self.category_img_dict.keys())) - 1):\n self.arrow_start = (\n arrow_end[0],\n self.side_scale_y_tick_cords[row_cnt + 1][0],\n )\n\n small_arrow_top_left = (\n int(arrow_end[1]) + 20,\n int(arrow_end[0] - self.small_arrow_img.shape[1] / 2),\n )\n small_arrow_bottom_right = (\n small_arrow_top_left[0] + self.small_arrow_img.shape[0],\n small_arrow_top_left[1] + self.small_arrow_img.shape[1],\n )\n self.img[\n small_arrow_top_left[0] : small_arrow_bottom_right[0],\n small_arrow_top_left[1] : small_arrow_bottom_right[1],\n ] = self.small_arrow_img\n color_bar_top_left = (\n arrow_end[1] + self.small_arrow_img.shape[0] + 25,\n self.baseline_scale_top_left[1],\n )\n color_bar_bottom_right = (\n color_bar_top_left[0] + self.color_bar_img.shape[0],\n color_bar_top_left[1] + self.color_bar_img.shape[1],\n )\n self.img[\n color_bar_top_left[0] : color_bar_bottom_right[0],\n color_bar_top_left[1] : color_bar_bottom_right[1],\n ] = self.color_bar_img\n\n color_bar_middle = (\n (int(580 + self.baseline_scale_img.shape[1] / 2)),\n color_bar_bottom_right[0] + 50,\n )\n cv2.putText(\n self.img,\n \"CLASSIFICATION PROBABILITY\",\n color_bar_middle,\n cv2.FONT_HERSHEY_COMPLEX,\n 1,\n (0, 0, 0),\n 2,\n )\n cv2.imwrite(self.img_save_path, self.img)\n self.visualization_timer.stop_timer()\n stdout_success(\n msg=f\"SHAP summary graph saved at {self.img_save_path}\",\n elapsed_time=self.visualization_timer.elapsed_time_str,\n )",
"def place_types_process_rows(self):\n\n for index in range(len(self.table)):\n row_rdf = self.place_types_map_row_to_rdf(self.table.iloc[index])\n if row_rdf is not None:\n self.data += row_rdf",
"def _on_new_batch(self, data):\n data[self.pid_cols] = self.pid.digitize(data[self.pid_cols])\n #set counts back to 0\n for label in self.labels:\n self.lab_counts[label] = 0 \n for col in self.cat_cols:\n for label in self.labels:\n for val in self.categories[col]:\n self.cat_counts[col][label][val] = 0\n \n #add each row to the counts\n for index, row in data.iterrows():\n label = row[self.target_col_name]\n self.lab_counts[label] += 1\n \n for col in self.cat_cols:\n #skip nans\n if self.isnan(row[col]):\n continue\n val = row[col]\n self.cat_counts[col][label][val] += 1\n \n self._calculate_probs_and_entropies()"
] | [
"0.626239",
"0.5580731",
"0.5003521",
"0.49078733",
"0.48890755",
"0.48783615",
"0.48759088",
"0.4853689",
"0.4851357",
"0.48206225",
"0.47803137",
"0.47471297",
"0.47058022",
"0.4700807",
"0.4691376",
"0.46603593",
"0.4647413",
"0.4630025",
"0.45931166",
"0.45842057",
"0.45676926",
"0.4566815",
"0.4565005",
"0.45553598",
"0.4522837",
"0.4520091",
"0.4517827",
"0.45082578",
"0.44873866",
"0.44656163"
] | 0.6911792 | 0 |
Locates each individual tap event and puts special values in the column to indicate the start and end | def mark_tap_start_and_end(data: DataFrame, delta_in_ms: int):
lead_file = 'Accelerometer.csv'
time_col = x_columns[lead_file]
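# Convert the window size from milliseconds to the units of the event-time column (ms * 1e6, i.e. nanoseconds)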
delta = delta_in_ms * 1000000
for tap_file in tap_file_names:
tap_feature = tap_file_to_feature_name[tap_file]
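# Marker values written into the tap column:
# 0 = no tap, 1 = during a tap, 2 = tap start, 3 = tap end,
# 4 = start of the "before" window, 5 = end of the "after" window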
# Step 1: Put a 2 at the start and a 3 at the end of each event
indices = data[data[tap_feature] == 1].index
if len(indices) == 0:
continue
for i in range(len(indices)):
if i == 0 or data[time_col][ indices[i] ] - data[time_col][ indices[i - 1] ] > delta:
data[tap_feature].loc[ indices[i] ] = 2
if i > 0:
if data[tap_feature][ indices[i - 1] ] == 1:
data[tap_feature].loc[ indices[i - 1] ] = 3
elif indices[i - 1] + 1 < data.shape[0] and data[tap_feature][ indices[i - 1] + 1 ] == 0:
# In this case, the tap lasted only one time step,
# so we call the end of the last tap the reading after
data[tap_feature].loc[ indices[i - 1] + 1 ] = 3
else:
#Hopefully this case will never occur, where two consecutive taps
#are more than delta apart but with no readings in between
print("Something seems off about this data...")
print(data[ indices[i] - 5 : indices[i] + 5][[time_col, tap_feature]])
return
if i == len(indices) - 1:
# If we're at the end of the list, that must be the end of the last tap
if data[tap_feature][ indices[i] ] == 1:
data[tap_feature].loc[ indices[i] ] = 3
elif indices[i] + 1 < data.shape[0]:
data[tap_feature].loc[ indices[i] + 1] = 3
else:
data[tap_feature].loc[ indices[i] ] = 0 # Remove the miscreant
print("There's an issue with a tap at the very last point of the data...")
if sum(data[data[tap_feature] == 2][tap_feature]) * 3 != sum(data[data[tap_feature] == 3][tap_feature]) * 2:
print("Uh oh, we placed an unbalanced number of 2's and 3's. Thanos would be disappointed.")
# Step 2: Put a 4 at the start of the "before" window
# and a 5 at the end of the "after" window
start_indices = data[data[tap_feature] == 2].index
end_indices = data[data[tap_feature] == 3].index
if len(start_indices) != len(end_indices):
print("Impossible.")
#We should be able to get a half_delta on either side of
#each window
half_delta = delta // 2
for i in range(len(start_indices)):
find_index_before = start_indices[i]
range_min = data[time_col][ start_indices[i] ] - half_delta
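# Walk backwards from the tap start until we leave the half-delta window,
# hit the start of the data, or run into another marked event (value >= 2)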
while find_index_before > 0 and data[time_col][find_index_before] > range_min \
and data[tap_feature][find_index_before - 1] < 2:
find_index_before -= 1
if data[tap_feature][find_index_before] == 0:
data[tap_feature].loc[find_index_before] = 4
elif data[tap_feature][find_index_before] == 5 and data[tap_feature][find_index_before + 1] == 0:
# Keep our windows from overlapping - don't put the start of one on
# top of the end of the previous
data[tap_feature].loc[find_index_before + 1] = 4
elif find_index_before == 0 and data[tap_feature][find_index_before + 1] == 0:
# If we're at the start of the interval, shift what was there forward one
data[tap_feature].loc[find_index_before + 1] = data[tap_feature].loc[find_index_before]
data[tap_feature].loc[find_index_before] = 4
elif find_index_before == start_indices[i] and data[tap_feature][find_index_before - 1] == 5 \
and find_index_before >= 2 and data[tap_feature][find_index_before - 2] < 2:
data[tap_feature].loc[find_index_before - 2] = 5
data[tap_feature].loc[find_index_before - 1] = 4
else:
# The most likely case is that we hit the beginning or end of the
# interval, in which case we should probably just throw the point out
print("Oh no, that's pretty weird: ", data[tap_feature][find_index_before], find_index_before, start_indices[i])
find_index_after = end_indices[i]
range_max = data[time_col][ end_indices[i] ] + half_delta
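# Walk forwards from the tap end until we leave the half-delta window,
# hit the end of the data, or run into another marked event (value >= 2)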
while find_index_after + 1 < data.shape[0] and data[time_col][find_index_after] < range_max \
and data[tap_feature][find_index_after + 1] < 2:
find_index_after += 1
if data[tap_feature][find_index_after] == 0:
data[tap_feature].loc[find_index_after] = 5
elif find_index_after == data.shape[0] - 1 and data[tap_feature][find_index_after - 1] == 0:
# If we're at the end of the interval, shift what was there back one
data[tap_feature].loc[find_index_after - 1] = data[tap_feature].loc[find_index_after]
data[tap_feature].loc[find_index_after] = 5
elif find_index_after == end_indices[i] and data[tap_feature][find_index_after + 1] < 2:
data[tap_feature].loc[find_index_after + 1] = 5
else:
# See above comment
print("Oh no, that's REALLY weird", find_index_after, data[tap_feature]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_columns_for_taps(full_data: DataFrame, tap_data: DataFrame):\n for tap_file in tap_file_names:\n tap_type = tap_file_to_feature_name[tap_file]\n data = tap_data[tap_data['Type'] == tap_type].reset_index(drop = True)\n\n lead_file = 'Accelerometer.csv'\n time_column_name = x_columns[lead_file]\n data_times = full_data[time_column_name]\n data_index = 0\n\n new_column = []\n\n for tap_index in range(data.shape[0]):\n try:\n while data_times[data_index] < (data['Start'][tap_index] * 1000000):\n new_column.append(0) # Not in the midst of a tap\n data_index += 1\n if data_index >= full_data.shape[0]: break\n if data_index >= full_data.shape[0]: break\n new_column.append(1) # At least one value in the midst of the tap\n data_index += 1\n if data_index >= full_data.shape[0]: break\n while data_times[data_index] < (data['End'][tap_index] * 1000000):\n new_column.append(1)\n data_index += 1\n if data_index >= full_data.shape[0]: break\n if data_index >= full_data.shape[0]: break\n except KeyError:\n print(\"Okay, here's that thing again\")\n return\n\n \n while data_index < full_data.shape[0]:\n new_column.append(0)\n data_index += 1\n\n full_data[tap_type] = new_column",
"def get_tap_events(user_id: str, user_session_id: str) -> DataFrame:\n full_df = pd.DataFrame()\n for tap_file in tap_file_names:\n columns = tap_file_important_columns[tap_file]\n data = read_file(user_id, user_session_id, tap_file)\n time_data = pd.DataFrame()\n time_data['Start'] = data[columns[0]]\n time_data['End'] = data[columns[-2]]\n time_data['Type'] = tap_file_to_feature_name[tap_file]\n full_df = pd.concat([full_df, time_data], ignore_index = True)\n return full_df.dropna().sort_values(by = 'Start').reset_index(drop = True)",
"def tap():\n return \"I have clicked on the elements\"",
"def compute_tap_intervals(xtaps, t, threshold=20):\n import numpy as np\n\n if isinstance(xtaps, list):\n xtaps = np.asarray(xtaps)\n if isinstance(t, list):\n t = np.asarray(t)\n\n # Set time points:\n tap_times = t - t[0]\n\n # Calculate x offset:\n xtaps_offset = xtaps - np.mean(xtaps)\n\n # Find left/right finger \"press\" events:\n dx = xtaps_offset[1:] - xtaps_offset[:-1]\n ipress = np.where(np.abs(dx) > threshold)\n\n # Filter data:\n #xtaps = xtaps[ipress]\n tap_times = tap_times[ipress]\n\n # Find press event intervals:\n tap_intervals = tap_times[1:] - tap_times[:-1]\n\n return ipress, tap_intervals",
"def plot_tap(file: str, before: DataFrame, during: DataFrame, after: DataFrame, time_col: str):\n\n print(\"Making plots at time \" + str(before[time_col].iloc[0]))\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n ax = before.plot(time_col, y, kind = 'scatter', color = 'blue', label = 'Before Tap')\n after.plot(time_col, y, kind = 'scatter', color = 'red', label = 'After Tap', ax = ax)\n during.plot(time_col, y, kind = 'scatter', color = 'black', label = 'During Tap', ax = ax)\n plt.axes(ax)\n plt.xlabel('Event Time')\n plt.ylabel(y)\n\n min_x = before[time_col].iloc[0] - (before[time_col].iloc[1] - before[time_col].iloc[0]) * 50\n min_y = min([min(during[y]), min(before[y]), min(after[y])])\n # Mark the mean during tap event (Feature 1)\n mean_during = mean(during[y])\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n plt.hlines(y = mean_during, xmin = min_x, xmax = during[time_col].iloc[-1], linestyle='dashed', \\\n color='black')\n plt.annotate(xy = (min_x, mean_during), s = 'avgDuringTap')\n # Mark the mean before\n plt.hlines(y = mean_before, xmin = min_x, xmax = before[time_col].iloc[-1], linestyle='dashed', \\\n color='blue')\n plt.annotate(xy = (min_x, mean_before), s = 'avg100msBefore')\n # Mark the mean after\n plt.hlines(y = mean_after, xmin = min_x, xmax = after[time_col].iloc[-1], linestyle='dashed', \\\n color='red')\n plt.annotate(xy = (min_x, mean_after), s = 'avg100msAfter')\n\n plt.legend()\n\n plt.savefig(file+'_'+y+'_time_'+str(before[time_col].iloc[0]) + '.png')\n\n plt.close()",
"def _(event):\n # The incoming data looks like u'\\x1b[35;1R'\n # Parse row/col information.\n row, col = map(int, event.data[2:-1].split(';'))\n\n # Report absolute cursor position to the renderer.\n cli_ref().renderer.report_absolute_cursor_row(row)",
"def _locate_events(self, start_time, end_time):\n\n # Define pre-pad as a function of the onset windows\n if self.pre_pad is None:\n self.pre_pad = max(self.p_onset_win[1],\n self.s_onset_win[1]) \\\n + 3 * max(self.p_onset_win[0],\n self.s_onset_win[0])\n\n # Adjust pre- and post-pad to take into account cosine taper\n t_length = self.pre_pad + 4*self.marginal_window + self.post_pad\n self.pre_pad += np.ceil(t_length * 0.06)\n self.post_pad += np.ceil(t_length * 0.06)\n\n trig_events = self.output.read_triggered_events(start_time, end_time)\n n_evts = len(trig_events)\n\n for i, trig_event in trig_events.iterrows():\n event_uid = trig_event[\"EventID\"]\n msg = \"=\" * 120 + \"\\n\"\n msg += \"\\tEVENT - {} of {} - {}\\n\"\n msg += \"=\" * 120 + \"\\n\\n\"\n msg += \"\\tDetermining event location...\\n\"\n msg = msg.format(i + 1, n_evts, event_uid)\n self.output.log(msg, self.log)\n\n w_beg = trig_event[\"CoaTime\"] - 2*self.marginal_window \\\n - self.pre_pad\n w_end = trig_event[\"CoaTime\"] + 2*self.marginal_window \\\n + self.post_pad\n\n timer = util.Stopwatch()\n self.output.log(\"\\tReading waveform data...\", self.log)\n try:\n self._read_event_waveform_data(trig_event, w_beg, w_end)\n except util.ArchiveEmptyException:\n msg = \"\\tNo files found in archive for this time period\"\n self.output.log(msg, self.log)\n continue\n except util.DataGapException:\n msg = \"\\tAll available data for this time period contains gaps\"\n msg += \"\\n\\tOR data not available at start/end of time period\\n\"\n self.output.log(msg, self.log)\n continue\n self.output.log(timer(), self.log)\n\n timer = util.Stopwatch()\n self.output.log(\"\\tComputing 4D coalescence grid...\", self.log)\n\n daten, max_coa, max_coa_norm, loc, map_4d = self._compute(\n w_beg, w_end,\n self.data.signal,\n self.data.availability)\n coord = self.lut.xyz2coord(np.array(loc).astype(int))\n event_coa_data = pd.DataFrame(np.array((daten, max_coa,\n coord[:, 0],\n coord[:, 1],\n coord[:, 2])).transpose(),\n columns=[\"DT\", \"COA\", \"X\", \"Y\", \"Z\"])\n event_coa_data[\"DT\"] = event_coa_data[\"DT\"].apply(UTCDateTime)\n event_coa_data_dtmax = \\\n event_coa_data[\"DT\"].iloc[event_coa_data[\"COA\"].astype(\"float\").idxmax()]\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n\n if (event_coa_data_dtmax >= trig_event[\"CoaTime\"]\n - self.marginal_window) \\\n and (event_coa_data_dtmax <= trig_event[\"CoaTime\"]\n + self.marginal_window):\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n else:\n msg = \"\\n\\tEvent {} is outside marginal window.\\n\"\n msg += \"\\tDefine more realistic error - the marginal window\"\n msg += \" should be an estimate of the origin time uncertainty,\"\n msg += \"\\n\\tdetermined by the expected spatial uncertainty and\"\n msg += \"the seismic velocity in the region of the earthquake\\n\"\n msg += \"\\n\" + \"=\" * 120 + \"\\n\"\n msg = msg.format(event_uid)\n self.output.log(msg, self.log)\n continue\n\n event_mw_data = event_coa_data\n event_mw_data = event_mw_data[(event_mw_data[\"DT\"] >= w_beg_mw) &\n (event_mw_data[\"DT\"] <= w_end_mw)]\n map_4d = map_4d[:, :, :,\n event_mw_data.index[0]:event_mw_data.index[-1]]\n event_mw_data = event_mw_data.reset_index(drop=True)\n event_max_coa = event_mw_data.iloc[event_mw_data[\"COA\"].astype(\"float\").idxmax()]\n\n # Update event UID; make out_str\n event_uid = str(event_max_coa.values[0])\n for char_ in [\"-\", 
\":\", \".\", \" \", \"Z\", \"T\"]:\n event_uid = event_uid.replace(char_, \"\")\n out_str = \"{}_{}\".format(self.output.name, event_uid)\n self.output.log(timer(), self.log)\n\n # Make phase picks\n timer = util.Stopwatch()\n self.output.log(\"\\tMaking phase picks...\", self.log)\n phase_picks = self._phase_picker(event_max_coa)\n self.output.write_picks(phase_picks[\"Pick\"], event_uid)\n self.output.log(timer(), self.log)\n\n # Determining earthquake location error\n timer = util.Stopwatch()\n self.output.log(\"\\tDetermining earthquake location and uncertainty...\", self.log)\n loc_spline, loc_gau, loc_gau_err, loc_cov, \\\n loc_cov_err = self._calculate_location(map_4d)\n self.output.log(timer(), self.log)\n\n # Make event dictionary with all final event location data\n event = pd.DataFrame([[event_max_coa.values[0],\n event_max_coa.values[1],\n loc_spline[0], loc_spline[1], loc_spline[2],\n loc_gau[0], loc_gau[1], loc_gau[2],\n loc_gau_err[0], loc_gau_err[1],\n loc_gau_err[2],\n loc_cov[0], loc_cov[1], loc_cov[2],\n loc_cov_err[0], loc_cov_err[1],\n loc_cov_err[2]]],\n columns=self.EVENT_FILE_COLS)\n\n self.output.write_event(event, event_uid)\n\n self._optional_locate_outputs(event_mw_data, event, out_str,\n phase_picks, event_uid, map_4d)\n\n self.output.log(\"=\" * 120 + \"\\n\", self.log)\n\n del map_4d, event_coa_data, event_mw_data, event_max_coa, \\\n phase_picks\n self.coa_map = None",
"def event_starting_point_extractor(row) -> int:\n to_return = None\n # First, define the variables that we will need for the rest of this\n # function.\n positions_list = literal_eval(row[\"positions\"])\n assert isinstance(positions_list, list)\n assert 1 <= len(positions_list) <= 2\n\n # Next, extract the starting and ending positions.\n raw_starting_x = positions_list[0].get(\"x\")\n raw_starting_y = positions_list[0].get(\"y\")\n\n starting_x = (raw_starting_x/100)*104\n starting_y = (raw_starting_y/100)*68\n\n # Finally, validate and return the result.\n to_return = [starting_x, starting_y]\n\n return to_return",
"def get_events_current_row(self):\n\n global ROW\n\n if self.pj[OBSERVATIONS][self.observationId][EVENTS]:\n ct = self.getLaps()\n if ct >= self.pj[OBSERVATIONS][self.observationId][EVENTS][-1][0]:\n ROW = len(self.pj[OBSERVATIONS][self.observationId][EVENTS])\n else:\n cr_list = [idx for idx, x in enumerate(self.pj[OBSERVATIONS][self.observationId][EVENTS][: -1])\n if x[0] <= ct and self.pj[OBSERVATIONS][self.observationId][EVENTS][idx + 1][0] > ct]\n\n if cr_list:\n ROW = cr_list[0]\n if not self.trackingCursorAboveEvent:\n ROW += 1\n else:\n ROW = -1\n\n self.twEvents.setItemDelegate(StyledItemDelegateTriangle(self.twEvents))\n self.twEvents.scrollToItem(self.twEvents.item(ROW, 0))",
"def test_convert_own_goal_touches(self) -> None:\n # An own goal from the game between Leicester and Stoke on 24 Feb 2018.\n # Stoke's goalkeeper Jack Butland allows a low cross to bounce off his\n # gloves and into the net:\n event = pd.DataFrame(\n [\n {\n 'type_id': 8,\n 'subtype_name': 'Cross',\n 'tags': [{'id': 402}, {'id': 801}, {'id': 1802}],\n 'player_id': 8013,\n 'positions': [{'y': 89, 'x': 97}, {'y': 0, 'x': 0}],\n 'game_id': 2499994,\n 'type_name': 'Pass',\n 'team_id': 1631,\n 'period_id': 2,\n 'milliseconds': 1496.7290489999993,\n 'subtype_id': 80,\n 'event_id': 230320305,\n },\n {\n 'type_id': 7,\n 'subtype_name': 'Touch',\n 'tags': [{'id': 102}],\n 'player_id': 8094,\n 'positions': [{'y': 50, 'x': 1}, {'y': 100, 'x': 100}],\n 'game_id': 2499994,\n 'type_name': 'Others on the ball',\n 'team_id': 1639,\n 'period_id': 2,\n 'milliseconds': 1497.6330749999993,\n 'subtype_id': 72,\n 'event_id': 230320132,\n },\n {\n 'type_id': 9,\n 'subtype_name': 'Reflexes',\n 'tags': [{'id': 101}, {'id': 1802}],\n 'player_id': 8094,\n 'positions': [{'y': 100, 'x': 100}, {'y': 50, 'x': 1}],\n 'game_id': 2499994,\n 'type_name': 'Save attempt',\n 'team_id': 1639,\n 'period_id': 2,\n 'milliseconds': 1499.980547,\n 'subtype_id': 90,\n 'event_id': 230320135,\n },\n ]\n )\n actions = wy.convert_to_actions(event, 1639)\n # FIXME: It adds a dribble between the bad touch of the goalkeeper and\n # his attempt to save the ball before crossing the line. Not sure\n # whether that is ideal.\n assert len(actions) == 4\n assert actions.at[1, 'type_id'] == spadl.actiontypes.index('bad_touch')\n assert actions.at[1, 'result_id'] == spadl.results.index('owngoal')",
"def timingColumns(self, results):\n \n pass",
"def update_events_start_stop(self):\n\n # stateEventsList = [self.pj[ETHOGRAM][x][BEHAVIOR_CODE] for x in self.pj[ETHOGRAM] if\n # STATE in self.pj[ETHOGRAM][x][TYPE].upper()]\n\n for row in range(0, self.twEvents.rowCount()):\n\n t = self.twEvents.item(row, tw_obs_fields[\"Tempo\"]).text()\n\n if \":\" in t:\n time = time2seconds(t)\n else:\n time = Decimal(t)\n\n subject = self.twEvents.item(row, tw_obs_fields[\"Sujeito\"]).text()\n key = self.twEvents.item(row, tw_obs_fields[\"Chave\"]).text()\n modifier = self.twEvents.item(row, tw_obs_fields[\"Modificador\"]).text()\n\n # check if code is state\n nbEvents = len(\n [event[EVENT_BEHAVIOR_FIELD_IDX] for event in self.pj[OBSERVATIONS][self.observationId][EVENTS]\n if event[EVENT_BEHAVIOR_FIELD_IDX] == key\n and event[EVENT_TIME_FIELD_IDX] < time\n and event[EVENT_SUBJECT_FIELD_IDX] == subject\n and event[EVENT_MODIFIER_FIELD_IDX] == modifier])\n\n # if nbEvents and (nbEvents % 2): # test >0 and odd\n # self.twEvents.item(row, tw_obs_fields[TYPE]).setText(STOP)\n # else:\n # self.twEvents.item(row, tw_obs_fields[TYPE]).setText(START)",
"def visit_event(self, event):",
"def test_060_tablet(self):\n self.allow_service('qubes.InputTablet')\n # try:\n # root_info = subprocess.check_output(['xwininfo', '-root']).decode()\n # for line in root_info.splitlines():\n # if 'Width:' in line:\n # _, _, width = line.partition('Width: ')\n # elif 'Height:' in line:\n # _, _, height = line.partition('Height: ')\n # tablet_events[1] = 'ABS_X + (0, {}, 0, 0)'.format(width)\n # tablet_events[2] = 'ABS_Y + (0, {}, 0, 0)'.format(height)\n # tablet_events[3] = 'ABS_MT_TOOL_X + (0, {}, 0, 0)'.format(width)\n # tablet_events[4] = 'ABS_MT_TOOL_Y + (0, {}, 0, 0)'.format(height)\n # except subprocess.CalledProcessError:\n # pass\n\n self.setUpDevice(tablet_events)\n self.find_device_and_start_listener()\n\n self.emit_event('ABS_X', 15000)\n self.emit_event('ABS_Y', 15000)\n self.emit_event('BTN_TOUCH', 1)\n self.emit_event('ABS_X', 16000)\n self.emit_event('ABS_Y', 16000)\n self.emit_event('BTN_TOUCH', 0)\n # should be ignored\n self.emit_event('REL_Y', 1)\n self.emit_event('REL_Y', 1)\n\n self.assertEvent(['RawTouchBegin', ANY,\n {'0': '15000.00', '1': '15000.00'}])\n self.assertEvent(['RawTouchUpdate', ANY,\n {'0': '16000.00', '1': '15000.00'}])\n self.assertEvent(['RawTouchUpdate', ANY,\n {'0': '16000.00', '1': '16000.00'}])\n # FIXME: really (0, 0)?\n self.assertEvent(['RawTouchEnd', ANY, {'0': '0.00', '1': '0.00'}])\n self.assertNoEvent(msg=\"rel events should be ignored\")",
"def on_event(self, td, name):\n\n raise NotImplementedError()",
"def on_trace_click(click_data,date,freq):\n p = click_data['points'][0]\n # here, use 'customdata' property of clicked point, \n # could also use 'curveNumber', 'pointIndex', etc.\n key=pd.to_datetime(0)\n if 'x' in p:\n key = pd.to_datetime(p['x'])\n df_f = get_corresponding_rows(df, key,date,freq)\n return df_f.to_dict('records')",
"def on_trace_click(click_data,date,freq):\n p = click_data['points'][0]\n # here, use 'customdata' property of clicked point, \n # could also use 'curveNumber', 'pointIndex', etc.\n key=pd.to_datetime(0)\n if 'x' in p:\n key = pd.to_datetime(p['x'])\n df_f = get_corresponding_rows(df, key,date,freq)\n return df_f.to_dict('records')",
"def xy(event):\n return map(int, event.get_coords())",
"def clickCell(self, event):\n position = self.input.checkMouseInput(event)\n if not position:\n return None\n x = math.floor(position[0] / self.imageWidth)\n y = math.floor(position[1] / self.imageHeight)\n return (int(x), int(y))",
"def processJumpTable(jt_ea):",
"def pressAdjTiles(self, event):\n clickedTile = event.widget\n if clickedTile.isInPlay(): self.changeSmile(2)\n for adjTile in self.getAdjacentTiles(clickedTile.row, clickedTile.col):\n if not adjTile.isFlagged(): adjTile.buttonPress()",
"def summary_info_events(filename):\n # filename = self.out_filename('events')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='EVENTS')\n data = dict()\n\n # Copy over header info to the summary table\n data['RA_PNT'] = np.float32(table.meta['RA_PNT'])\n data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])\n # data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])\n # data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])\n data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])\n data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])\n #data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])\n data['ZEN_PNT'] = np.float32(90. - table['ALT'].mean())\n data['ONTIME'] = np.float32(table.meta['ONTIME'])\n data['LIVETIME'] = np.float32(table.meta['LIVETIME'])\n data['DEADC'] = np.float32(table.meta['DEADC'])\n\n MJDREFI = table.meta['MJDREFI']\n MJDREFF = table.meta['MJDREFF']\n MJDREF = MJDREFI + MJDREFF\n\n TSTART_MET = table.meta['TSTART'] / 3600. / 24.\n TSTOP_MET = table.meta['TSTOP'] / 3600. / 24.\n\n start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')\n stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')\n\n data['TSTART'] = np.float32(start_time.utc.mjd)\n data['TSTOP'] = np.float32(stop_time.utc.mjd)\n data['TSTART_STR'] = str(start_time.utc.iso[:-4])\n data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])\n\n data['N_TELS'] = table.meta['N_TELS']\n data['TELLIST'] = table.meta['TELLIST']\n try:\n data['OBJECT'] = table.meta['OBJECT']\n except KeyError:\n data['OBJECT'] = \"\"\n data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])\n data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])\n\n # data['OBS_MODE'] = table.meta['OBS_MODE']\n\n try:\n data['MUONEFF'] = np.float32(table.meta['MUONEFF'])\n except KeyError:\n data['MUONEFF'] = np.float32(-1)\n\n # Calculate some summary statistics for important event columns\n data['EVENT_COUNT'] = len(table)\n data['EVENT_TIME_MIN'] = table['TIME'].min()\n data['EVENT_TIME_MAX'] = table['TIME'].max()\n data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))\n data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))\n data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))\n\n return data",
"def summary_info_events(filename):\n # filename = self.out_filename('events')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='EVENTS')\n data = dict()\n \n # Copy over header info to the summary table\n data['RA_PNT'] = np.float32(table.meta['RA_PNT'])\n data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])\n #data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])\n #data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])\n data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])\n data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])\n data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])\n data['ONTIME'] = np.float32(table.meta['ONTIME'])\n data['LIVETIME'] = np.float32(table.meta['LIVETIME'])\n data['DEADC'] = np.float32(table.meta['DEADC'])\n\n MJDREFI = table.meta['MJDREFI']\n MJDREFF = table.meta['MJDREFF']\n MJDREF = MJDREFI + MJDREFF\n\n TSTART_MET = table.meta['TSTART'] / 3600. / 24.\n TSTOP_MET = table.meta['TSTOP'] / 3600. / 24.\n\n start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')\n stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')\n\n data['TSTART'] = np.float32(start_time.utc.mjd)\n data['TSTOP'] = np.float32(stop_time.utc.mjd)\n data['TSTART_STR'] = str(start_time.utc.iso[:-4])\n data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])\n\n data['N_TELS'] = table.meta['N_TELS']\n data['TELLIST'] = table.meta['TELLIST']\n try:\n data['OBJECT'] = table.meta['OBJECT']\n except KeyError:\n data['OBJECT'] = \"\"\n data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])\n data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])\n\n # data['OBS_MODE'] = table.meta['OBS_MODE']\n\n try:\n data['MUONEFF'] = np.float32(table.meta['MUONEFF'])\n except KeyError:\n data['MUONEFF'] = np.float32(-1)\n\n # Calculate some summary statistics for important event columns\n data['EVENT_COUNT'] = len(table)\n data['EVENT_TIME_MIN'] = table['TIME'].min()\n data['EVENT_TIME_MAX'] = table['TIME'].max()\n data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))\n data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))\n data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))\n\n return data",
"def tap(self, locator, x_offset=None, y_offset=None, count=1):\r\n driver = self._current_application()\r\n el = self._element_find(locator, True, True)\r\n action = TouchAction(driver)\r\n action.tap(el, x_offset, y_offset, count).perform()",
"def compute_tap_features(xtaps, ytaps, t, threshold=20):\n import numpy as np\n\n from mhealthx.extractors.tapping import compute_drift, \\\n compute_tap_intervals, compute_intertap_gap\n from mhealthx.extractors.tapping import TapFeatures as T\n from mhealthx.signals import signal_features\n\n if isinstance(xtaps, list):\n xtaps = np.array(xtaps)\n if isinstance(ytaps, list):\n ytaps = np.array(ytaps)\n if isinstance(t, list):\n t = np.array(t)\n\n # Intertap intervals:\n ipress, intervals = compute_tap_intervals(xtaps, t, threshold)\n\n # Filter data:\n t = t[ipress]\n xtaps = xtaps[ipress]\n ytaps = ytaps[ipress]\n\n # Delta between fastest and slowest intertap intervals:\n T.intertap_gap10, T.intertap_gap25, \\\n T.intertap_gap50 = compute_intertap_gap(intervals)\n\n # Left and right taps and drift:\n mean_x = np.mean(xtaps)\n iL = np.where(xtaps < mean_x)\n iR = np.where(xtaps >= mean_x)\n xL = xtaps[iL]\n yL = ytaps[iL]\n xR = xtaps[iR]\n yR = ytaps[iR]\n driftL = compute_drift(xL, yL)\n driftR = compute_drift(xR, yR)\n\n # Number of taps:\n T.num_taps = xtaps.size\n T.num_taps_left = xL.size\n T.num_taps_right = xR.size\n\n # Time:\n T.time_rng = t[-1] - t[0]\n\n # Intertap interval statistics:\n T.intertap_num, T.intertap_min, T.intertap_max, T.intertap_rng, \\\n T.intertap_avg, T.intertap_std, T.intertap_med, T.intertap_mad, \\\n T.intertap_kurt, T.intertap_skew, T.intertap_cvar, T.intertap_lower25, \\\n T.intertap_upper25, T.intertap_inter50, T.intertap_rms, \\\n T.intertap_entropy, T.intertap_tk_energy = signal_features(intervals)\n\n # Tap statistics:\n T.xL_num, T.xL_min, T.xL_max, T.xL_rng, T.xL_avg, T.xL_std, \\\n T.xL_med, T.xL_mad, T.xL_kurt, T.xL_skew, T.xL_cvar, \\\n T.xL_lower25, T.xL_upper25, T.xL_inter50, T.xL_rms, \\\n T.xL_entropy, T.xL_tk_energy = signal_features(xL)\n\n T.xR_num, T.xR_min, T.xR_max, T.xR_rng, T.xR_avg, T.xR_std, \\\n T.xR_med, T.xR_mad, T.xR_kurt, T.xR_skew, T.xR_cvar, \\\n T.xR_lower25, T.xR_upper25, T.xR_inter50, T.xR_rms, \\\n T.xR_entropy, T.xR_tk_energy = signal_features(xR)\n\n # T.yL_num, T.yL_min, T.yL_max, T.yL_rng, T.yL_avg, T.yL_std, \\\n # T.yL_med, T.yL_mad, T.yL_kurt, T.yL_skew, T.yL_cvar, \\\n # T.yL_lower25, T.yL_upper25, T.yL_inter50, T.yL_rms, \\\n # T.yL_entropy, T.yL_tk_energy = signal_features(yL)\n\n # T.yR_num, T.yR_min, T.yR_max, T.yR_rng, T.yR_avg, T.yR_std, \\\n # T.yR_med, T.yR_mad, T.yR_kurt, T.yR_skew, T.yR_cvar, \\\n # T.yR_lower25, T.yR_upper25, T.yR_inter50, T.yR_rms, \\\n # T.yR_entropy, T.yR_tk_energy = signal_features(yR)\n\n # Drift statistics:\n T.driftL_num, T.driftL_min, T.driftL_max, T.driftL_rng, T.driftL_avg, \\\n T.driftL_std, T.driftL_med, T.driftL_mad, T.driftL_kurt, T.driftL_skew, \\\n T.driftL_cvar, T.driftL_lower25, T.driftL_upper25, T.driftL_inter50, \\\n T.driftL_rms, T.driftL_entropy, T.driftL_tk_energy = \\\n signal_features(driftL)\n\n T.driftR_num, T.driftR_min, T.driftR_max, T.driftR_rng, T.driftR_avg, \\\n T.driftR_std, T.driftR_med, T.driftR_mad, T.driftR_kurt, T.driftR_skew, \\\n T.driftR_cvar, T.driftR_lower25, T.driftR_upper25, T.driftR_inter50, \\\n T.driftR_rms, T.driftR_entropy, T.driftR_tk_energy = \\\n signal_features(driftR)\n\n return T",
"def create_annotation(raw):\n annotation_pandas = pd.DataFrame(columns=[\"onset\", \"duration\", \"description\"])\n for idx, event in enumerate(raw.annotations):\n annotation_pandas.loc[idx] = [\n event[\"onset\"],\n event[\"duration\"],\n event[\"description\"],\n ]\n return annotation_pandas",
"def on_eventBox_motion_notify_event(self, widget, data=None):\n\n if self.enabled == True:\n found = False\n for m in self.map:\n x1, y1, x2, y2, xpage, xpart = m\n if x1 <= data.x <= x2 and y1 <= data.y <= y2:\n found = True\n break\n if found == True:\n widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND1))\n else:\n widget.window.set_cursor(None)",
"def event_ending_point_extractor(row) -> int:\n to_return = None\n # First, define the variables that we will need for the rest of this\n # function.\n positions_list = literal_eval(row[\"positions\"])\n assert isinstance(positions_list, list)\n assert 1 <= len(positions_list) <= 2\n\n # Next, extract the starting and ending positions.\n starting_x = positions_list[0].get(\"x\")\n starting_y = positions_list[0].get(\"y\")\n\n try:\n ending_x = positions_list[1].get(\"x\")\n raw_ending_y = positions_list[1].get(\"y\")\n except IndexError:\n # If the event is one where there is no ending point to list (i.e.,\n # a foul).\n ending_x, raw_ending_y = starting_x, starting_y\n\n ending_y = (raw_ending_y/100)*69\n\n # Finally, validate and return the result.\n to_return = [ending_x, ending_y]\n\n return to_return",
"def _pressed(self, evt):\n x, y, widget = evt.x, evt.y, evt.widget\n item = widget.identify_row(y)\n column = widget.identify_column(x)\n\n if not column or not item in self._items:\n # clicked in the weekdays row or just outside the columns\n return\n\n item_values = widget.item(item)['values']\n if not len(item_values): # row is empty for this month\n return\n\n text = item_values[int(column[1]) - 1]\n if not text: # date is empty\n return\n\n bbox = widget.bbox(item, column)\n if not bbox: # calendar not visible yet\n return\n\n # update and then show selection\n text = '%02d' % text\n self._selection = (text, item, column)\n self._show_selection(text, bbox)",
"def on_double_tap_up(self, event_name: str, data: dict, kwargs: dict) -> None:\n pass"
] | [
"0.5943405",
"0.57823676",
"0.5738286",
"0.534211",
"0.52757245",
"0.5235189",
"0.52053845",
"0.5152484",
"0.5096135",
"0.5053855",
"0.5003967",
"0.49919787",
"0.49579182",
"0.49561313",
"0.49047977",
"0.49045599",
"0.49045599",
"0.4898684",
"0.4891326",
"0.48886275",
"0.48792967",
"0.48561555",
"0.48464343",
"0.48195255",
"0.47994003",
"0.47851577",
"0.47769722",
"0.47743085",
"0.47601223",
"0.47472116"
] | 0.6248036 | 0 |
Take the natural log of the specified column | def log_column(data: DataFrame, column: str):
return data[column].map(lambda x: np.log(np.absolute(x))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def log_prob_from_logits(x):\n axis = len(x.shape) - 1\n m = x.max(dim=axis, keepdim=True)[0]\n return x - m - torch.log(torch.exp(x - m).sum(dim=axis, keepdim=True))",
"def ln(x):\n return log(x, const.e)",
"def log(df, cols, base=2, invert=None):\r\n if base == 2:\r\n for c in cols:\r\n df[f\"log2_{c}\"] = np.log2(df[c])\r\n elif base==10:\r\n for c in cols:\r\n df[f\"log10_{c}\"] = np.log10(df[c])\r\n else:\r\n print(\"This base is not implemented!\")\r\n if invert is not None:\r\n lcols = df.filter(regex=\"^log\").columns\r\n df[lcols] = df[lcols] * invert\r\n return df",
"def log_cust(x):\n if type(x) != str:\n if x < 0:\n return 0\n elif x == 0:\n return 0\n elif x > 0:\n return np.log(x)",
"def make_ln_func(variable):\n def safe_ln_queryset(qs):\n \"\"\"Takes the natural log of a queryset's values and handles zeros\"\"\"\n vals = qs.values_list(variable, flat=True)\n ret = np.log(vals)\n ret[ret == -np.inf] = 0\n return ret\n return safe_ln_queryset",
"def safelog(x):\n #return np.log(x)\n return np.log(np.clip(x,floor,np.inf))",
"def _convert_normlogprice(self, series):\n try:\n return np.log(series.div(series[0]))\n except:\n raise TypeError('ERROR: Could not transform prices to log function. Check price history data.')",
"def my_log(num):\n\n if num == 0.0:\n return -9999999999\n return math.log(num)",
"def log_prior(x):\n logp = (-0.5 * x.pow(2) - torch.tensor(2 * math.pi).sqrt().log()).sum(dim=1)\n return logp",
"def log_prob_from_logits(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))",
"def calc_entropy(column):\n # Compute the counts of each unique value in the column\n counts = numpy.bincount(column)\n # Divide by the total column length to get a probability\n probabilities = counts / len(column)\n \n # Initialize the entropy to 0\n entropy = 0\n # Loop through the probabilities, and add each one to the total entropy\n for prob in probabilities:\n if prob > 0:\n entropy += prob * math.log(prob, 2)\n \n return -entropy",
"def get_log(p):\n if p==0:\n return 0.\n return p*np.log2(p)",
"def calc_entropy(column):\r\n # Compute the counts of each unique value in the column\r\n counts = numpy.bincount(column)\r\n # Divide by the total column length to get a probability\r\n probabilities = counts / len(column)\r\n \r\n # Initialize the entropy to 0\r\n entropy = 0\r\n # Loop through the probabilities, and add each one to the total entropy\r\n for prob in probabilities:\r\n if prob > 0:\r\n entropy += prob * math.log(prob, 2)\r\n \r\n return -entropy",
"def log1p(x):\n return Log1p().apply((x,))[0]",
"def ilog(x,delta):\n if(delta < x and x < 1.0 - delta):\n return np.log( -np.log(x) )\n elif(x < delta):\n return np.log( -np.log(delta) )\n else: \n return np.log( -np.log(1.0 - delta) )",
"def log_prob_from_logits(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x-m), axis, keep_dims=True))",
"def Log(num):\n return math.log(float(num))",
"def _log_linear_interpolation(predictions):\n log_probs = utils.average_arrays([mx.nd.log(p) for p in predictions])\n return -mx.nd.log(mx.nd.softmax(log_probs))",
"def log(self, base):\n\n\t\tvalues = map(lambda x: x > 0, self.val)\n\t\tif not all(values):\n\t\t\traise ValueError(\"Non-positive number encountered in log.\")\n\t\telse:\n\t\t\tval = np.array([np.math.log(v, base) for v in self.val])\n\t\t\tif len(self.der.shape):\n\t\t\t\tto_multiply = 1 / np.multiply(np.log(base), self.val)\n\t\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\t\tder = np.multiply(to_multiply, self.der)\n\t\t\telse:\n\t\t\t\tder = None\n\t\treturn Var(val, der)",
"def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:\n x = torch.clamp(x, eps, 1.0 - eps)\n return torch.log(x / (1.0 - x))",
"def logm(self, x):\n\n if K.backend() == 'theano':\n # construct theano tensor operation\n from theano.tensor.nlinalg import svd, diag\n from theano.tensor.elemwise import Elemwise\n from theano.scalar import log\n import theano.tensor as T\n # This implementation would be extremely slow. but efficient?\n u, d, v = svd(x)\n d += self.eps\n inner = diag(T.log(d))\n res = T.dot(u, T.dot(inner, v))\n return res\n else:\n from kyu.tensorflow.ops.svd_gradients import batch_matrix_log\n return batch_matrix_log(x, self.eps)",
"def log10(tensor):\n return log(tensor, base=10)",
"def log_features(data, columns):\n for col in columns:\n # deal with 0/1 values\n if np.sum(data[col] == 0) > 0:\n print('Replacing 0s with 0.025...')\n data.loc[data[col] == 0, col] = 0.025\n\n data[col] = np.log(data[col])",
"def log2(tensor):\n return log(tensor, base=2)",
"def log_sum_exp(x):\n x_max = x.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max",
"def log(tensor, base=np.e):\n if base == np.e:\n return _elementary_op(tensor, np.log, lambda x: 1 / x)\n return log(tensor) / log(base)",
"def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max",
"def log_sum_exp(x):\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max",
"def sentence_logprob(self, sentence):\n line = get_ngrams(sentence,3)\n log_por = 0.0\n for item in line:\n raw_por = self.smoothed_trigram_probability(item)\n log_por = log_por+math.log2(raw_por)\n\n return float(log_por)",
"def log10_inplace(a):"
] | [
"0.69506574",
"0.678407",
"0.66785324",
"0.66712946",
"0.666371",
"0.66378677",
"0.6517011",
"0.6511074",
"0.64745414",
"0.64531463",
"0.644632",
"0.6446281",
"0.6436689",
"0.64360815",
"0.6432946",
"0.64302266",
"0.6428101",
"0.64183277",
"0.64153445",
"0.640937",
"0.63887554",
"0.6362652",
"0.63556707",
"0.6342273",
"0.6325602",
"0.6298407",
"0.62964445",
"0.62964445",
"0.62907225",
"0.6280923"
] | 0.77828926 | 0 |
Make plots for each reading showing the time series during the tap, and write them to the Plots folder. The plots will have the mean within each time segment marked. | def plot_tap(file: str, before: DataFrame, during: DataFrame, after: DataFrame, time_col: str):
print("Making plots at time " + str(before[time_col].iloc[0]))
for file_name in file_names:
for y in y_columns[file_name]:
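# Overlay the before, during, and after segments as one scatter plot on shared axes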
ax = before.plot(time_col, y, kind = 'scatter', color = 'blue', label = 'Before Tap')
after.plot(time_col, y, kind = 'scatter', color = 'red', label = 'After Tap', ax = ax)
during.plot(time_col, y, kind = 'scatter', color = 'black', label = 'During Tap', ax = ax)
plt.axes(ax)
plt.xlabel('Event Time')
plt.ylabel(y)
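# Left-hand anchor a little before the data so the dashed mean lines and their labels have room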
min_x = before[time_col].iloc[0] - (before[time_col].iloc[1] - before[time_col].iloc[0]) * 50
min_y = min([min(during[y]), min(before[y]), min(after[y])])
# Mark the mean during tap event (Feature 1)
mean_during = mean(during[y])
mean_before = mean(before[y])
mean_after = mean(after[y])
plt.hlines(y = mean_during, xmin = min_x, xmax = during[time_col].iloc[-1], linestyle='dashed', \
color='black')
plt.annotate(xy = (min_x, mean_during), s = 'avgDuringTap')
# Mark the mean before
plt.hlines(y = mean_before, xmin = min_x, xmax = before[time_col].iloc[-1], linestyle='dashed', \
color='blue')
plt.annotate(xy = (min_x, mean_before), s = 'avg100msBefore')
# Mark the mean after
plt.hlines(y = mean_after, xmin = min_x, xmax = after[time_col].iloc[-1], linestyle='dashed', \
color='red')
plt.annotate(xy = (min_x, mean_after), s = 'avg100msAfter')
plt.legend()
plt.savefig(file+'_'+y+'_time_'+str(before[time_col].iloc[0]) + '.png')
plt.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_plots(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n self.sse_plot()\n self.avg_sse_plot()",
"def initial_plots(runs):\n for run in runs.keys():\n meta = runs[run]\n plot_pdfs(meta)\n plot_priorsamps(meta)\n plot_ivals(meta)\n# if meta.truNz is not None:\n# plot_true(meta)\n timesaver(meta,'iplot',meta.key)",
"def plots(self, events=None, title=None):\n data = self.data\n P = PH.regular_grid(3 , 1, order='columnsfirst', figsize=(8., 6), showgrid=False,\n verticalspacing=0.08, horizontalspacing=0.08,\n margins={'leftmargin': 0.07, 'rightmargin': 0.20, 'topmargin': 0.03, 'bottommargin': 0.1},\n labelposition=(-0.12, 0.95))\n scf = 1e12\n ax = P.axarr\n ax = ax.ravel()\n PH.nice_plot(ax)\n for i in range(1,2):\n ax[i].get_shared_x_axes().join(ax[i], ax[0])\n # raw traces, marked with onsets and peaks\n tb = self.timebase[:len(data)]\n ax[0].plot(tb, scf*data, 'k-', linewidth=0.75, label='Data') # original data\n ax[0].plot(tb[self.onsets], scf*data[self.onsets], 'k^', \n markersize=6, markerfacecolor=(1, 1, 0, 0.8), label='Onsets')\n if len(self.onsets) is not None:\n# ax[0].plot(tb[events], data[events], 'go', markersize=5, label='Events')\n# ax[0].plot(tb[self.peaks], self.data[self.peaks], 'r^', label=)\n ax[0].plot(tb[self.smpkindex], scf*np.array(self.smoothed_peaks), 'r^', label='Smoothed Peaks')\n ax[0].set_ylabel('I (pA)')\n ax[0].set_xlabel('T (s)')\n ax[0].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n \n # deconvolution trace, peaks marked (using onsets), plus threshold)\n ax[1].plot(tb[:self.Crit.shape[0]], self.Crit, label='Deconvolution') \n ax[1].plot([tb[0],tb[-1]], [self.sdthr, self.sdthr], 'r--', linewidth=0.75, \n label='Threshold ({0:4.2f}) SD'.format(self.sdthr))\n ax[1].plot(tb[self.onsets]-self.idelay, self.Crit[self.onsets], 'y^', label='Deconv. Peaks')\n if events is not None: # original events\n ax[1].plot(tb[:self.Crit.shape[0]][events], self.Crit[events],\n 'ro', markersize=5.)\n ax[1].set_ylabel('Deconvolution')\n ax[1].set_xlabel('T (s)')\n ax[1].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n# print (self.dt, self.template_tmax, len(self.template))\n # averaged events, convolution template, and fit\n if self.averaged:\n ax[2].plot(self.avgeventtb[:len(self.avgevent)], scf*self.avgevent, 'k', label='Average Event')\n maxa = np.max(self.sign*self.avgevent)\n #tpkmax = np.argmax(self.sign*self.template)\n if self.template is not None:\n maxl = int(np.min([len(self.template), len(self.avgeventtb)]))\n temp_tb = np.arange(0, maxl*self.dt, self.dt)\n #print(len(self.avgeventtb[:len(self.template)]), len(self.template))\n ax[2].plot(self.avgeventtb[:maxl], scf*self.sign*self.template[:maxl]*maxa/self.template_amax, \n 'r-', label='Template')\n # compute double exp based on rise and decay alone\n # print('res rise: ', self.res_rise)\n # p = [self.res_rise.x[0], self.res_rise.x[1], self.res_decay.x[1], self.res_rise.x[2]]\n # x = self.avgeventtb[:len(self.avg_best_fit)]\n # y = self.doubleexp(p, x, np.zeros_like(x), risepower=4, fixed_delay=0, mode=0)\n # ax[2].plot(x, y, 'b--', linewidth=1.5)\n tau1 = np.power(10, (1./self.risepower)*np.log10(self.tau1*1e3)) # correct for rise power\n tau2 = self.tau2*1e3\n ax[2].plot(self.avgeventtb[:len(self.avg_best_fit)], scf*self.avg_best_fit, 'c--', linewidth=2.0,\n label='Best Fit:\\nRise Power={0:.2f}\\nTau1={1:.3f} ms\\nTau2={2:.3f} ms\\ndelay: {3:.3f} ms'.\n format(self.risepower, self.res_rise.x[1]*1e3, self.res_decay.x[1]*1e3, self.bfdelay*1e3))\n # ax[2].plot(self.avgeventtb[:len(self.decay_fit)], self.sign*scf*self.rise_fit, 'g--', linewidth=1.0,\n # label='Rise tau {0:.2f} ms'.format(self.res_rise.x[1]*1e3))\n # ax[2].plot(self.avgeventtb[:len(self.decay_fit)], self.sign*scf*self.decay_fit, 'm--', linewidth=1.0,\n # label='Decay tau {0:.2f} ms'.format(self.res_decay.x[1]*1e3))\n if title is not None:\n 
P.figure_handle.suptitle(title)\n ax[2].set_ylabel('Averaged I (pA)')\n ax[2].set_xlabel('T (s)')\n ax[2].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n if self.fitted:\n print('measures: ', self.risetenninety, self.decaythirtyseven)\n mpl.show()",
"def plotPerTimeStamp(options):\n name = options['name'] + '_' + options['scan'] + '_perTime'\n if options['extra']:\n name += '_' + options['extra']\n f = openRootFileR(options['name']+'_perTime')\n histname = plotName(name, timestamp=False)\n filename = plotName(name, timestamp=True)\n filepath = plotPath(name, timestamp=True)\n print '<<< Save plot:', filepath\n hist = f.Get(histname)\n hist.SetErrorOption(options['error'])\n if options['big']:\n canvas = TCanvas('c', '', 8000, 1200)\n else:\n canvas = TCanvas('c', '', 1400, 500)\n canvas.SetLogy(options['logy'])\n gStyle.SetOptStat(options['optstat'])\n hist.Draw()\n gPad.Update()\n hist.GetXaxis().SetTimeDisplay(1)\n hist.GetXaxis().SetTimeFormat('#splitline{%d.%m.%y}{%H:%M:%S}%F1969-12-31' \\\n +' 22:00:00')\n hist.GetXaxis().SetLabelOffset(0.03)\n hist.GetXaxis().SetTitle('')\n if 'xmin' in options and 'xmax' in options:\n hist.GetXaxis().SetRangeUser(options['xmin'], options['xmax'])\n hist.GetYaxis().SetTitle(options['ytitle'])\n hist.GetYaxis().SetTitleOffset(1.2)\n for axis in [hist.GetXaxis(), hist.GetYaxis()]:\n axis.SetTitleFont(133)\n axis.SetTitleSize(16)\n axis.SetLabelFont(133)\n axis.SetLabelSize(12)\n axis.CenterTitle()\n if options['big']:\n axis.SetTickLength(0.01)\n if options['big']:\n hist.GetYaxis().SetTitleOffset(0.25)\n drawSignature(filename)\n gPad.Modified()\n gPad.Update()\n if options['retrn']:\n return [canvas, hist, f]\n else:\n canvas.Print(filepath)\n canvas.Close()\n closeRootFile(f, options['name']+'_perTime')",
"def plot_timeline_overview(logs):\n\tfig, ax = plt.subplots(figsize=(11,6))\n\tfig.autofmt_xdate()\n\tc = 0\n\tline2D_array = []\n\tplot_data_dict = {}\n\tfor l in logs:\n\t\tplot_data, _, dates, _ = l.give_plot_data()\n\t\ttmp, = ax.plot(dates, [c]*len(dates), label=l.name, picker=10, marker='.', linestyle='-', linewidth=0.05, ms=5)\n\t\tplot_data_dict[tmp.get_c()] = plot_data\n\t\tline2D_array.append(tmp)\n\t\tc += 1\n\tmyFmt = DateFormatter(\"%Y %d.%b %H:%M\")\n\tax.xaxis.set_major_formatter(myFmt)\n\tax.set_yticks(range(0,len(logs)))\n\tax.set_yticklabels([x.name for x in logs])\n\tnames = ' and '.join([x.name for x in logs])\n\tplt.title('Analysis of the files ' + names)\n\tt = 0.15+(0.1)*len(logs)\n\tplt.subplots_adjust(left=0.23, bottom=0.2, right=0.9, top=t)\n\n\tannot = ax.annotate(\"\", xy=(0,0), xytext=(0.01,0.01) ,textcoords='figure fraction', bbox=dict(boxstyle=\"round\", fc=\"cyan\"), arrowprops=dict(arrowstyle=\"->\"))\n\tannot.set_visible(False)\n\tax.set_xlabel('timestamps in UTC')\n\tax.set_ylabel('log files')\n\n\tdef update_annot(l,ind):\n\t\tplot_data = plot_data_dict[l.get_c()]\n\t\tx,y = l.get_data()\n\t\tannot.xy = (x[ind[\"ind\"][0]], y[ind[\"ind\"][0]])\n\t\ttext = plot_data[ind[\"ind\"][0]]\n\t\tannot.set_text(text)\n\t\tannot.get_bbox_patch().set_alpha(0.4)\n\n\tdef hover(event):\n\t\tvis = annot.get_visible()\n\t\tif event.inaxes == ax:\n\t\t\tfor l in line2D_array:\n\t\t\t\tcont, ind = l.contains(event)\n\t\t\t\tif cont:\n\t\t\t\t\tupdate_annot(l,ind)\n\t\t\t\t\tannot.set_visible(True)\n\t\t\t\t\tfig.canvas.draw_idle()\n\t\t\t\telse:\n\t\t\t\t\tif vis:\n\t\t\t\t\t\tannot.set_visible(False)\n\t\t\t\t\t\tfig.canvas.draw_idle()\n\n\tfig.canvas.mpl_connect(\"motion_notify_event\", hover)\n\tfig.canvas.mpl_connect('key_press_event', _quit_figure)",
"def plot_traces(self, cellname, targettime, historytime, srctype, syntype):\n self.tstart = targettime - historytime\n self.istart = int(self.tstart / self.plotdt + 0.5)\n self.tend = targettime + historytime\n self.iend = int(self.tend / self.plotdt + 0.5)\n self.tseries = np.linspace(self.tstart, self.tend, \n self.iend - self.istart)\n if cellname not in self.datafile['/Vm']:\n return []\n vm = self.datafile['/Vm/' + cellname] \n plt.plot(self.tseries, \n normalize(vm[self.istart:self.iend]),\n label=cellname)\n stimdata = np.asarray(self.datafile['/stimulus/stim_bg'])\n stim_start = int(self.tstart/self.simdt+0.5)\n stim_end = int(self.tend/self.simdt+0.5)\n stimdata = stimdata[stim_start: stim_end]\n plt.plot(np.linspace(self.tstart, self.tend, len(stimdata)),\n normalize(stimdata),\n 'r--', \n label='STIMULUS')\n precells = self.plot_presynaptic(cellname, srctype, syntype)\n return precells",
"def plot_1():\n p_files = []\n filename = \"energy_data_2D_80\"\n for file in sorted(os.listdir(folder)):\n if file.startswith(filename):\n p_files.append(os.path.join(folder,file))\n T_list = []\n fig, ax = plt.subplots()\n for p_file in p_files[3::3]:\n T = (os.path.splitext(os.path.basename(p_file))[0]).split('_',4)[4]\n #print(T)\n E = []\n t = []\n if (T not in T_list):\n T_list.append(T)\n with open(p_file) as csvfile:\n lines = csv.reader(csvfile, delimiter=' ')\n sweep = 0\n for row in lines:\n E.append(float(row[0]))\n t.append(sweep)\n sweep += 1\n ax.plot(t[0:200], E[0:200],label=\"T = \"+format(T[0:3]))\n ax.set_title(\"Energy per bond vs Time\")\n ax.set_ylabel(\"e / J\")\n ax.set_xlabel(\"t / sweeps\")\n ax.legend()\n\n fig.savefig(folder2+\"energy_vs_time.png\")\n fig.savefig(texfolder+\"energy_vs_time.pdf\")",
"def __do_analysis(self):\n #Step 1: connect to mongodb and pick a streamer\n dbclient = db_connect.DBClient()\n streamer_data = dbclient.analyze_number_of_stream_viewers(self.streamer)\n streamer_messeges_data = dbclient.analyzeStream(self.streamer)\n\n timearr = []\n messagesarr = []\n streamer_timearr = []\n num_chattersarr = []\n\n #create time and messages array for plotting purposes\n for entry in streamer_messeges_data:\n timearr.append(entry['start_time'])\n messagesarr.append(entry['messeges_count'] * entry['messeges_count'])\n #print(entry['start_time'])\n\n #create time and chatters array for plotting purposes\n for entry in streamer_data:\n streamer_timearr.append(entry['deltatime_from_start_of_clip'])\n num_chattersarr.append(entry['num_viewers'])\n\n # print('start time: ' + str(timearr[0]))\n # print('end time: ' + str(timearr[-1]))\n # print('duration: ' + str(timearr[-1] - timearr[0]))\n # print('average views/min = ' + str(sum(messagesarr) / len(messagesarr)))\n\n average_message_count = sum(messagesarr) / len(messagesarr)\n\n averagearr = []\n plotting_time_arr = []\n labelarr = []\n\n for i in range(len(timearr)):\n averagearr.append(average_message_count*1.8)\n #print(str(timearr[i]) + ' converts to ' + str(datetime.datetime(2020, 1, 1, 0, 0) + timearr[i]))\n plotting_time_arr.append(datetime.datetime(2020, 1, 1, 0, 0) + timearr[i])\n labelarr.append(str(i))\n\n plotting_streamer_timearr = []\n for i in range(len(streamer_timearr)):\n plotting_streamer_timearr.append(datetime.datetime(2020, 1, 1, 0, 0) + streamer_timearr[i])\n\n #plot messages and cuttoff\n messeges_over_time_fig = pyplot.figure(1)\n messeges_over_time_fig.set_figheight(15)\n messeges_over_time_fig.set_figwidth(30)\n messeges_over_time_fig.suptitle(self.streamer + \"'s video data\")\n messeges_over_time_sub = messeges_over_time_fig.add_subplot(211)\n\n pyplot.plot(plotting_time_arr,messagesarr,label='messages/min')\n dots = pyplot.plot(plotting_time_arr,messagesarr,'bo',label='messages/min')\n\n #label dots\n count = 0\n last_entry_was_above_line = False\n for i in range(len(plotting_time_arr)):\n #print(str(count) +': comparing ' + str(messagesarr[i]) + ' with ' + str(averagearr[i]))\n if(messagesarr[i] > averagearr[i]):\n if(last_entry_was_above_line):\n #Don't increment the count because this is part of the same clip\n count = count\n else:\n #new clip above the line, increment clip count\n count = count + 1\n messeges_over_time_sub.annotate(count,xy=(plotting_time_arr[i],messagesarr[i]))\n last_entry_was_above_line = True\n else:\n last_entry_was_above_line = False\n # messeges_over_time_sub.annotate('NA',xy=(plotting_time_arr[i],messagesarr[i]))\n\n #finish plotting\n pyplot.plot(plotting_time_arr, averagearr,'',label='average')\n pyplot.gcf().autofmt_xdate()\n pyplot.ylabel('Messeges*Messeges')\n pyplot.xlabel('Time')\n\n viewers_over_time_sub = messeges_over_time_fig.add_subplot(212)\n\n pyplot.plot(plotting_streamer_timearr,num_chattersarr,label='num chatters')\n pyplot.ylabel('Chatters')\n pyplot.xlabel('Time')\n\n pyplot.tight_layout()\n pyplot.savefig(output_file_location+self.streamer+'.png')\n print('saved chart to ' + output_file_location+self.streamer+'.png')\n # pyplot.show()\n return average_message_count, streamer_messeges_data",
"def make_plots(self):\n\n # main fixation times data frame\n average_fixation_df = pd.DataFrame()\n\n # create a data frame with the fixation times for each participant, create a box plot with it,\n # and append it to the main data frame\n for idx, dataframe in enumerate(self.cGOM_dataframes):\n aois = EyeTracking.areas_of_interest(dataframe)\n participant_fixations = EyeTracking.fixations(aois, dataframe)\n\n Plot.make_boxplot(data_frame=participant_fixations,\n figure_save_path=self.PARTICIPANT_FIGURE_PATH.format(idx + 1),\n title='Average fixation duration: participant {}'.format(idx + 1),\n ylabel='Fixation duration [s]',\n xlabel='Area of interest'\n )\n\n average_fixation_df = average_fixation_df.append(participant_fixations, ignore_index=True)\n\n # create a bar plot and a box plot with the fixations of all participants or\n # do nothing if no cGOM data is provided\n try:\n Plot.make_boxplot(data_frame=average_fixation_df,\n figure_save_path=self.BOX_PLOT_FIGURE_PATH,\n title='Average fixation duration',\n ylabel='Fixation duration [s]',\n xlabel='Area of interest'\n )\n Plot.make_barplot(data_frame=average_fixation_df,\n figure_save_path=self.BAR_PLOT_FIGURE_PATH,\n title='Average fixation duration',\n ylabel='Fixation duration [s]',\n xlabel='Area of interest'\n )\n except ValueError:\n pass",
"def plot_multiple_timeline(logs, remove_redundant_entries, select_string):\n\tfig, ax = plt.subplots(figsize=(11,6))\n\tfig.autofmt_xdate()\n\tline2D_array = []\n\tplot_data_dict = {}\n\tfor l in logs:\n\t\tselected_sources = _transform_select_string(select_string,l)\n\t\tpld, lines, dates, _ = l.give_plot_data(remove_redundant_entries=remove_redundant_entries, sources=selected_sources)\n\t\ttmp, = ax.plot(dates, lines, label=l.name, picker=4, marker='.', linestyle='-', linewidth=0.5, ms=3.5)\n\t\tline2D_array.append(tmp)\n\t\tplot_data_dict[tmp.get_c()] = pld\n\tmyFmt = DateFormatter(\"%Y %d.%b %H:%M:%S\")\n\tax.xaxis.set_major_formatter(myFmt)\n\tnames = ' and '.join([x.name for x in logs])\n\tif remove_redundant_entries:\n\t\tplt.title('Analysis of the files ' + names +'\\n' + 'where all entries having the same timestamp are removed')\n\t\tplt.subplots_adjust(left=0.1, bottom=0.18, right=0.9, top=0.90)\n\telse:\n\t\tplt.title('Analysis of the files ' + names)\n\t\tplt.subplots_adjust(left=0.1, bottom=0.18, right=0.9, top=0.95)\n\tplt.legend()\n\tannot = ax.annotate(\"\", xy=(0,0), xytext=(0.01,0.01) ,textcoords='figure fraction', bbox=dict(boxstyle=\"round\", fc=\"cyan\"), arrowprops=dict(arrowstyle=\"->\"))\n\tannot.set_visible(False)\n\tax.set_xlabel('timestamps in UTC')\n\tax.set_ylabel('sequential id')\n\n\tdef update_annot(l,ind):\n\t\tplot_data = plot_data_dict[l.get_c()]\n\t\tx,y = l.get_data()\n\t\tannot.xy = (x[ind[\"ind\"][0]], y[ind[\"ind\"][0]])\n\t\tif remove_redundant_entries == 1:\n\t\t\ttext = plot_data[y[ind[\"ind\"][0]]-1]\n\t\telse:\n\t\t\ttemp = [x for x in plot_data if x.id == y[ind[\"ind\"][0]]]\n\t\t\ttext = temp[0]\n\t\tannot.set_text(text)\n\t\tannot.get_bbox_patch().set_alpha(0.4)\n\n\tdef hover(event):\n\t\tvis = annot.get_visible()\n\t\tif event.inaxes == ax:\n\t\t\tfor l in line2D_array:\n\t\t\t\tcont, ind = l.contains(event)\n\t\t\t\tif cont:\n\t\t\t\t\tupdate_annot(l,ind)\n\t\t\t\t\tannot.set_visible(True)\n\t\t\t\t\tfig.canvas.draw_idle()\n\t\t\t\telse:\n\t\t\t\t\tif vis:\n\t\t\t\t\t\tannot.set_visible(False)\n\t\t\t\t\t\tfig.canvas.draw_idle()\n\n\tfig.canvas.mpl_connect(\"motion_notify_event\", hover)\n\tfig.canvas.mpl_connect('key_press_event', _quit_figure)",
"def save_plots(self):\n pdir = os.path.splitext(self.filename)[0] + '_plots'\n if not os.path.exists(pdir):\n os.mkdir(pdir)\n\n for ii in range(self.uv.n_ant):\n fig, ax = self.plot_single_baseline_dual_pol(ii+1, ii+1)\n print \"Saving ant %i\"%ii\n plt.savefig(os.path.join(pdir, 'ant-%i.png'%ii))\n plt.clf()",
"def PlotTimes(metadata, data):\n\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style impulses')\n gp('set xtics 1')\n gp.clear()\n gp.xlabel('seconds')\n gp.ylabel('duration in second')\n gp.title(metadata.AsTitle())\n styles = {}\n line_style = 1\n\n for dataset in data:\n x = numpy.array(dataset.time, dtype='float_')\n if not dataset.name in styles:\n styles[dataset.name] = line_style\n line_style += 1\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='impulses ls %d' % styles[dataset.name])\n else: # no need to repeat a title that exists already.\n d = Gnuplot.Data(x, dataset.data,\n with_='impulses ls %d' % styles[dataset.name])\n\n gp.replot(d)\n gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')",
"def run_plots(self):\n # load the files\n self.pre_dark_file = os.path.join(self.input_dir, 'step_lastframe.fits')\n self.post_dark_file = os.path.join(self.input_dir, 'step_dark_current.fits')\n self.jump_file = os.path.join(self.input_dir, 'step_jump.fits')\n self.rate_file = os.path.join(self.input_dir, 'step_rate.fits')\n self.ramp_file = glob.glob(os.path.join(self.input_dir, '*.fits'))[0]\n\n # plots\n self.plot_jump_flags_image()\n self.plot_groupdq_flags(pixel=[884, 550])\n self.plot_ramps_pre_post_correction(pixel=[884, 550])",
"def real_time_plot(files):\n global len_data, first_iter, colors\n\n for i,F in enumerate(files):\n\n # Load data\n data = pylab.loadtxt(F, delimiter=',', skiprows=1, usecols=(5,6,7))\n\n # Check if new data\n if (len_data!= len(data[:,0])):\n\n # Plot\n label = ntpath.basename(F)\n label = label[0:-4]\n ax.plot(data[:,0], data[:,1], data[:,2], colors[i], label=label)\n\n pyplot.draw()\n\n # Update globals\n len_data = len(data[:,0])\n\n if (first_iter == True):\n ax.legend()\n first_iter = False",
"def updateplot(self):\n plotfiles = []\n try:\n self.plotter.reset()\n self.plotter.set_xrange(self.xrangemin.value(), self.xrangemax.value())\n self.plotter.set_yrange(self.yrangemin.value(), self.yrangemax.value())\n self.plotter.set_bgirange(self.bgintmin.value(), self.bgintmax.value())\n self.plotter.set_pkrange(self.halphamin.value(), self.halphamax.value())\n for n,pf in enumerate(self.selecteddata):\n tf = os.path.join(self.tempdir, \"tf%d\" % n)\n self.dfparser.writefile(tf, pf)\n plotfiles.append(tf)\n self.plotter.set_plot(plotfiles)\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()\n except plotter.Plotter_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()",
"def display_averaging(self):\r\n\r\n cwd = os.getcwd()\r\n path = cwd + \"/results\"\r\n df1 = pd.read_csv(path + \"/average_U.csv\") # black line\r\n df2 = pd.read_csv(path + \"/average_N.csv\") # green line\r\n chem = 25 # from 0 to 35\r\n\r\n s1 = df1.iloc[chem]\r\n s1.plot()\r\n\r\n plt.show()",
"def trace_plots(self, analytes=None, samples=None, ranges=False,\n focus=None, outdir=None, filt=None, scale='log',\n figsize=[10, 4], stats=True, stat='nanmean',\n err='nanstd', subset=None):\n if focus is None:\n focus = self.data[0].focus_stage\n if outdir is None:\n outdir = self.report_dir + '/' + focus\n if not os.path.isdir(outdir):\n os.mkdir(outdir)\n\n if samples is not None:\n subset = self.make_subset(samples)\n elif not hasattr(self, 'subsets'):\n self.make_subset()\n\n if subset is None:\n samples = self.subsets['All_Analyses']\n else:\n try:\n samples = self.subsets[subset]\n except:\n raise ValueError((\"Subset '{:s}' does not .\".format(subset) +\n \"exist.\\nRun 'make_subset' to create a\" +\n \"subset.\"))\n\n for s in tqdm(samples, desc='Drawing Plots'):\n f, a = self.data_dict[s].tplot(analytes=analytes, figsize=figsize,\n scale=scale, filt=filt,\n ranges=ranges, stats=stats,\n stat=stat, err=err, focus_stage=focus)\n # ax = fig.axes[0]\n # for l, u in s.sigrng:\n # ax.axvspan(l, u, color='r', alpha=0.1)\n # for l, u in s.bkgrng:\n # ax.axvspan(l, u, color='k', alpha=0.1)\n f.savefig(outdir + '/' + s + '_traces.pdf')\n # TODO: on older(?) computers raises\n # 'OSError: [Errno 24] Too many open files'\n plt.close(f)\n return",
"def write_plot(self):\n with open(self._graph_data_path, \"w+\") as f:\n run_time = self.start_time\n f.write(\"Time, Temperature\\n\")\n temperature = 0\n for step in self.profile[\"steps\"]:\n keys = list(step)\n if len(keys) > 0:\n if keys[0] == \"start\":\n temperature = step[\"start\"]\n if keys[0] == \"rest\":\n run_time += timedelta(minutes = step[\"rest\"])\n if keys[0] == \"ramp\":\n run_time += timedelta(minutes = step[\"ramp\"])\n temperature = step[\"to\"]\n if keys[0] == \"mashout\":\n temperature = step[\"mashout\"]\n time = run_time.strftime(\"%H:%M:%S, \")\n f.write(time + str(temperature) + \"\\n\")\n run_time += timedelta(minutes = 10)\n if keys[0] == \"jump\":\n temperature = step[\"jump\"]\n\n time = run_time.strftime(\"%H:%M:%S, \")\n f.write(time + str(temperature) + \"\\n\")\n else:\n logger.error(\"Can't make sense of \" + str(step))",
"def generatePlot(data):\n addendum = \"\"\n destination = \"D:\\\\Research\\\\scripts\\\\Results\\\\FullSet1\\\\$FilteredPlots\\\\take 4\\\\\"\n if len(data.detections.smallIncrease) != 0:\n addendum = \"small increases\\\\\"\n if len(data.detections.smallDecrease) != 0:\n addendum = \"small decreases\\\\\"\n if len(data.detections.largeIncrease) != 0:\n addendum = \"large increases\\\\\"\n if len(data.detections.largeDecrease) != 0:\n addendum = \"large decreases\\\\\"\n if addendum == \"\":\n addendum = \"no decreases\\\\\"\n \n plt.figure(1)\n plt.subplot(211)\n #print np.min(data.magdata), np.max(data.magdata)\n axes = plt.gca()\n axes.set_title(\"Year: '{year}, Day: {day}\".format(year=data.calendarDay[:2], day=data.calendarDay[3:] ))\n axes.set_ylim([np.min(data.magdata)-1.2,np.max(data.magdata)+0.25])\n axes.set_ylabel(r'$\\mathbf{B}$ (nT)' )\n\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes.xaxis.set_major_locator(dates.MinuteLocator())\n axes.xaxis.set_major_formatter(formats)\n \n br, = pp.plot(dates.date2num(data.timestamps),[row[0] for row in data.magdata],label='$B_r$')\n bt, = pp.plot(dates.date2num(data.timestamps),[row[1] for row in data.magdata],label='$B_t$')\n bn, = pp.plot(dates.date2num(data.timestamps),[row[2] for row in data.magdata],label='$B_n$')\n b0, = pp.plot(dates.date2num(data.timestamps),[row[3] for row in data.magdata],label='$B_0$')\n print len(data.detections.rotationBoundary)\n if len(data.detections.rotationBoundary) == 1:\n rotation, = pp.plot([dates.date2num(data.detections.rotationBoundary), dates.date2num(data.detections.rotationBoundary)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n else:\n for index, value in enumerate(data.detections.rotationBoundary):\n rotation, = pp.plot([dates.date2num(value), dates.date2num(value)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n if len(data.detections.rotationBoundary) != 0:\n pp.legend(handles=[br,bt,bn,b0,rotation], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n else:\n pp.legend(handles=[br,bt,bn,b0], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n\n start, end = axes.get_xlim()\n axes.xaxis.set_ticks(np.arange(start, end, (end-start)/5))\n \n \n\n plt.subplot(212)\n axes2 = plt.gca()\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes2.xaxis.set_major_locator(dates.MinuteLocator())\n axes2.xaxis.set_major_formatter(formats)\n axes2.set_ylabel(r'$\\theta$ (deg)' )\n rotations, = pp.plot(dates.date2num(data.detections.rotationTimeTags),data.detections.rotations)\n #pp.legend(handles=[rotations], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n \n\n outplotname = 'Plot ' + str(len(os.listdir(destination+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + '.pdf'\n completename = os.path.join(destination+addendum,outplotname)\n plt.savefig(completename, bboxinches='tight')\n plt.clf()\n\n outplotname = 'Plot ' + str(len(os.listdir(destination+'rawdata\\\\'+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + ' rawdata.csv'\n completename1 = os.path.join(destination+'rawdata\\\\'+addendum,outplotname)\n generateDataFile(data.rawdata,completename1)\n\n print \"Done generating plot...\"",
"def plot_stamps(stamps, fig=None, columns=3):\n num_stamps = len(stamps)\n num_rows = math.ceil(num_stamps / columns)\n\n # Create a new figure if needed.\n if fig is None:\n fig = plt.figure()\n\n for i in range(num_stamps):\n ax = fig.add_subplot(num_rows, columns, i + 1)\n ResultsVisualizer.plot_single_stamp(stamps[i], axes=ax)\n ax.set_title(f\"Time {i}\")",
"def plot_time_frames(self):\n\n fig = plt.figure()\n plt.grid(True)\n\n plt.ylim([-1.5,1.5])\n plt.xlim([0,1])\n\n for key in self.timeframes.keys():\n if key == 0:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)), linewidth=5)\n else:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)))\n\n plt.title(\"Wave at different times\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n # fig.savefig('results/pics_wave/vibrating_string_'+ self.type + '.png', dpi=150)",
"def _PlotGraph(self, event):\n self._rcvLock.acquire()\n for j in event.data[0].keys():\n data = event.data[0][j]\n #print data\n line = []\n for k in data.keys():\n if k in COLORS.keys():\n c = COLORS[k]\n else:\n c = 'black'\n line.append(plot.PolyLine(data[k], colour=c, width=1,\n legend=\"Node %d\"%(k,)))\n # To draw markers: default colour = black, size = 2\n # shapes = 'circle', 'cross', 'square', 'dot', 'plus'\n #marker = plot.PolyMarker(event.data[1], marker='triangle')\n\n # set up text, axis and draw\n if j == ERRORPLOT:\n t = \"Synchronization Error\"\n xa = \"Time [s]\"\n ya = \"Error [ms]\"\n elif j == TEMPPLOT:\n t = \"Temperature Index\"\n xa = \"Time [s]\"\n ya = \"Index\"\n elif j == SKEWPLOT:\n t = \"Frequency Error\"\n xa = \"Time [s]\"\n ya = \"Frequency Error [ppm]\"\n gc = plot.PlotGraphics(line, t, xa, ya)\n # Draw graphs for each plot\n self.plotter[j].Draw(gc, xAxis=(self._x_lower,\n self._x_upper), yAxis=(float(self._y_lower[j]),\n float(self._y_upper[j])))\n self._rcvLock.release()",
"def superplot(logs, remove_redundant_entries, select_string):\n\tfor l in logs:\n\t\tplot_bar_chart(l)\n\t\tplot_single(l,remove_redundant_entries,select_string)\n\t\tplot_single_file_colored(l, remove_redundant_entries, select_string)\n\tplot_multiple_timeline(logs,remove_redundant_entries,select_string)\n\tplot_timeline_overview(logs)\n\tshow()",
"def plot_time(time_to_complete, plot_num):\n average = []\n for i, point in enumerate(time_to_complete):\n average.append(sum(time_to_complete[:i+1])/ (i+1))\n plt.plot(time_to_complete, color= 'blue', label=\"Epoch Time\")\n plt.plot(average, color = 'red', label= \"Average Time\", zorder = 3)\n plt.legend()\n plt.title(\"Time to complete FetchReach\")\n plt.ylabel(\"Time (seconds)\")\n plt.xlabel(\"Number iterations\")\n plt.savefig(\"./plots/time/time_to_complete_{}.png\".format(plot_num))\n plt.clf()",
"def gentoplot(time):\n \n toplot = {}\n\n # Generates a list of movie paths in the data folder.\n files = dftf.batch_s('.') \n\n # Generates dft traces and plots for each roi in each movie.\n for file in files:\n os.chdir(file)\n print(os.path.basename(file))\n\n for col in COLS:\n \n if os.path.exists('params') == True:\n rawtracedata = dftf.TraceData(fname=RESULTS_FILE, paramsfile=PARAMS_FILE, \n corrparamsfile=CORRPARAMS_FILE, colname=col)\n td = rawtracedata.Processrawtrace(DFTSIZE, HZ_BOUND1, HZ_BOUND2)\n moviename = os.path.basename(os.path.abspath('.'))\n \n # Selects the area of the raw trace to plot.\n frames = time * td['fps']\n #print(frames)\n plottime = td['seltrace'][:frames]/10\n #print(len(plottime))\n ms = plottime-np.mean(plottime)\n xsec = np.linspace(0, len(plottime)/td['fps'], len(plottime))\n #print(xsec)\n condition = td['condition']\n toplot[moviename] = [xsec, ms, condition]\n print(np.max(ms), np.min(ms))\n \n return(toplot)",
"def run_and_plot(self):\n self.raw_processing()\n self.data_averaging_and_cleaning()\n\n print(self.organized_names)\n print(self.organized_film)\n print(self.organized_plank)\n\n height = self.organized_film\n bars = tuple(self.organized_names.copy())\n y_pos = np.arange(len(bars))\n\n plt.bar(y_pos, height)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('CFU/mL count')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()\n\n height2 = self.organized_plank\n\n plt.bar(y_pos, height2)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('Proportion of Biofilm CFUs to Planktonic CFUs')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()",
"def all(folder, mt=False):\n handles = []\n experiments = get_experiment_series(folder, mT=mt)\n for ex in experiments:\n if mt:\n handles.append(\n plt.plot(\n ex.distance,\n ex.weight,\n label='{}mm {}mT'.format(ex.height, ex.magnet))[0])\n else:\n handles.append(\n plt.plot(\n ex.distance,\n ex.weight,\n label='{}mm'.format(ex.height))[0])\n plt.legend()\n plt.show()",
"def plot(accessToken, collection):\n \n plt.xlabel('Date/Time')\n plt.ylabel('Sensor Value')\n plt.title(\"Sensors Monitor\")\n \n # to save png files\n i = 0\n \n # set interactive mode on\n plt.ion()\n \n # set figure to full screen\n mng = plt.get_current_fig_manager()\n mng.full_screen_toggle()\n\n while True:\n jsondata = getJsonData(accessToken)\n if jsondata:\n #limit date string\n jsondata[DATE] = jsondata[DATE][8:13]\n appendJsonData(jsondata, collection)\n \n # clear figure\n plt.clf()\n \n # limit samples to be viewed\n if (len(collection[DATE]) > SAMPLE_SIZE_LIMIT):\n plt.xticks(range(SAMPLE_SIZE_LIMIT), collection[DATE][-SAMPLE_SIZE_LIMIT:])\n plt.plot(collection[SENSOR1][-SAMPLE_SIZE_LIMIT:], 'k', label='sensor 1')\n plt.plot(collection[SENSOR2][-SAMPLE_SIZE_LIMIT:], 'b', label='sensor 2')\n plt.plot(collection[SENSOR3][-SAMPLE_SIZE_LIMIT:], 'g', label='sensor 3')\n plt.plot(collection[SENSOR4][-SAMPLE_SIZE_LIMIT:], 'r', label='sensor 4')\n else:\n plt.xticks(range(len(collection[DATE])), collection[DATE])\n plt.plot(collection[SENSOR1], 'k', label='sensor 1')\n plt.plot(collection[SENSOR2], 'b', label='sensor 2')\n plt.plot(collection[SENSOR3], 'g', label='sensor 3')\n plt.plot(collection[SENSOR4], 'r', label='sensor 4')\n \n plt.legend(loc='upper left')\n plt.show()\n \n # Take a screenshot on Gnome desktop\n if os.environ.get(\"XDG_MENU_PREFIX\").startswith(\"gnome\"):\n os.system(\"gnome-screenshot -f screenshot{}.png\".format(i))\n i = i+1\n \n #plt.pause(1)\n plt.pause(60*60) # one hour\n else:\n print(str(datetime.datetime.now()) + \" Empty json data\")",
"def fn_intensitytrace(file_name,folder,data,time,i,x,y):\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n figure_name=file_name+'_intensity_trace'\n for a in range(0,len(data),int(len(data)/2)):\n if i==a:\n x_coord=x[i+1]\n y_coord=y[i+1]\n max_int=np.max(data[i])\n min_int=np.min(data[i])\n #norm_int = [b / max_int for b in data[i]]\n plt.figure()\n #plt.plot(time[0:len(time)-1],norm_int,'g')\n plt.plot(time[0:len(time)-1],data[i],'g')\n plt.xlim(0, 100)\n plt.ylim(min_int, (max_int+100))\n plt.xlabel('Time (s)', fontname='Arial', fontsize=12)\n plt.ylabel('Photon counts (photons)', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial',fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '_'+str(x_coord)+','+str(y_coord)+'.png', dpi=500)\n\n return (plt.show())",
"def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()"
] | [
"0.67501664",
"0.63098764",
"0.6244525",
"0.62386",
"0.61215127",
"0.6111329",
"0.605923",
"0.6054306",
"0.6047053",
"0.6033733",
"0.60212713",
"0.5992883",
"0.59579474",
"0.5934605",
"0.5910154",
"0.589949",
"0.58453524",
"0.5829939",
"0.5825189",
"0.58133894",
"0.5805124",
"0.5788275",
"0.5778996",
"0.57755536",
"0.57642764",
"0.57430893",
"0.57380503",
"0.57379353",
"0.57334626",
"0.57283115"
] | 0.68030167 | 0 |
This will create a list of motionrelated features characterizing each tap of the given user and session | def get_feature_vector(user_id: str, session: str) -> DataFrame:
#Find the time windows during which the reader is doing the desired task
activity_data = read_file(user_id, session, 'Activity.csv')
task_number = mode(activity_data['TaskID'])
task_name = task_names[(task_number - 1) % len(task_names)]
tap_windows = get_tap_events(user_id, session)
data = get_user_session_data(user_id, session)
add_magnitude_columns(data)
add_columns_for_taps(data, tap_windows)
mark_tap_start_and_end(data, delta_in_ms = 200)
column_names = get_feature_names()
#A feature vector for each tap, to be filled in subsequently:
featureVectors = pd.DataFrame(columns = column_names)
for tap_file in tap_file_names:
tap_feature = tap_file_to_feature_name[tap_file]
print(tap_feature)
window_start_indices = data[data[tap_feature] == 4].index
window_end_indices = data[data[tap_feature] == 5].index
if len(window_start_indices) == 0:
continue
for i in range(len(window_start_indices)):
start, end = window_start_indices[i], window_end_indices[i]
window_of_interest = data[start : end + 1]
features = feature_list(user_id, session, tap_feature, task_name, window_of_interest)
if features != None:
featureVectors.loc[featureVectors.shape[0]] = features
return featureVectors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def feature_list(user_id: str, session: str, tap_feature: str, task_name: str, window: DataFrame):\n if window.shape[0] == 0:\n return None\n #Add user ID, session, task name\n features = [user_id, session, task_name]\n\n #Add orientation\n orientation = mode(window['Phone_orientation_accel'])\n features.append(orientation)\n\n #Add tap type\n features.append(tap_feature)\n\n lead_file = 'Accelerometer.csv'\n\n time_col = x_columns[lead_file]\n\n before_start = window[window[tap_feature] == 4].index[0]\n during_start = window[window[tap_feature] == 2].index[0]\n after_start = window[window[tap_feature] == 3].index[0] + 1\n after_end = window[window[tap_feature] == 5].index[0]\n\n before = window.loc[before_start : during_start]\n during = window.loc[during_start : after_start]\n after = window.loc[after_start : after_end + 1]\n\n if during.shape[0] < 2:\n # If there were none or one measurements during the tap,\n # add the closest ones\n during = window[during_start - 1 : after_start + 1]\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n # Feature 1: Mean during\n mean_during = mean(during[y])\n\n # Feature 2: SD during\n sd_during = sd(during[y])\n\n # Feature 3: Difference before/after\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n difference_before_after = mean_after - mean_before\n\n # Feature 4: Net change from tap\n net_change_due_to_tap = mean_during - mean_before\n\n # Feature 5: Maximal change from tap\n max_tap = max(during[y])\n max_change = max_tap - mean_before\n\n # Feature 6: Restoration time\n avgDiffs = []\n for j in range(after[y].shape[0]):\n subsequentValues = after[y].iloc[j:]\n subsequentDistances = subsequentValues.map(lambda x: abs(x - mean_before))\n averageDistance = mean(subsequentDistances)\n avgDiffs.append(averageDistance)\n time_of_earliest_restoration = min(avgDiffs)\n restoration_time = time_of_earliest_restoration - during[time_col].iloc[-1]\n\n # Feature 7: Normalized duration\n t_before_center = (before[time_col].iloc[0] + before[time_col].iloc[-1]) / 2 \n t_after_center = (after[time_col].iloc[0] + after[time_col].iloc[-1]) / 2\n normalized_duration = (t_after_center - t_before_center) / (mean_after - mean_before)\n \n # Feature 8: Ndormalized duration max\n t_max_in_tap = during[during[y] == max_tap][time_col].iloc[0]\n normalized_duration_max = (t_after_center - t_max_in_tap) / (mean_after - max_tap)\n\n\n features += [mean_during, sd_during, difference_before_after,\n net_change_due_to_tap, max_change, restoration_time,\n normalized_duration, normalized_duration_max]\n\n if random.choice(range(100))== 0:\n plot_tap('Plots/Project/' + session, before, during, after, time_col)\n \n return features",
"def session():\n \n # ind is a list of dictionaries for the actions. \n ind=[]\n for i in range(IND_INIT_SIZE):\n ind.append(action())\n ind.sort(key=lambda r: r[\"date\"]) # sorts the sequences by date of action\n \n beginning=ind[0]['date']\n feature_vect=creator.Individual()\n feature_vect.append(beginning.hour)\n for i in range(5):\n feature_vect.append(0)\n\n for act in ind:\n duration=act['date']-beginning\n if act['type']=='logon':\n feature_vect[2]+=1\n elif act['type']=='email' and act['activity']=='Send':\n feature_vect[3]+=1\n elif act['type']=='file' and (act[\"to_removable_media\"]==True or act[\"from_removable_media\"]==True):\n feature_vect[4]=+1\n elif act[\"type\"]==\"http\":\n feature_vect[5]+=1\n\n feature_vect[1]=duration.total_seconds()/60 # the duration is in minutes\n \n # Normalize the vector\n maxFV=max(feature_vect)\n for i in range(len(feature_vect)):\n feature_vect[i]/=maxFV\n \n return feature_vect",
"def _get_features(self, session):\n feature_utils.qsr_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n feature_utils.standardize_simple(session, self.config)\n\n # feature_utils.marker_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n\n return session[SESSION_FEAT]",
"def create_new_features(self):\n train = self.train\n \n train['is_context'] = train['context_type'].isin(CONTEXT_TYPE_TEST)\n train['is_context_flow'] = train['listen_type'] * train['is_context']\n \n train['is_listened_context'] = train['is_listened'] * train['is_context']\n train['is_listened_flow'] = train['is_listened'] * train['listen_type']\n train['is_listened_context_flow'] = train['is_listened'] * train['is_context_flow']\n \n for feature in self.categorize_features:\n gby_feat = train.groupby(feature)\n new_features(train, gby_feat, feature, feature in self.listen_type_features, self.context_features, self.flow_features, self.fillna)\n \n # Variable combinations\n for feat1 in self.combo_features1:\n for feat2 in self.combo_features2:\n gby_feat = train.groupby([feat1, feat2])\n name = feat1 + '_' + feat2\n new_features(train, gby_feat, name, feat1 in self.listen_type_features, self.context_features, self.flow_features, self.fillna)",
"def user_features(posts):\n \n result = []\n for post in posts:\n #n_comments = get_hostile_indices(post)[0] + 1\n n_comments = post['n_comments_observed']\n feature_list = []\n num_mentioned = 0\n user_list = post['users'][:n_comments]\n comment_list = post['comments'][:n_comments]\n ratio_users = len(set(user_list))/len(user_list)\n for c in comment_list:\n flag = has_mentioned(c) \n if flag:\n num_mentioned += 1\n \n ratio_mentioned = num_mentioned/len(user_list)\n \n feature_list.append(ratio_users)\n feature_list.append(ratio_mentioned)\n \n result.append(feature_list)\n\n X_matrix = np.asarray(result)\n headers = [\"other_features_1st\", \"other_features_2st\"]\n \n return X_matrix, headers",
"def get_feature_names():\n return ['UserID', 'SessionID', 'TaskName', 'Orientation', 'TapType'] + get_numerical_feature_names()",
"def extract_all_features(directory, feature_type, session_type=None, \n le=None, dummies=None, dummy_train=None, \n use_scaler=True, scaler=None, should_augment=False, lda=None):\n samples = []\n original_transcripts = []\n phone_labels = []\n is_audible_sample = []\n modes = []\n sessions = []\n\n meta_info_path = os.path.join(directory, \"utteranceInfo.pkl\")\n try:\n with open(meta_info_path, \"rb\") as f:\n meta = pickle.load(f)\n meta[\"speakerSess\"] = meta[\"speakerId\"] + \"_\" + meta[\"sessionId\"]\n \n # If we want dummies, create/store the dummies as needed\n if dummies is not None:\n # Create them\n meta_dummies = pd.get_dummies(meta[dummies])\n if dummy_train is None:\n # Store it for future\n dummy_train = meta_dummies\n else:\n # Reindex to match the training data, filling blanks with zeros\n meta_dummies = meta_dummies.reindex(columns = dummy_train.columns, fill_value=0)\n meta_dummies = meta_dummies.as_matrix()\n except FileNotFoundError:\n print(\"Cannot open file %s -- check that directory to see if it needs to be renamed to the hardcoded path\" % os.path.join(directory, \"utteranceInfo.pkl\"))\n\n ctr = 1\n for _, utterance in meta.iterrows():\n if session_type is not None and utterance[\"mode\"] != session_type:\n continue\n pkl_filename = os.path.join(directory, utterance[\"label\"] + \".pkl\")\n \n # Figure out how we want to add noise, as per user specifications\n add_addls = [False]\n if should_augment:\n add_addls += [True]*1\n \n # Add original, then then with any data augmentation added\n for add_addl in add_addls:\n (features, phones), transcript = extract_features(pkl_filename, feature_type, \n should_subset=add_addl, should_address_noise=add_addl)\n # Print current status to user\n print(ctr, pkl_filename, transcript, features.shape, len(phones))\n ctr += 1\n \n samples.append(features)\n modes.append(utterance[\"mode\"])\n sessions.append(utterance[\"speakerSess\"])\n if transcript is None: # use original\n original_transcripts.append(utterance[\"transcript\"])\n else: # use doctored transcript from subsetting\n original_transcripts.append(transcript)\n phone_labels.append(phones)\n is_audible_sample.append(utterance[\"mode\"] == \"audible\")\n \n if len(samples) == 0:\n raise ValueError(\"Dataset %s has no entries when filtered for '%s' \" % \n (meta_info_path, session_type if session_type is not None else \"(none)\"))\n \n if feature_type == \"wand_lda\":\n samples, lda = wand_lda(samples, phone_labels, lda=lda)\n elif feature_type == \"wand_ldaa\":\n samples, lda = wand_lda(samples, phone_labels, subset_to_use=is_audible_sample, lda=lda)\n\n # Build the encodings\n if le is None:\n le = preprocessing.LabelEncoder()\n le.fit(list(chain.from_iterable(list(x) for x in original_transcripts)))\n transcripts = []\n for text in original_transcripts:\n transcripts.append(le.transform([c for c in list(text) if c in le.classes_]))\n \n # Get lengths\n sample_lens = []\n for i, s in enumerate(samples):\n sample_lens.append(s.shape[1])\n if dummies is not None:\n dummies_through_time = np.ones((meta_dummies[i].shape[0], s.shape[1]))\n dummies_through_time *= meta_dummies[i][:,np.newaxis]\n s = np.vstack([s, dummies_through_time])\n sample_lens = np.array(sample_lens, dtype=np.int64)\n\n n_samples = len(samples)\n n_feats = samples[0].shape[0]\n\n assert all(s.shape[0] == n_feats for s in samples)\n\n padded_samples = np.zeros((n_samples, n_feats, max(sample_lens)))\n\n for i, sample in enumerate(samples):\n padded_samples[i,:,0:sample_lens[i]] = sample\n\n # Ensure 
samples are shaped (n_samples, max_timesteps, n_features)\n padded_samples = np.transpose(padded_samples, (0, 2, 1))\n n_signals, max_timesteps, n_feats = padded_samples.shape\n \n if use_scaler:\n if scaler is None:\n scaler = preprocessing.StandardScaler()\n padded_samples = np.reshape(padded_samples, (-1, n_feats))\n scaler.fit(padded_samples)\n padded_samples = np.reshape(padded_samples, (n_samples, max_timesteps, n_feats))\n padded_samples = np.reshape(padded_samples, (-1, n_feats))\n padded_samples = scaler.transform(padded_samples)\n padded_samples = np.reshape(padded_samples, (n_samples, max_timesteps, n_feats))\n \n return (padded_samples, sample_lens, np.array(transcripts), le, \n dummy_train, np.array(modes), np.array(sessions), scaler, lda)",
"def new_features(train, gby_feat, name, is_listen_type_feature, context_features, flow_features, fillna):\n \n # count and ratio on the all train\n count = gby_feat['is_listened'].transform('count')\n train[name + '_count'] = count\n train[name + '_count_bis'] = count\n train[name + '_ratio'] = gby_feat['is_listened'].transform('mean')\n \n if context_features:\n # Count and ratio for context observations\n count = gby_feat['is_context'].transform('sum')\n train[name + '_context_count'] = count\n train[name + '_context_count_bis'] = count\n train[name + '_context_ratio'] = gby_feat['is_listened_context'].transform('sum')/(1.*count)\n # Note that there should be NaN values if count=0.\n if fillna:\n train[name + '_context_ratio'].fillna(0.5, inplace=True)\n \n # Count and ration fot the flow observations\n if is_listen_type_feature:\n if flow_features:\n count = gby_feat['listen_type'].transform('sum')\n train[name + '_flow_count'] = count\n train[name + '_flow_count_bis'] = count\n train[name + '_flow_ratio'] = gby_feat['is_listened_flow'].transform('sum')/(1.*count)\n if fillna:\n train[name + '_flow_ratio'].fillna(0.5, inplace=True)\n \n count = gby_feat['is_context_flow'].transform('sum')\n train[name + '_context_flow_count'] = count\n train[name + '_context_flow_count_bis'] = count\n train[name + '_context_flow_ratio'] = gby_feat['is_listened_context_flow'].transform('sum')/(1.*count)\n if fillna:\n train[name + '_context_flow_ratio'].fillna(0.5, inplace=True)",
"def create_features(self, answer):\n # Get the teacher's stuff\n a_stopwords = sf.remove_stopwords(self.teacher_answer)\n a_stemmed = sf.stem_sentence(a_stopwords)\n a_stemmed_ordered = sf.order_sentence(a_stemmed)\n teacher_answers = [\n a_stemmed,\n a_stemmed_ordered,\n ]\n \n # Change sentence into multiple versions\n log = dict()\n log['student_answer'] = answer\n log['teacher_answer'] = self.teacher_answer\n log['q_answer'] = answer\n log['q_stopwords'] = sf.remove_stopwords(answer)\n log['q_stemmed'] = sf.stem_sentence(answer)\n log['q_stem_ordered'] = sf.order_sentence(log['q_stemmed'])\n \n # Might need to save scaling until jsut before modeling\n log['wordcount'] = sf.word_count(answer)\n log['wordcount'] = sf.scale_column(self.word_scaler, log['wordcount'])\n\n\n# Stem sim\n log['stem_g_similarity'] = sf.generic_similarity(log['q_stemmed'], a_stemmed)\n log['stem_j_similarity'] = sf.jaccard_similarity(log['q_stemmed'], a_stemmed)\n log['stem_c_similarity'] = sf.cosine_similarity(log['q_stemmed'], a_stemmed)\n # Ordered\n log['stem_ordered_g_similarity'] = sf.generic_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_j_similarity'] = sf.jaccard_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_c_similarity'] = sf.cosine_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n\n\n \n # Appending New Answer\n self.new_answers = self.new_answers.append(log, ignore_index = True)\n \n # Entity Extraction\n types_of_sentences = [\n 'q_stemmed',\n 'q_stem_ordered',\n ]\n \n for sent_type, teach_ans in zip(types_of_sentences, teacher_answers):\n \n self.new_answers = sf.unigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.bigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.trigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)",
"def token_features(tokens, feats):\n \n count = Counter(tokens)\n for i in count:\n feats[\"token=\"+i] = count[i]",
"def alspostprocess(data, prediction, features, user_features, movie_features, n_features=10):\r\n \r\n\r\n data['ALS'] = prediction[data.loc[:, 'userID']-1, data.loc[:, 'movieID']-1]\r\n features.append('ALS')\r\n \r\n total_features = len(movie_features)\r\n if n_features>total_features:\r\n n_features = total_features\r\n \r\n for i in range(n_features):\r\n data[\"UserFeature{}\".format(i)] = user_features[data.loc[:, 'userID']-1, i]\r\n features.append(\"UserFeature{}\".format(i))\r\n data[\"MovieFeature{}\".format(i)] = movie_features[i, data.loc[:, 'movieID']-1]\r\n features.append(\"MovieFeature{}\".format(i))\r\n return data, features",
"def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields",
"def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features",
"def extract_features_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon, mode='training'):\n instances = []\n for sent in sentence_dicts:\n # print(sent)\n for key, value in sent.items():\n features = {}\n if isinstance(key, int):\n if not_known_cue_word(value[3].lower(), cue_lexicon, affixal_cue_lexicon):\n sent[key]['not-pred-cue'] = True\n continue\n\n features['token'] = value[3].lower()\n features['lemma'] = value[4].lower()\n features['pos'] = value[5]\n\n if key == 0:\n features['bw-bigram1'] = 'null'\n else:\n features['bw-bigram1'] = \"%s_*\" %sent[key-1][4].lower()\n if not (key+1) in sent:\n features['fw-bigram1'] = 'null'\n else:\n features['fw-bigram1'] = \"*_%s\" %sent[key+1][4].lower()\n \n affix = get_affix_cue(value[3].lower(), affixal_cue_lexicon)\n if affix != None:\n base = value[3].lower().replace(affix, \"\")\n features['char-5gram1'], features['char-5gram2'] = get_character_ngrams(base, affix, 5)\n features['char-4gram1'], features['char-4gram2'] = get_character_ngrams(base, affix, 4)\n features['char-3gram1'], features['char-3gram2'] = get_character_ngrams(base, affix, 3)\n features['char-2gram1'], features['char-2gram2'] = get_character_ngrams(base, affix, 2)\n features['char-1gram1'], features['char-1gram2'] = get_character_ngrams(base, affix, 1)\n features['affix'] = affix\n else:\n features['char-5gram1'], features['char-5gram2'] = 'null','null'\n features['char-4gram1'], features['char-4gram2'] = 'null','null'\n features['char-3gram1'], features['char-3gram2'] = 'null','null'\n features['char-2gram1'], features['char-2gram2'] = 'null','null'\n features['char-1gram1'], features['char-1gram2'] = 'null','null'\n features['affix'] = 'null'\n \n instances.append(features)\n if mode == 'training':\n labels = extract_labels_cue(sentence_dicts, cue_lexicon, affixal_cue_lexicon)\n return sentence_dicts, instances, labels\n return sentence_dicts, instances",
"def generateFeatures(self, data):\n pass",
"def get_feature_set_PB(tweet):\n features= {\n 'text_length': np.log(len(tweet.text))\n } #ADD ADDITIONAL FEATURES\n if tweet.nrof_sademoticons>0:\n features['sademoticons'] = tweet.nrof_sademoticons\n if tweet.nrof_happyemoticons>0:\n features['happyemoticons'] = tweet.nrof_happyemoticons\n \n return features",
"def extract_video_features():\r\n\r\n # Face feature extraction from Openface output file\r\n file = open(\"Extracted_Features/\"+input_video[:len(input_video)-4]+\"_Features/\"+input_video[:len(input_video)-4]+\".csv\")\r\n reader = csv.DictReader(file)\r\n features = {}\r\n\r\n for row in reader:\r\n\r\n # Taking only good frames where faces have been detected with a confidence higher than 0.8 (Openface standard)\r\n if int(row[' success']) == 1 and float(row[' confidence']) > 0.5:\r\n face_id = int(row[' face_id'])\r\n frame = int(row['frame']) - 1\r\n\r\n features.setdefault(frame, {})\r\n face_features = []\r\n\r\n # Mouth LandMarks\r\n for i in range(0, 68):\r\n face_features.append(float(row[' x_' + str(i)]))\r\n\r\n for i in range(0, 68):\r\n face_features.append(float(row[' y_' + str(i)]))\r\n\r\n if f_type == \"AU\":\r\n au = [\"10\", \"12\", \"14\", \"15\", \"17\", \"20\", \"23\", \"25\", \"26\"]\r\n for i in au:\r\n face_features.append(float(row[' AU' + i + '_r']))\r\n\r\n features[frame][face_id] = face_features\r\n\r\n return features",
"def _get_observation(self, session):\n object_data = session[SESSION_OBJ_2D]\n sess_len = session[SESSION_LEN]\n # print (object_data)\n # print (sess_len)\n\n object_1_name, object_2_name = feature_utils.get_most_active_objects_interval(object_data, object_data.keys(), 0, sess_len)\n\n features = []\n\n for name in [object_1_name, object_2_name]:\n for frame in [-2, -1]:\n object_data[name][frame].transform.position\n features.append(object_data[name][frame].transform.get_feat())\n\n return np.concatenate( features ).flatten()",
"def features(self, state, action, next_state):\n raise NotImplementedError",
"def get_chase_stats_features(tweets, cleaned_tweets,out_folder):\n feats=[]\n count=0\n #hashtags = get_hashtags_in_tweets(tweets, out_folder)\n #mispellings = get_misspellings(tweets, cleaned_tweets, out_folder)\n specialpunc = get_specialpunct(tweets, cleaned_tweets,out_folder)\n specialchars = get_specialchars(tweets, cleaned_tweets,out_folder)\n capitalization = get_capitalization(tweets,cleaned_tweets,out_folder)\n for t, tc in zip(tweets, cleaned_tweets):\n feats.append(other_features_(t, tc))\n count+=1\n # if count%100==0:\n # print(\"\\t {}\".format(count))\n feat_names = [\"SPECIALPUNC\",\"SPECIALCHAR\", \"CAPT\"]\n pickle.dump(feat_names,\n open(out_folder+\"/\"+TWEET_TD_OTHER_FEATURES_VOCAB+\".pk\", \"wb\" ))\n feature_matrix=np.column_stack((specialpunc, specialchars,\n capitalization))\n\n\n return feature_matrix, feat_names",
"def session_to_input(session):\n all_players = get_player_order(session)\n hand_vec = get_hand_vec(all_players)\n board = session.board()\n board_vec = get_board_vec(board, all_players)\n feature_vec = get_feature_vec(board, all_players)\n data = np.hstack([hand_vec, board_vec, feature_vec])\n return data",
"def getFeatures(featureInput):\n featureList = []\n for defTerm,candidateSent in featureInput:\n tokens = nltk.word_tokenize(candidateSent)\n features = {}\n POScenter,POSleft,POSright = wordPOS(tokens,defTerm)\n features['Pos of first Article'] = posFirstArticle(tokens)\n## features['Num Punct Marks'] = numPunctuation(tokens)\n features['Subj words Predicate'] = subWordPerdicate(candidateSent,defTerm,tokens)\n features['Word before def term'] = wordBeforeDef(tokens,defTerm)\n features['POS centered word'] = POScenter\n features['POS left word'] = POSleft\n## features['POS right word'] = POSright \n featureList.append(features)\n return featureList",
"def get_features(self):\n if self.strokes is False:\n print('Isolating strokes')\n self.isolate_strokes()\n # List of features to use (sm1 omitted because always nan)\n feature_names = ('zrc', 'centroid',\n 'cm0', 'cm1', 'cm2', 'cm3', 'cm4',\n 'sm0', 'sm2')\n features_list = []\n for istroke in self.strokes:\n if not self.isGoodFrame(istroke):\n continue\n ifeature_dic = self.extract_features_from_frame(istroke)\n ifeature_list = []\n for ifeature in feature_names:\n ifeature_list.append(ifeature_dic[ifeature])\n features_list.append(ifeature_list)\n return {'feature_names': feature_names,\n 'feature_table': np.array(features_list)}",
"def token_pair_features(tokens, feats, k=3):\n# windows = []\n # Window creation\n #--------------------------------------------------------------\n# def window_creator(list,degree):\n# for ws in range(len(tokens) - degree + 1):\n# yield [list[ws+l] for l in range(degree)]\n\n# window_generator = window_creator(tokens,k)\n# #---------------------------------------------------------------\n\n# for window in window_generator:\n# subseq = [c[0]+\"__\"+c[1] for c in combinations(window,2)]\n# for sub in subseq:\n# if \"token_pair=\"+sub not in feats:\n# feats[\"token_pair=\"+sub] = 1\n# elif \"token_pair=\"+sub in feats:\n# feats[\"token_pair=\"+sub] = feats[\"token_pair=\"+sub] + 1\n# # pair=Counter()\n# for i in range(len(tokens)-k+1):\n# pair.update(list(combinations(tokens[i:i+k],2)))\n# for k in pair:\n# feats[\"token_pair=\"+k[0]+\"__\"+k[1]]=pair[k]\n \n\n# for i in range(len(tokens) - k+1):\n# for x, y in combinations(tokens[i:i + k], 2):\n# feats[\"token_pair=\" + x + \"__\" + y ] += 1\n\n count=Counter()\n for i in range(len(tokens)-k+1):\n count.update(list(combinations(tokens[i:i+k],2)))\n for k in count:\n feats[\"token_pair=\"+k[0]+\"__\"+k[1]]=count[k]",
"def sent_features(tweet):\n twitter_objs = count_twitter_objs(tweet)\n tweet=clean_tweet(tweet) \n sentiment = sentiment_analyzer.polarity_scores(tweet)\n #Get text only\n words = preprocess(tweet) \n syllables = textstat.syllable_count(words)\n num_chars = sum(len(w) for w in words)\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n \n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n \n \\\n retweet = 0\n if \"rt\" in words:\n retweet = 1\n features = [FKRA, FRE,syllables, avg_syl, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['neg'], sentiment['pos'], sentiment['neu'], sentiment['compound'],\n twitter_objs[2], twitter_objs[1],\n twitter_objs[0], retweet]\n return features",
"def _get_features(self, session, problem_name=\"\", user_name=\"\"):\n\n #TODO pivot metrics tables\n query = session.query(Feature, User.name)\n #query = session.query(Feature, User.name, Metric)\n\n if user_name:\n query = query.filter(User.name == user_name)\n\n if problem_name:\n query = query.filter(Feature.problem.name == problem_name)\n\n return query",
"def to_features(self):\n to_return = dict()\n\n to_return['bias'] = 1.0\n to_return['user:' + self.user] = 1.0\n to_return['format:' + self.format] = 1.0\n to_return['token:' + self.token.lower()] = 1.0\n\n to_return['part_of_speech:' + self.part_of_speech] = 1.0\n for morphological_feature in self.morphological_features:\n to_return['morphological_feature:' + morphological_feature] = 1.0\n to_return['dependency_label:' + self.dependency_label] = 1.0\n\n return to_return",
"def getFeatures(self, gameState, action):\n # features = util.Counter()\n # successor = self.getSuccessor(gameState, action)\n # features['successorScore'] = self.getScore(successor)\n # return features\n if self.isOffensive:\n return self.getOffensiveFeatures(gameState, action)\n else:\n return self.getDefensiveFeatures(gameState, action)",
"def training_features(orientation=8, pix_per_cell=8, cell_per_block=2,\n spatial_size=16, hist_bins=32, color_space='HLS', sample_window=64,\n channels=[0], debug=False):\n def extract(paths, augment=False): # extract and augment\n features = []\n for file in paths:\n image = utils.imread_scaled_unified(file)\n if color_space != ident_config['default_color_space']:\n image_color_converted = cv2.cvtColor(\n image,\n eval('cv2.COLOR_' + ident_config['default_color_space'] + '2' + color_space))\n else:\n image_color_converted = image\n # End of if color_space\n\n image_resized = cv2.resize(image_color_converted, (sample_window, sample_window))\n if augment:\n brightened = utils.brighten(image_resized, bright=1.2)\n flipped = cv2.flip(utils.brighten(image_resized, bright=1.1), 1) # horizontal flip\n to_process = [brightened, flipped]\n else:\n to_process = [image_resized]\n # End of if augment\n\n for x in to_process: # must use square bracket for single element in list to iterate\n # using tuple, it will iterate the single image's row dimension. \n hog_features = utils.get_hog_features_channels(\n x, orientation, pix_per_cell, cell_per_block, channels)\n spatial_features, hist_features = utils.color_features(\n x, spatial_size=spatial_size, hist_bins=hist_bins, channels=channels)\n image_features = np.hstack(\n (spatial_features, hist_features, hog_features)).reshape(1, -1)\n image_features = np.squeeze(image_features)\n # remove the redundant dimension, StandardScaler does not like it\n features.append(image_features)\n # End of for x ...\n # End of for file\n return features\n cars, noncars, cars_to_be_augmented, num_cars, num_noncars = samples_sorted()\n num_samples = 30000 # limit the number of samples to be selected from each group.\n print('num_cars: ', num_cars, ' num_noncars: ', num_noncars, ' max. samples: ', 3*num_samples)\n\n car_features = extract(cars[:min(num_samples, len(cars))], augment=False)\n car_augmented_features = extract(cars_to_be_augmented[:min(num_samples, len(cars_to_be_augmented))], augment=True)\n noncar_features = extract(noncars[:min(num_samples, len(noncars))], augment=False)\n\n # Create an array stack of feature vectors\n X = np.vstack((car_features, car_augmented_features, noncar_features)).astype(np.float64)\n # Fit a per-column scaler\n X_scaler = StandardScaler().fit(X)\n # Apply the scaler to X\n scaled_X = X_scaler.transform(X)\n del X # X, scaled_X consumes much memory, should be released ASAP.\n # Define the labels vector\n y = np.hstack((np.ones(len(car_features) + len(car_augmented_features)), np.zeros(len(noncar_features))))\n\n # Split up data into randomized training and test sets\n rand_state = np.random.randint(0, 100)\n X_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.1, random_state=rand_state)\n return X_train, X_test, y_train, y_test, X_scaler",
"def getUserFeatures(input_dict, prefecture_location_dict, assessment_date):\n header = ['UserGender', 'UserAge', 'UserPrefName', 'UserPrefLat', 'UserPrefLon', 'UserDaysSinceRegistered', 'UserRegisteredMonth', 'UserRegisteredYear', 'UserWithdrawn']\n # getting gender variable #\n if input_dict['SEX_ID'] == 'f':\n feat_list = [1]\n else:\n feat_list = [0]\n\n # getting age variable #\n feat_list.append(float(input_dict['AGE']))\n\n # getting pref_name variable #\n\tpref_name = input_dict['PREF_NAME']\n feat_list.append(pref_name)\n\tlat = 0\n\tlon = 0\n\tif prefecture_location_dict.has_key(pref_name):\n\t\tlat = prefecture_location_dict[pref_name]['LATITUDE']\n\t\tlon = prefecture_location_dict[pref_name]['LONGITUDE']\n\tfeat_list.extend([lat,lon])\n\n # getting reg month and year #\n reg_date = input_dict['REG_DATE']\n reg_date = datetime.datetime.strptime(reg_date, \"%Y-%m-%d %H:%M:%S\").date()\n\tfeat_list.append((assessment_date - reg_date).days)\n feat_list.append( reg_date.month )\n feat_list.append( reg_date.year )\n\n # getting withdraw date #\n wd_date = input_dict['WITHDRAW_DATE']\n if wd_date == \"NA\":\n feat_list.append(0)\n else:\n\t\twd_date = datetime.datetime.strptime(wd_date, \"%Y-%m-%d %H:%M:%S\").date()\n\t\tif wd_date < assessment_date:\n \tfeat_list.append(1)\n\t\telse:\n\t\t\tfeat_list.append(0)\n\n return feat_list, header"
] | [
"0.65042937",
"0.58511",
"0.5717541",
"0.56386614",
"0.55402297",
"0.5528694",
"0.5486018",
"0.5403201",
"0.5337727",
"0.5314406",
"0.5313272",
"0.53073186",
"0.5303535",
"0.52877325",
"0.524555",
"0.5222801",
"0.52193296",
"0.5213846",
"0.5202224",
"0.51974803",
"0.5184841",
"0.5174971",
"0.5149259",
"0.51241356",
"0.511072",
"0.5109556",
"0.5104665",
"0.510304",
"0.5089285",
"0.50819373"
] | 0.6129802 | 1 |
Uses some metric TBD to evaluate a distance between the two users' feature vectors | def get_distance(user_id1: str, user_id2: str) -> float:
features1 = get_feature_vector(user_id1)
features2 = get_feature_vector(user_id2)
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetDist(feature_1, feature_2):\n return np.linalg.norm(feature_1 - feature_2)",
"def euclidean_distance(user1: User, user2: User) -> float:\r\n common_animes = set.intersection(set(user1.neighbor_anime.keys()),\r\n set(user2.neighbor_anime.keys()))\r\n return sqrt(sum(pow(anime.neighbor_users[user1] - anime.neighbor_users[user2], 2)\r\n for anime in common_animes))",
"def compute_feature_distances(features1: np.ndarray, \r\n features2: np.ndarray) -> np.ndarray:\r\n #broadcasting trick\r\n a = features1[:, np.newaxis, :]\r\n b = features2[np.newaxis, :, :]\r\n \r\n return np.linalg.norm( (a-b), axis=-1)",
"def distance_metric(u, v):\n if len(u) != len(v):\n raise Exception(\n \"Distance metric not valid for differently sized vectors\")\n sum = 0.\n for i in range(len(u)):\n sum += ((u[i] - v[i]) ** 2)\n return math.sqrt(sum)",
"def minkowski_distance(user1: User, user2: User) -> float:\r\n # predefined p_value\r\n p_value = 3\r\n common_animes = set.intersection(set(user1.neighbor_anime.keys()),\r\n set(user2.neighbor_anime.keys()))\r\n return _nth_root(sum(pow(abs(anime.neighbor_users[user1] - anime.neighbor_users[user2]),\r\n p_value) for anime in common_animes), p_value)",
"def distance(self, u, v):\n numerator = np.dot(u,v)\n denominator = np.linalg.norm(u) * np.linalg.norm(v)\n similarity = numerator/(denominator +1e-7)\n return similarity",
"def feature_distance(feat1, feat2, eps=1e-7, sqrt=True):\n diff = torch.pow((feat1 - feat2), 2).sum(-1)\n if sqrt:\n diff = (diff + eps).sqrt()\n return diff",
"def _compute_user_similarity(self, user1, user2):\n return self._compute_pearson(user1, user2)",
"def hausdorff_distance(self, other):\n ...",
"def compute_distance (uVector, uOther):\n ## since each element can be either 0 or 1,\n ## no need for square roots and pow\n d = 0\n for i in range (len(uVector)):\n d = d + math.pow((int(uVector [i]) - int(uOther [i])), 2)\n\n return d",
"def __dist(u, v):\n return spatial.distance.euclidean(u, v)",
"def distances(self):",
"def distance(self, u, v):\n # Implement the distance function between vectors u and v]\n # Note: you can also think of this as computing a similarity measure\n # Use of cosine similarity measure, assumes u and v have equal length\n num = np.dot(u,v)\n # den_u = np.sum(u**2)\n # den_v = np.sum(v**2)\n den_u = np.linalg.norm(u)\n den_v = np.linalg.norm(v)\n if den_u == 0.0 or den_v == 0.0:\n return 0.0\n # return num / (math.sqrt(den_u) * math.sqrt(den_v))\n return num / (den_u * den_v)",
"def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))",
"def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))",
"def dist(v1: vect2d, v2: vect2d) -> float:\n d = ((v2.x - v1.x)**2 + (v2.y - v1.y)**2) ** 0.5\n return d",
"def testCreateGeoPairsMatchingMetricUserDistance(self):\n self.test_class._matching_metrics = {\n 'metric': 1.0,\n 'response': 0,\n 'spend': 0\n }\n self.test_class.create_geo_pairs(use_cross_validation=True)\n self.test_class.create_geo_level_eval_data()\n self.test_class.geo_level_eval_data[0].sort_values(by='geo', inplace=True)\n self.test_class.geo_level_eval_data[0].reset_index(drop=True, inplace=True)\n self.assertTrue(\n self.test_class.geo_level_eval_data[0].sort_index(axis=1).equals(\n pd.DataFrame({\n 'geo': [1, 2, 3, 4],\n 'pair': [1, 1, 2, 2],\n 'response': [2, 5, 2, 4],\n 'spend': [1.5, 2.5, 1.5, 6]\n })))\n self.assertTrue(\n self.test_class.pairs[0].equals(\n pd.DataFrame({\n 'geo1': [2, 4],\n 'geo2': [1, 3],\n 'distance': [1/8, 1/8],\n 'pair': [1, 2]\n })))",
"def test_vector_dist(self):\r\n v1 = [1, 4, 2]\r\n v2 = [-1, 12, 4]\r\n\r\n exp = 8.48528137424\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)\r\n\r\n v1 = [1, 2, 100, 4, 2]\r\n v2 = [-1, 12, 4, 12, 99]\r\n\r\n exp = 137.087563258\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)",
"def distance(self,pt1,pt2):\n #productive #frequent\n if frequent: profprint()\n d = ( ( float(pt1[0]) - float(pt2[0]) )**2 + ( float(pt1[1]) - float(pt2[1]) )**2 + ( float(pt1[2]) - float(pt2[2]) )**2 )**0.5\n return d",
"def compute_distance(df):\n pass",
"def two_user_route_statistics(i,j, source_data, destination_data, source_destination_data, delta=1.2):\n\toccupancy_ratio = 0.0\n\tminimum_distance_so_far = 0.0\n\tcommon_travel_distance = 0.0\n\n\ttry:\n\t\tif source_destination_data[j][i] + source_data[i][j] <= 1.2*source_destination_data[i][i] and source_destination_data[j][i] + destination_data[i][j] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = ((source_destination_data[j][i] + source_data[i][j])/(source_destination_data[j][i] + source_data[i][j]+destination_data[i][j]))\n\t\t\tsecond = ((source_destination_data[j][i] + destination_data[i][j])/(source_destination_data[j][i] + source_data[i][j]+destination_data[i][j]))\n\t\t\toccupancy_ratio = (first+second)/2\n\t\t\tcommon_travel_distance = source_destination_data[j][i]\n\t\t\tminimum_distance_so_far = source_data[i][j] + source_destination_data[j][i] + destination_data[i][j]\n\n\t\tif source_destination_data[i][j] + destination_data[j][i] <= 1.2*source_destination_data[i][i] and source_destination_data[i][j] + source_data[j][i] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = ((source_destination_data[i][j] + destination_data[j][i])/(source_destination_data[i][j] + destination_data[j][i]+source_data[j][i]))\t\t\n\t\t\tsecond = ((source_destination_data[i][j] + source_data[j][i])/(source_destination_data[i][j] + destination_data[j][i]+source_data[j][i]))\n\t\t\ttotal_distance = source_data[j][i] + source_destination_data[i][j] + destination_data[j][i]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[i][j]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\t\tif source_data[i][j]+source_destination_data[j][j]+destination_data[j][i] <= 1.2*source_destination_data[i][i]:\n\t\t\tfirst = (1)\n\t\t\tsecond = (source_destination_data[j][j]/(source_data[i][j]+source_destination_data[j][j]+destination_data[j][i]))\n\n\t\t\ttotal_distance = source_data[i][j] + source_destination_data[j][j] + destination_data[j][i]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[j][j]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\t\tif source_data[j][i]+source_destination_data[i][i]+destination_data[i][j] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = (source_destination_data[i][i]/(source_data[j][i]+source_destination_data[i][i]+destination_data[i][j]))\n\t\t\tsecond = (1)\n\n\t\t\ttotal_distance = source_data[j][i]+source_destination_data[i][i]+destination_data[i][j]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[i][i]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\texcept Exception as e:\n\t\toccupancy_ratio = 1.0\n\t\tminimum_distance_so_far = 0.0\n\t\tcommon_travel_distance = 0.0\n\n\n\treturn occupancy_ratio, common_travel_distance, minimum_distance_so_far",
"def distance(a, b):\n return (np.sum((a - b)**2))**0.5",
"def dist(v1, v2):\n return ( (v1[0] - v2[0])**2 + (v1[1] - v2[1])**2 )**0.5",
"def compute_distance(node1, node2):\n return np.linalg.norm(node1 - node2)",
"def distance_metric(y_true, y_pred):\n diff = y_true - y_pred\n sqr = K.square(diff)\n total = K.sum(sqr, axis=1)\n return K.sqrt(total)",
"def get_distance(self,row_vector):\n d = row_vector-self.X_test\n \n return np.sqrt(np.dot(d,d)) # return the euclidean distance",
"def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )",
"def distance(self, pt1, pt2):\r\n # productive #frequent\r\n if frequent: profprint()\r\n d = ((float(pt1[0]) - float(pt2[0])) ** 2 + (float(pt1[1]) - float(pt2[1])) ** 2 + (float(pt1[2]) - float(pt2[2])) ** 2) ** 0.5\r\n return d",
"def get_distance(descriptive_vector1, descriptive_vector2 ):\n return np.linalg.norm(descriptive_vector1 - descriptive_vector2)",
"def distance_metric(actions1, actions2):\n diff = actions1-actions2\n mean_diff = np.mean(np.square(diff), axis=0)\n dist = sqrt(np.mean(mean_diff))\n return dist"
] | [
"0.70376164",
"0.68441004",
"0.6769662",
"0.6710365",
"0.6704376",
"0.6573292",
"0.65396273",
"0.6442715",
"0.6419095",
"0.6325092",
"0.63205504",
"0.6293817",
"0.62658757",
"0.62401307",
"0.62383014",
"0.6234865",
"0.6229674",
"0.61967367",
"0.6192722",
"0.6184472",
"0.618372",
"0.6173346",
"0.6172002",
"0.6168662",
"0.6144876",
"0.6142118",
"0.61140454",
"0.61115044",
"0.61014795",
"0.6083523"
] | 0.8190472 | 0 |
Writes a .csv file of features for each user in the given location. If label is True, the first row of the files will be a header. | def write_all_users(folder_name: str, label: bool):
make_directory(folder_name)
for user in get_user_ids():
print("Analysis of user: " + user)
subfolder_name = folder_name + "/" + user
make_directory(subfolder_name)
for session in get_user_session_ids(user):
print("Session: " + session)
file_name = subfolder_name + "/" + session + ".csv"
data = get_feature_vector(user, session)
if data == None:
continue
if label:
data = [labels] + data
write_to_csv(data, file_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def writeFeatures(features, labels, output_filename):\n\twith open(output_filename, 'w') as csvfile:\n\t fieldnames = features[0].keys()\n\t fieldnames.append('label')\n\t writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n\t writer.writeheader()\n\t for i in range(len(features)):\n\t \tfeatures[i]['label'] = labels[i]\n\t \twriter.writerow(features[i])\n\n\treturn",
"def write_csv_label(labels, csv_file):\n with open(csv_file, 'w') as f:\n writer = csv.writer(f)\n for key, value in labels.items():\n writer.writerow([key, value])",
"def generate_labels_csv(csv_location, *args):\n os.chdir(csv_location) # Navigate into the right directory\n\n # Initilize and open the Labels csv\n with open('labels.csv', mode='w') as csv_file:\n fieldnames = ['Frame_ID', 'Class']\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n\n count = 0\n label = 0\n for classes in args:\n # Write into the CSV the frames with their associated class\n for i in range(count, classes):\n writer.writerow({'Frame_ID': 'frame' + str(i) + '.jpg', 'Class': label})\n\n # Increment label and count\n count = classes\n label += 1",
"def save_labels_to_disk(labels: list, label_path: str):\n\n with open(label_path, \"w\") as result_file:\n wr = csv.writer(result_file, dialect=\"excel\")\n wr.writerows(labels)",
"def save_features_to_file(path: str, features: Data_dict_type, labels: Labels_dict_type_numpy):\n for key, item in features.items():\n filename = key\n values, sample_rate = item\n window_labels = labels[filename].reshape((-1, 1))\n concatenated_data = np.concatenate(\n [np.array([i for i in range(values.shape[0])])[..., np.newaxis], # window_idx\n values, # features\n window_labels], axis=-1) # labels\n df_to_save = pd.DataFrame(data=concatenated_data)\n columns = ['window_idx'] + ['feature_%i' % i for i in range(values.shape[-1])] + ['label']\n df_to_save.columns = columns\n df_to_save.to_csv(os.path.join(path, filename.split('.')[0] + '.csv'), index=False)",
"def write_feature_labels(output, feature_labels):\n with open(os.path.join(output, 'features.list'), 'w') as out_file:\n out_file.write('\\n'.join(feature_labels))",
"def write_csv(image_names, image_classes, filename):\n with open(filename, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['filename', 'label'])\n writer.writerows(zip(image_names, image_classes))",
"def labels2csv(labels, csv_path):\n with open(csv_path, \"w\") as file:\n file.write(\"id,label\\n\")\n for i, label in enumerate(labels):\n file.write(\"{},{}\\n\".format(i, label))",
"def make_csv(filename):\n usernames = ['kissinfashion', 'instagood', 'beautifuldestinations', 'etdieucrea', 'josecabaco']\n train, test = split_data(filename)\n for user in usernames:\n trainname = '../data/' + 'train_' + user + '.csv'\n testname = '../data/' + 'test_' + user + '.csv'\n train[user].to_csv(path_or_buf=trainname, sep='\\t', index=False, encoding='utf-8')\n test[user].to_csv(path_or_buf=testname, sep='\\t', index=False, encoding='utf-8')",
"def create_csv_file(data_root, output_file):\r\n image_folder = data_root + \"/\" + \"training_set\"\r\n label_folder = data_root + \"/\" + \"training_set_label\"\r\n filenames = os.listdir(label_folder)\r\n filenames = [item for item in filenames if item[0] != '.']\r\n file_list = []\r\n for filename in filenames:\r\n image_name = \"training_set\" + \"/\" + filename.replace(\"_seg.\", \".\")\r\n label_name = \"training_set_label\" + \"/\" + filename\r\n file_list.append([image_name, label_name])\r\n \r\n with open(output_file, mode='w') as csv_file:\r\n csv_writer = csv.writer(csv_file, delimiter=',', \r\n quotechar='\"',quoting=csv.QUOTE_MINIMAL)\r\n csv_writer.writerow([\"image\", \"label\"])\r\n for item in file_list:\r\n csv_writer.writerow(item)",
"def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]",
"def create_output_csv(labels, filename):\n\n keyframe_ind = [labels[i] != labels[i-1] for i, val in enumerate(labels)]\n keyframe_idxs = [i for i, val in enumerate(keyframe_ind) if val==True]\n keyframe_filenames = [\"%06d\" % (i+1) + \".jpg\" for i, val in enumerate(keyframe_ind) if val==True]\n keyframe_scenes = labels[keyframe_idxs]\n keyframe_scenes_ascii = [string.ascii_lowercase[i] for i in keyframe_scenes]\n result = pd.DataFrame([keyframe_filenames, keyframe_scenes_ascii]).transpose()\n result.columns = ['keyframe', 'scene id']\n filepath = os.getcwd()\n result.to_csv(filepath + '/' + filename)",
"def write_to_csv(self, verbose: bool = False) -> None: \n Path(self.csv_dir).mkdir(exist_ok=True)\n with open(f\"{self.csv_dir}/train.csv\", \"wt\", encoding=\"utf-8\", newline=\"\") as train_file:\n with open(f\"{self.csv_dir}/test.csv\", \"wt\", encoding=\"utf-8\", newline=\"\") as test_file:\n csv_header = (\"phone\", \"phone_class_index\", \"f1\", \"f2\", \"f3\", \"f4\", \"f5\")\n train_csvwriter = csv.writer(train_file)\n test_csvwriter = csv.writer(test_file)\n train_csvwriter.writerow(csv_header)\n test_csvwriter.writerow(csv_header)\n for vowels_and_formants, wav_path, category in self:\n if verbose:\n print(f\"File: {wav_path} (category: {category})\")\n writer = train_csvwriter if category == \"TRAIN\" else test_csvwriter\n for vowel_and_formants in vowels_and_formants:\n phone, formants = vowel_and_formants\n row = (phone, ipa_class_index[phone]) + tuple(formants)\n writer.writerow(row)\n if verbose:\n print(row)",
"def write_features_to_csv(pairs, features, filename):\n\tids = []\n\n\tfor pair in pairs:\n\t\tids.append(pair.id)\n\n\tfeatures_dataframe = pd.DataFrame(features)\n\tfeatures_dataframe.insert(0, column=\"ID\", value=ids)\n\tfeatures_dataframe.to_csv(filename, index=False)",
"def write_labels_txt(labels: pd.DataFrame, path: str):\n\n # If the file containing the labels already exist, delete it\n if os.path.isfile(path):\n print('\\nA labels file already exists at {}, deleting it...'.format(path))\n os.remove(path)\n\n # Write the names of the labels on a txt\n labels.to_csv(path, header=None, index=None, sep=' ', mode='a')\n\n print('\\nThe labels file has been written at', path)",
"def write_csv_file(filepath, fieldnames, rows):\n headers = [{'label': field} for field in fieldnames]\n with open(filepath, 'w') as f_buf:\n outfile = CsvWriter()\n outfile.set_headers(headers)\n outfile._datas = rows\n outfile.render(f_buf)",
"def run(self, dataset_path):\n features = self._generate_features(self._feature_extractors)\n features.to_csv(dataset_path)",
"def _write_input(\n self, X: List[str], labels: Optional[List[List[str]]], input_path: Path\n ):\n df = pd.DataFrame({\"Text\": X})\n\n if labels is not None:\n df[\"Label\"] = labels\n\n df.to_csv(input_path, sep=\"\\t\", index=False)",
"def write_features(self):\n num_features_per_file = math.ceil(len(self.features) / self.num_jobs)\n for idx in range(self.num_jobs):\n job_features = self.features[idx * num_features_per_file: (idx + 1) * num_features_per_file]\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n with open(features_filename, \"wb\") as features_file:\n cloudpickle.dump(job_features, features_file, protocol=pickle.DEFAULT_PROTOCOL)",
"def _write_input(\n self,\n X: List[str],\n labels: Optional[Union[List[str], List[List[str]]]],\n input_path: Path,\n ):\n df = pd.DataFrame({\"Text\": X})\n\n if labels is not None:\n df[\"Label\"] = labels\n\n df.to_csv(input_path, sep=\"\\t\", index=False)",
"def ExportUser():\n UrlTodos = \"https://jsonplaceholder.typicode.com/todos/?userId={}\".format(\n sys.argv[1])\n DataTask = requests.get(UrlTodos).json()\n\n UrlInfo = \"https://jsonplaceholder.typicode.com/users/{}\".format(\n sys.argv[1])\n DataInfo = requests.get(UrlInfo).json()\n\n USER_ID = sys.argv[1]\n USERNAME = DataInfo.get(\"username\")\n FileName = USER_ID+\".csv\"\n with open(FileName, 'w', newline='') as f:\n writer = csv.writer(f, quoting=csv.QUOTE_ALL)\n for i in DataTask:\n writer.writerow([USER_ID, USERNAME, i.get(\"completed\"),\n i.get(\"title\")])",
"def export_users(_request):\n query = models.UserProfile.all().order('email')\n rows = []\n for user in query:\n is_superuser = 0\n if user.is_superuser:\n is_superuser = 1\n rows.append('%s,%s\\n' % (user.email, is_superuser))\n\n response = http.HttpResponse(''.join(rows), mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=users.csv'\n return response",
"def write_header(indir, nb_landmark, nb_feature, mirror_factor, order_factor, feature_names=None):\n assert nb_landmark > 0\n assert os.path.exists(indir) and os.path.isdir(indir), indir + \" not found.\"\n if indir[-1] != os.sep:\n indir += os.sep\n axis = [\"x\", \"y\", \"z\"]\n header = \"ID\"\n for numb in range(1, nb_landmark + 1):\n for axe in axis:\n header += \",\" + axe + str(numb)\n if feature_names is not None:\n assert len(feature_names) == nb_feature\n header += \",\" + \",\".join(feature_names)\n else:\n for numb in range(1, nb_feature + 1):\n header += \",Feature\" + str(numb)\n header += \"\\n\"\n with open(indir + \"../landmarks.csv\", \"w\") as filep:\n filep.write(header)\n modif = \"\"\n if mirror_factor is not None:\n modif += \"_reversed\"\n if order_factor is not None:\n modif += \"_reordered\"\n if mirror_factor is not None or order_factor is not None:\n with open(indir + \"../landmarks\" + modif + \".csv\", \"w\") as filep:\n filep.write(header)",
"def train_for(labels, filenames):\n stt = stt_google\n csvfiles = []\n writers = []\n for index, filename in enumerate(filenames):\n currfile = open(filename, 'ab')\n csvfiles.append(currfile)\n writers.append(csv.writer(currfile))\n # record instances until it doesn't interpret any text\n speech = stt.listen_for_speech()\n while(speech):\n hypotheses = []\n for hypothesis in speech:\n hypotheses.append(hypothesis['utterance'])\n #write hypotheses\n for index, label in enumerate(labels):\n writers[index].writerow([label] + hypotheses)\n speech = stt.listen_for_speech()\n for csvfile in csvfiles:\n csvfile.close",
"def save_object_features(object_features, super_object_features,\n which_object_categories):\n with open(\"./var/visual_feat_statics/category_names.txt\", encoding=\"utf-8\") as file:\n category_names = [l.rstrip(\"\\n\") for l in file]\n\n super_category_names = ['person', 'vehicle', 'outdoor', 'animal',\n 'accessory', 'sports', 'kitchen', 'food',\n 'furniture', 'electronic', 'appliance',\n 'indoor']\n\n if which_object_categories > 1:\n pandas_df = pd.DataFrame(data=object_features,\n index=[\"labels_freq\", \"labels_avg_confidence\",\n \"labels_area_ratio\"],\n columns=category_names)\n pandas_df.to_csv(\"object_features.csv\")\n\n pandas_df = pd.DataFrame(data=super_object_features,\n index=[\"labels_freq\", \"labels_avg_confidence\",\n \"labels_area_ratio\"],\n columns=super_category_names)\n pandas_df.to_csv(\"object_features_super_categories.csv\")\n\n elif which_object_categories > 0:\n pandas_df = pd.DataFrame(data=super_object_features,\n index=[\"labels_freq\", \"labels_avg_confidence\",\n \"labels_area_ratio\"],\n columns=super_category_names)\n pandas_df.to_csv(\"object_features_super_categories.csv\")\n else:\n pandas_df = pd.DataFrame(data=object_features,\n index=[\"labels_freq\", \"labels_avg_confidence\",\n \"labels_area_ratio\"],\n columns=category_names)\n pandas_df.to_csv(\"object_features.csv\")\n\n return None",
"def create(self, label_dir, model_type):\n labels = sorted(list(path.name for path in Path(label_dir).glob('./*png')))\n df = pd.DataFrame({'filename': labels,\n 'train': ['Not Checked']*len(labels)})\n csv_path = os.path.join(label_dir, f'{model_type}_train0.csv')\n df.to_csv(csv_path)\n return df, csv_path",
"def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)",
"def write_file(self, filename):\n\n with open(filename, 'w', newline = '') as csvfile:\n langwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for key in self.features:\n value = self.features[key]\n l = []\n for val in value:\n l.append(str(val))\n langwriter.writerow([l])\n return",
"def writeUser(userid, rating, location, country):\n\tlst = [userid, rating, location, country]\n\twriteLine(lst, users_file)",
"def write_label_file(labels_to_class_names, labels_filename):\n with tf.gfile.Open(labels_filename, \"w\") as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n'%(label, class_name))"
] | [
"0.7046409",
"0.65013045",
"0.63628066",
"0.61785483",
"0.61771864",
"0.6093858",
"0.60692143",
"0.6046009",
"0.5960273",
"0.594843",
"0.5785874",
"0.57570076",
"0.57210046",
"0.571924",
"0.5612553",
"0.56121415",
"0.5561343",
"0.5466026",
"0.5433147",
"0.5433038",
"0.5402878",
"0.53830606",
"0.5373494",
"0.53685486",
"0.5344572",
"0.5340003",
"0.5328065",
"0.5325848",
"0.5303831",
"0.5278402"
] | 0.7268666 | 0 |
Will make a directory with the given name, unless such a directory already exists, in which case nothing will happen | def make_directory(name: str):
try:
os.mkdir(name)
except:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_directory_if_needed(directory_name):\n if not os.path.isdir(directory_name):\n os.makedirs(directory_name)",
"def create_directory(path, name):\n new_path = os.path.join(path, name)\n if not os.path.isdir(new_path):\n subprocess.run(['mkdir', new_path])",
"def make_dir_if_needed(dir) :\n\tif not exists(dir) :\n\t\tos.makedirs(dir)",
"def _ensure_dir(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)",
"def create_directory(directory_name):\n directory = \"./\" + directory_name + \"/\"\n if not os.path.exists(directory):\n os.makedirs(directory)",
"def makeDir(dirName):\n try:\n os.makedirs(dirName)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(dirName):\n ## The path already exists so we can safely ignore this exception\n pass\n else:\n ## If it failed for some other reason, we want to see what the\n ## error is still\n raise",
"def find_and_create_dirs(dir_name):\n if os.path.exists(dir_name) is False:\n os.makedirs(dir_name)\n return dir_name",
"def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)",
"def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)",
"def make_dir_if_needed(path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n return path",
"def new_dir(the_dir):\n try:\n os.makedirs(the_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass #not a problem if file exists",
"def make_dir(name='results'):\n if os.path.isabs(name):\n output_path = name\n else:\n output_path = os.path.join(os.getcwd(), name)\n\n if ('.' not in output_path):\n directory = os.path.dirname(os.path.join(output_path, 'toto')) # doesn't work w/o 'toto'\n else :\n directory = os.path.dirname(output_path);\n\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return output_path",
"def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)",
"def make_dir(dir_path):\n if os.path.isdir(dir_path) == False:\n os.mkdir(dir_path)",
"def ensure_dir( dirName ):\r\n if not os.path.exists( dirName ):\r\n os.makedirs( dirName )",
"def make_dir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path, exist_ok=True)\n return True",
"def make_dir(path=None):\n\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n exit(\"\\nOSError: You can not use that directory!\\n\")",
"def makedirs(name, exist_ok=False):\n if not os.path.exists(name) or not exist_ok:\n os.makedirs(name)",
"def MakeDir(path):\n if os.path.exists(path):\n return False\n else:\n os.makedirs(path)\n return True",
"def make_dir(self):\n if not os.path.exists(self.d):\n try:\n os.mkdir(self.d)\n except OSError, e:\n if e.errno != 17:\n raise\n pass",
"def ensure_dir_exists(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)",
"def _create_dir(filename):\n head = os.path.dirname(filename)\n if head != '' and not os.path.isdir(head):\n os.makedirs(head)",
"def ensure_dir(dirName):\n if not os.path.exists(dirName):\n os.makedirs(dirName)",
"def safe_makedir(dname):\n if not os.path.exists(dname):\n # we could get an error here if multiple processes are creating\n # the directory at the same time. Grr, concurrency.\n try:\n os.makedirs(dname)\n except OSError:\n if not os.path.isdir(dname):\n raise\n else:\n LOG.warning(\"Directory {} already exists; not making directory\".format(dname))\n return dname",
"def makeDir(dir_path):\n if os.path.exists(dir_path): return\n dir_path = os.path.realpath(dir_path)\n dir_path = os.path.normpath(dir_path)\n if os.path.exists(os.path.dirname(dir_path)):\n os.mkdir(dir_path)\n else:\n makeDir(os.path.dirname(dir_path))\n os.mkdir(dir_path)",
"def create_dir(name):\n root_dir = get_data_dir()\n new_path = root_dir / name.strip(\"/\")\n if is_relative_to(new_path, root_dir):\n new_path.mkdir(parents=True, exist_ok=True)\n return str(new_path.relative_to(root_dir))\n return False",
"def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass",
"def _MakeDirs(self, dir_name):\n try:\n os.makedirs(dir_name, 0755)\n except OSError:\n pass",
"def MaybeMakeDirectory(*path):\n file_path = os.path.join(*path)\n try:\n os.makedirs(file_path)\n except OSError, e:\n if e.errno != errno.EEXIST:\n raise",
"def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass"
] | [
"0.8227327",
"0.8119038",
"0.78837943",
"0.7879888",
"0.78108305",
"0.7782611",
"0.7776862",
"0.77424526",
"0.77424526",
"0.77287775",
"0.7720759",
"0.77165735",
"0.7680433",
"0.76724714",
"0.75956064",
"0.7593501",
"0.7579252",
"0.757156",
"0.75657177",
"0.75417477",
"0.75402176",
"0.7537404",
"0.75286674",
"0.7528629",
"0.7513203",
"0.75095135",
"0.7496446",
"0.74850166",
"0.7480294",
"0.7468456"
] | 0.8610117 | 0 |
The tool this action is a method of. (May be None) | def tool(self):
return self._tool | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def obtain_action(self):\r\n\t\treturn",
"def tool(self):\n if self._tool is None:\n return SE3()\n else:\n return self._tool",
"def tool(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tool\")",
"def tool(self):\n tool_type = self.__class__.__module__.split('.')[-1]\n return g.config.tools[tool_type]",
"def get_action(self):\n raise NotImplementedError",
"def _get_action(self):\n return self.__action",
"def getUseAction(self):\n return self.useAction",
"def get_action(self):\n return self.__action",
"def get_action(self):\n return self.current_action",
"def tool(self):\n return equipment_module.Equipment(self._get_attr('extraction_tool_id'))",
"def action(self):\n return self._action",
"def action(self):\n return self._action",
"def action(self):\n return self._action",
"def action(self):\n return self._action",
"def action(self):\n return self._action",
"def action(self):\n return self._action",
"def get_action_expert_agent(self):\n raise NotImplementedError",
"def action_spec(self):\r\n pass",
"def choose_action(self):\r\n pass",
"def get_tool(self, ToolKlass):\n for tool in self.tools:\n if isinstance(tool, ToolKlass):\n return tool",
"def getTool( self, ang ):\n M = self.at(ang)[-1]\n return dot(M, self.tool)",
"def action_type(self):",
"def command(self):\n if self.model is self.model_action:\n return self.command_action\n else:\n return self.command_candidate",
"def _action(self):\n pass",
"def _GetToolInfo(tool):\n matches = [t for t in _TOOLS if t[0] == tool]\n if not matches:\n return None\n else:\n return matches[0][1]",
"def get_action(self, context):\n pass",
"def on_tool(self):\n if self.tmFile is not None:\n self.log.info(\"Launch tool %s\" % self.pItem.itemName)\n toolMngrCmds.launchTools(self.pItem.itemName, self.tmFile, self.log.level)",
"def __init__(self, toolName):\n\t\tself.toolName = toolName",
"def act(self, infoset):\n assert self.action in infoset.legal_actions\n return self.action",
"def action(self):\n return self._get_field(\"action\")"
] | [
"0.6823224",
"0.678028",
"0.67726797",
"0.6660057",
"0.6566384",
"0.6510465",
"0.6404679",
"0.63478595",
"0.63472354",
"0.63340336",
"0.6235975",
"0.6235975",
"0.6235975",
"0.6235975",
"0.6235975",
"0.6235975",
"0.6136724",
"0.6131193",
"0.60622597",
"0.60507506",
"0.6038814",
"0.6038483",
"0.6032852",
"0.59957844",
"0.5989275",
"0.5985483",
"0.5945797",
"0.5932562",
"0.59309846",
"0.5928823"
] | 0.7286446 | 0 |
An environment with some Scheme standard procedures. | def standard_env():
env = Env()
env.update(vars(math)) # gives us sin, cos, sqrt, pi
env.update({
'+': op.add, '-': op.sub, '*':op.mul, '/': op.truediv,
'>': op.gt, '<': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq,
'begin': lambda *x: x[-1],
'or': op.or_,
'even?': lambda x: x % 2 == 0
})
return env | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def standard_env():\n \"\"\"An environment with some Scheme standard procedures.\"\"\"\n env = Env()\n env.update(\n {\n \"+\": op.add,\n \"-\": op.sub,\n \"*\": op.mul,\n \"/\": op.truediv,\n \"car\": lambda x: x[0],\n \"cdr\": lambda x: x[1:] if len(x) > 2 else x[1],\n \"cons\": lambda x, y: [x] + y if isinstance(y, list) else [x, y],\n \"eq?\": op.eq,\n \"atom?\": lambda x: type(x) in AtomicTypes,\n }\n )\n return env",
"def standard_env():\n env = {}\n env.update(vars(math)) # sin, cos, exp...\n env.update({\n '+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv,\n '>': op.gt, '<': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq,\n 'abs': abs,\n 'car': lambda x: x[0],\n 'cdr': lambda x: x[1:],\n 'cons': lambda x, y: [x] + y,\n 'map': lambda *args: list(map(*args)),\n 'apply': lambda proc, args: proc(*args),\n 'number?': lambda num: isinstance(num, Number),\n 'begin': lambda *x: x[-1],\n 'length': len,\n 'equal?': op.eq,\n 'list': lambda *x: list(x),\n 'list?': lambda x: isinstance(x, List),\n '#t': True,\n '#f': False,\n ' ': None,\n 'positive?': lambda x: x > 0,\n 'negative?': lambda x: x <= 0\n })\n return env",
"def make_env():\n return {\n 'init': init,\n 'step': step,\n 'is_terminal': is_terminal,\n 'state_as_example': state_as_example,\n }",
"def _run_env(self):\n raise NotImplementedError()",
"def init():\n env = Environment(5, 5, 20, [10, 20, 10, 5])\n return env",
"def __init__(self): \n\t\n\t # get the environment\n\t\tself.env = env()",
"def env(parser, args):\n action = subcommand_functions[args.env_command]\n action(args)",
"def B():\n set_env()",
"def makeenv(outer=None):\n\n retval = {'outer': outer}\n return retval",
"def SetupEnvironment(self):\n pass",
"def _create_environment(config):\n if isinstance(config.env, str):\n env = gym.make(config.env)\n else:\n env = config.env()\n if config.max_length:\n env = tools.wrappers.LimitDuration(env, config.max_length)\n env = tools.wrappers.RangeNormalize(env)\n env = tools.wrappers.ClipAction(env)\n env = tools.wrappers.ConvertTo32Bit(env)\n return env",
"def init_environment(self):\n # import outside of cell so we don't get a traceback\n from sage import all_cmdline\n from sage.repl.user_globals import initialize_globals\n initialize_globals(all_cmdline, self.shell.user_ns)\n self.run_init()",
"def step_env(self):\n raise NotImplementedError\n # Not needed for this homework",
"def get_environmentals(self):\n for k, v in utils.slurm_envs(default.SBATCH_VARS_FOR_WORKFLOW).items():\n setattr(self, k, v)",
"def __MakeEnvironment(self):\n environment= os.environ.copy()\n\n for key, value in self.__context.items():\n if type(value) is str:\n name = \"QMV_\" + key.replace(\".\", \"__\")\n environment[name]= value\n\n return environment",
"def initialize():\n environment = Environment()\n environment.setup()",
"def base_env(*args, **kwargs):\n try:\n # regular gym\n env = gym.make(*args, **kwargs)\n except:\n try:\n # gym retro\n env = retro.make(*args, **kwargs)\n except:\n # gym-super-mario-bros\n env = gym_super_mario_bros.make(*args, **kwargs)\n env.recognized = None\n return env",
"def setup_development_environment():\n import hashlib\n\n # Enable SQL debug\n sql_debug(True)\n\n # Create test user\n test_name = \"test\"\n test_salt = \"salt\"\n test_password = hashlib.sha256(\"{name}{salt}\".format(name=test_name, salt=test_salt).encode(\"utf8\")).hexdigest()\n account = Account(name=test_name,\n password=test_password,\n salt=test_salt)\n\n # Create test universe\n universe = Universe.create(name=\"Test universe\", owner=account)\n planet = Planet(name=\"Test planet\")\n region = Region(name=\"Test region\", planet=planet)\n\n place = Place(\n name=\"Void\",\n description=\"You are in the void\",\n universe=universe,\n region=region\n )",
"def get_empty_env():\n return EvalEnvironment(namespaces={})",
"def get_empty_env():\n return EvalEnvironment(namespaces={})",
"def repl(existing_env=None, get_input=None):\n if existing_env is None:\n env = environment.Environment()\n else:\n env = existing_env\n if get_input is None:\n get_input = raw_input\n brace_matcher = prepare_program.BraceMatcher(True)\n while True:\n brace_matcher.reset()\n expr = get_input('Capacita> ')\n expr = expr.strip()\n if expr == 'exit()':\n break\n elif len(expr) == 0:\n continue\n elif expr == ':program':\n prgm = store_program(get_input)\n execute_program(prgm)\n elif expr == ':code':\n prgm = store_program(get_input)\n execute_program(prgm, env)\n elif expr.startswith('when ') or is_clause_opener(expr) or \\\n not brace_matcher.match_line(expr).is_complete():\n prgm = store_code_block(get_input, expr, brace_matcher)\n if prgm.rstrip('\\n').count('\\n') == 0 and \\\n not line_manager.is_statement(prgm):\n print_evaluated_expr(prgm, env)\n else:\n execute_program(prgm, env)\n elif expr == 'this':\n print(env.frames)\n else:\n # Since expr could contain semicolon-separated lines of code,\n # extract all the lines:\n line_mgr, _ = convert_program_to_lines(expr, env)\n line_mgr.classify_statements()\n if line_mgr.has_nothing():\n continue\n line_mgr.drop_empty()\n if len(line_mgr) > 1:\n leading_lines = line_mgr.subset(0, -1)\n execution.execute_lines(leading_lines, env)\n last_expr_data = line_mgr.get_line_data(-1)\n last_expr = last_expr_data.line\n if last_expr_data.is_statement:\n execution.execute_statement(last_expr_data, False, env)\n else:\n print_evaluated_expr(last_expr, env)",
"def __init__(self, env):\n super().__init__(env)",
"def __init__(self, env):\n super().__init__(env)",
"def __init__(self, env):\n super().__init__(env)",
"def init_environment(env_name):\n env = gym.make(env_name)\n discrete = False\n if type(env.action_space) is gym.spaces.Discrete:\n discrete = True\n else:\n env = NormalizedActions(env)\n return env, discrete",
"def init_envs():\n myenv = env(\n Ndim=N_DIMS,\n lambda_over_dx=LAMBDA_OVER_DX,\n R_dt=R_DT,\n norm_Poisson=NORM_POISSON,\n Ngrid=N_GRID,\n Nhits=N_HITS,\n )\n if POLICY == -1:\n mymodel = reload_model(MODEL_PATH, inputshape=myenv.NN_input_shape)\n mypol = RLPolicy(\n env=myenv,\n model=mymodel,\n )\n else:\n mypol = HeuristicPolicy(\n env=myenv,\n policy=POLICY,\n steps_ahead=STEPS_AHEAD,\n )\n return myenv, mypol",
"def running_environment(self) -> type:\n return OrdinaryMNLBandit",
"def _env_setup(self, initial_qpos):\n raise NotImplementedError()",
"def scheme(self):",
"def make_shell_context():\n return dict(app=app)"
] | [
"0.82678694",
"0.6152458",
"0.5960297",
"0.584735",
"0.57988155",
"0.5716739",
"0.54847705",
"0.5483914",
"0.5445518",
"0.5431235",
"0.5369716",
"0.5353979",
"0.5348337",
"0.534168",
"0.5328177",
"0.53272533",
"0.53268397",
"0.53201205",
"0.52317595",
"0.52317595",
"0.52239025",
"0.52197963",
"0.52197963",
"0.52197963",
"0.5216185",
"0.51954377",
"0.51732546",
"0.514948",
"0.51411617",
"0.5130544"
] | 0.65491146 | 1 |
Checks if the category exists in all letters. | def category_exists(self, category: str) -> bool:
return all(category in self.data[letter] for letter in self.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _verify_basic_categories(self):\n for cat in CATEGORIES:\n if not Category.objects.filter(name=cat).exists():\n self.add_category(cat)",
"def test_categories_add(self):\n categories = [category.category for category in self.note.categories.all()]\n self.assertIn('test', categories)\n self.assertNotIn('note', categories)",
"def _check_unique_insesitive(self, cr, uid, ids, context=None):\n for category in self.browse(cr, uid, ids, context=context):\n if len(self.search(cr, uid, [('name','=ilike',category.name)], context=context)) > 1:\n raise osv.except_osv(_('Constraint Error'), _(\"The Name Must Be Unique!\"))\n return True",
"def check_categories():\n categorized_fixtures = set(\n dimmer.args[1]\n + strip.args[1]\n + plug.args[1]\n + bulb.args[1]\n + lightstrip.args[1]\n )\n diff = set(SUPPORTED_DEVICES) - set(categorized_fixtures)\n if diff:\n for file in diff:\n print(\n \"No category for file %s, add to the corresponding set (BULBS, PLUGS, ..)\"\n % file\n )\n raise Exception(\"Missing category for %s\" % diff)",
"def _check_categories(categories: list) -> bool:\n categories_count = Category.objects.all() \\\n .filter(id__in=categories) \\\n .distinct().count()\n\n if categories_count != len(set(categories)):\n return False\n\n return True",
"def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )",
"def check_categories_slugs(cls, slugs):\n CategoryModel = apps.get_model(settings.DJCAT_CATEGORY_MODEL)\n for node in CategoryModel.objects.all():\n if node.slug in slugs:\n raise ItemAttributeChoicesSlugsDuplicateWithcCategory(cls, node)",
"def test_unused_categories_logic(self):\n s = ak.array([str(i) for i in range(10)])\n s12 = s[1:3]\n cat = ak.Categorical(s)\n cat12 = cat[1:3]\n self.assertListEqual(ak.in1d(s, s12).to_list(), ak.in1d(cat, cat12).to_list())\n self.assertSetEqual(set(ak.unique(s12).to_list()), set(ak.unique(cat12).to_list()))\n\n cat_from_codes = ak.Categorical.from_codes(ak.array([1, 2]), s)\n self.assertListEqual(ak.in1d(s, s12).to_list(), ak.in1d(cat, cat_from_codes).to_list())\n self.assertSetEqual(\n set(ak.unique(s12).to_list()),\n set(ak.unique(cat_from_codes).to_list()),\n )",
"def test_hasUniqueCategoryValues(self):\n obs = self.overview_map.hasUniqueCategoryValues('Treatment')\n self.assertEqual(obs, False)\n\n obs = self.overview_map.hasUniqueCategoryValues('DOB')\n self.assertEqual(obs, False)\n\n obs = self.overview_map.hasUniqueCategoryValues('Description')\n self.assertEqual(obs, True)",
"def test_hasUniqueCategoryValues(self):\r\n obs = self.overview_map.hasUniqueCategoryValues('Treatment')\r\n self.assertEqual(obs, False)\r\n\r\n obs = self.overview_map.hasUniqueCategoryValues('DOB')\r\n self.assertEqual(obs, False)\r\n\r\n obs = self.overview_map.hasUniqueCategoryValues('Description')\r\n self.assertEqual(obs, True)",
"def has_category(business, category):\n\t\tcategories = set()\n\t\tif business['categories']:\n\t\t\tcategories = set(strip_categories(business['categories']))\n\t\tif category in categories:\n\t\t\treturn True \n\t\treturn False",
"def test_contains_wrong_shape(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, 4: 0.4}\n dim = Categorical(\"yolo\", categories, shape=2)\n\n assert 3 not in dim\n assert (\"asdfa\", 2) in dim",
"def test_delete_category_does_not_exist(self):\n self.delete_does_not_exist_fail('hats')",
"def unique_categories(categories):\n\t\treturn all(value < 3 for value in list(categories.values()))",
"def unwanted_catalogue(cat_name, banned_cat_list):\n if any(elem in cat_name for elem in banned_cat_list):\n return True\n else:\n return False",
"def test_get_categories(self):\n pass",
"def uncategorized_apply(x): \n uncategorized_lst = ['Rescue of Prisoner',\n 'Conspiracy',\n 'Felony']\n for elem in x:\n if elem in uncategorized_lst:\n return 1\n return 0",
"def isA(citem, testCategory):\n try:\n return testCategory.lower().strip() in citem.category\n except:\n for tc in testCategory:\n if tc.lower().strip() in citem.category:\n return True\n return False",
"def isA(citem, testCategory):\n try:\n return testCategory.lower().strip() in citem.category\n except:\n for tc in testCategory:\n if tc.lower().strip() in citem.category:\n return True\n return False",
"def test_index_view_with_categories(self):\n add_cat('test',1,1)\n add_cat('temp',1,1)\n add_cat('tmp',1,1)\n add_cat('tmp test temp',1,1)\n\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"tmp test temp\")\n\n num_cats =len(response.context['categories'])\n self.assertEqual(num_cats , 4)",
"def valid_category(tags):\n for category in tags:\n if \"task-\" in category:\n if \"bug\" in category:\n return True, \"task-bug-hunting\"\n return True, category\n if category == \"blog\" or category == \"blogs\":\n return True, \"blog\"\n elif category == \"iamutopian\":\n return True, \"iamutopian\"\n elif \"idea\" in category or \"suggestion\" in category:\n return True, \"ideas\"\n elif category == \"development\":\n return True, \"development\"\n elif category == \"graphic\" or category == \"graphics\":\n return True, \"graphics\"\n elif \"bughunt\" in category or \"bug-hunt\" in category:\n return True, \"bug-hunting\"\n elif \"analysis\" in category:\n return True, \"analysis\"\n elif \"visibility\" in category or \"social\" in category:\n return True, \"social\"\n elif \"videotut\" in category or \"video-tut\" in category:\n return True, \"video-tutorials\"\n elif category == \"tutorial\" or category == \"tutorials\":\n return True, \"tutorials\"\n elif \"copywrit\" in category:\n return True, \"copywriting\"\n elif \"document\" in category:\n return True, \"documentation\"\n elif \"translation\" in category:\n return True, \"translations\"\n elif category == \"antiabuse\" or category == \"anti-abuse\":\n return True, \"anti-abuse\"\n return False, \"\"",
"def test_add_same_category(self):\n response = self.client.post('/api/v1/categories',\n data=json.dumps(category[0]),\n content_type='application/json',\n headers=self.admin_headers)\n self.assertEqual(response.status_code, 409)\n self.assertIn('category with name already exist',\n str(response.data))",
"def test_contains_false(self):\n self.assertFalse('Not_a_Category' in self.tester)",
"def test_contains_false(self):\n self.assertFalse('Not_a_Category' in self.tester)",
"def test_warning_on_duplicate_category(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=2).save()\n response = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertRedirects(response, '/categories/')\n response2 = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertContains(response2, \"already exists\")",
"def test_categoryQuery(self) -> None:\n result = self.entries.filter(category__iexact='suncare')\n self.assertGreater(len(result), 0)\n\n result = self.entries.filter(category__iexact='xxxxxx')\n self.assertEqual(len(result), 0)",
"def test_compare_categories_categorical_variables(self):\r\n for method in self.cat_methods:\r\n compare_categories(self.dm1_fp, self.map1_fp, method,\r\n self.cat_categories, self.num_perms, self.test_dir)\r\n results_fp = join(self.test_dir, '%s_results.txt' % method)\r\n self.files_to_remove.append(results_fp)\r\n results_f = open(results_fp, 'U')\r\n results = results_f.readlines()\r\n results_f.close()\r\n\r\n # Make sure the files aren't empty.\r\n self.assertTrue(len(results) > 0)",
"def test_contains_from_categorical(self, tdim2):\n assert (0, 0, 0, 1) in tdim2\n assert (0, 2, 0, 1) in tdim2\n assert (0, 2, 0) not in tdim2",
"def test_get_categories_success(self):\n self.test_add_category_success()\n response = self.client.get('/categories',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('asian', response.data.decode())",
"def test_categorical_column_validates_categories(self):\n\n categories = 1\n\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", 1]\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", \"Blue\"]\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)"
] | [
"0.68643826",
"0.65396655",
"0.6447354",
"0.6423177",
"0.6365938",
"0.6034636",
"0.5962784",
"0.59085315",
"0.584652",
"0.5841601",
"0.58201087",
"0.5816927",
"0.5771564",
"0.5758775",
"0.559042",
"0.5577145",
"0.5576167",
"0.55607396",
"0.55607396",
"0.5521435",
"0.54990447",
"0.54810625",
"0.54796326",
"0.54796326",
"0.5467923",
"0.5459759",
"0.54327047",
"0.5432413",
"0.54112667",
"0.54090714"
] | 0.6557177 | 1 |
Edit a comment. Requires HTTP POST and "can change comments" or "can moderate comments" permission. Users can only edit comments they own, unless they are granted "comments.can_moderate" permissions. If ``POST['submit'] == "preview"`` or there are errors, a preview template ``comments/preview.html`` will be rendered. | def edit(request, comment_id, next=None):
comment = get_object_or_404(
get_model(), pk=comment_id, site__pk=settings.SITE_ID
)
# Make sure user has correct permissions to change the comment,
# or return a 401 Unauthorized error.
if not (request.user == comment.user and request.user.has_perm("comments.change_comment")
or request.user.has_perm("comments.can_moderate")):
return HttpResponse("Unauthorized", status=401)
# Populate POST data with all required initial data
# unless they are already in POST
data = request.POST.copy()
if not data.get("user_name", ""):
data["user_name"] = request.user.get_full_name() or request.user.username
if not data.get("user_email"):
data["user_email"] = request.user.email
next = data.get("next", next)
CommentEditForm = comments_extension.get_edit_form()
form = CommentEditForm(data, instance=comment)
if form.security_errors():
# NOTE: security hash fails!
return CommentEditBadRequest(
"The comment form failed security verification: %s" % \
escape(str(form.security_errors())))
# If there are errors, or if a preview is requested
if form.errors or "preview" in data:
app_label, model = (form.instance.content_type.app_label, form.instance.content_type.model)
template_search_list = [
"comments/%s/%s/edit-preview.html" % (app_label, model),
"comments/%s/edit-preview.html" % model,
"comments/edit-preview.html"
]
return render_to_response(
template_search_list, {
"comment_obj": comment,
"comment": form.data.get("comment", ""),
"form": form,
"next": next,
},
RequestContext(request, {})
)
# Otherwise, try to save the comment and emit signals
if form.is_valid():
MODERATOR_EDITED = "moderator edited"
flag, created = CommentFlag.objects.get_or_create(
comment = form.instance,
user = request.user,
flag = MODERATOR_EDITED
)
form.instance.is_removed = False
form.save()
comment_was_flagged.send(
sender = comment.__class__,
comment = comment,
flag = flag,
created = created,
request = request
)
return utils.next_redirect(
request, fallback=next or 'comments-comment-done', c=comment._get_pk_val()
)
else:
# If we got here, raise Bad Request error.
return CommentEditBadRequest("Could not complete request!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(self):\n modified_content = self.request.get('comment_edit')\n comment_id = self.request.get('comment_id')\n comment = Comments.get_by_id(int(comment_id))\n user = self.get_active_user()\n\n if user.key().id() == comment.submitter_id:\n comment.content = modified_content\n comment.put()\n self.redirect('/%s' % str(comment.post_id))\n else:\n self.error(403)",
"def edit_comment(self, comment_id, comment):\n return self.proxy.wp.editComment(self.blog_id, self.username, self.password,\n comment_id, comment)",
"async def edit_comment(*, comment: models.Comment = Depends(resolve_user_owned_comment), edited_comment: EditComment,\n db: Session = Depends(get_db)):\n return crud.update_comment(db, comment_id=comment.id, **edited_comment.dict(exclude_unset=True))",
"def edit_comment():\n # Implement me!\n\n logger.info(\"vars: %r\" % request.vars)\n logger.info(\"vars_comment_text: %r\" % request.vars.comment_text)\n logger.info(\"vars id: %r\" % request.vars.comment_id)\n logger.info(\"comment_text: %r\" % db(db.Comments.id == request.vars.comment_id))\n\n #comment.comment_text = request.vars.comment_text\n #comment.edited_on = datetime.datetime.utcnow()\n db(db.Comments.id == request.vars.comment_id).update(comment_text=request.vars.comment_text, edited_on=datetime.datetime.utcnow())\n db.commit()\n logger.info(\"comment_text: %r\" % db(db.Comments.id == request.vars.comment_id))\n return \"ok\"",
"def edit_comment(bid, pid, cid):\n # pylint: disable=unused-argument\n comment = Comment.query.get(cid)\n form = CommentForm(request.form)\n if request.method == 'POST' and current_user.uid == comment.uid:\n if form.validate():\n if comment.text != form.text.data:\n comment.text = form.text.data\n DB.session.commit()\n flash('Comment successfully edited!')\n else:\n flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)",
"def edit_comment(self, id, body, **args):\n args.update(id=id, body=body)\n return self.fetch(\"/comment\", post_args=args)",
"def update(self, request, pk=None, **kwargs):\n self.permission_classes.append(IsAuthorOrReadOnly)\n comment = get_object_or_404(Comment, pk=self.kwargs[\"id\"])\n self.check_object_permissions(self.request, comment)\n data = request.data\n serializer = self.serializer_class(comment, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({\"comment\" : serializer.data, \"Status\": \"Edited\" }, status=status.HTTP_201_CREATED)",
"def submit_comment_edit(self, comment_id, new_comment_body):\r\n self._find_within(\"#comment_{} .post-update\".format(comment_id)).first.click()\r\n EmptyPromise(\r\n lambda: (\r\n not self.is_comment_editor_visible(comment_id) and\r\n self.is_comment_visible(comment_id) and\r\n self.get_comment_body(comment_id) == new_comment_body\r\n ),\r\n \"Comment edit succeeded\"\r\n ).fulfill()",
"def edit_accomment(request, pk):\n\n comment = get_object_or_404(ActorComment, pk=pk)\n form = ActorCommentForm()\n if request.method == \"POST\":\n form = ActorCommentForm(request.POST, instance=comment)\n\n if form.is_valid():\n form.save()\n url = '../../' + str(comment.actor.pk)\n return redirect(url)\n\n context = {\n 'form': form,\n 'comment': comment,\n }\n return render(request, context)",
"def edit_comment(self, board_name, card_names):\n result_flag = False\n self.payload = self.auth.copy()\n card_id = self.get_card_id(board_name, card_names)\n action_id = self.get_action_id_name(board_name, card_names)\n self.payload['text'] = \"Comment edited / Updated the comment\"\n url = self.url + \"/cards/\" + card_id + \"/actions/\" + action_id + \"/comments\"\n response = requests.put(url=url, data=self.payload)\n if response.status_code == 200:\n result_flag = True\n return result_flag",
"def post(self):\n comment_id = self.request.get('comment_id')\n post_id = self.request.get('post_id')\n comment = Comment.get_by_id(int(comment_id), parent=comment_key())\n post = Post.get_by_id(int(post_id), parent=blog_key())\n if comment and self.user.key().id() == comment.user.key().id():\n comment.content = self.request.get('content')\n\n have_errors = False\n\n if not comment.content:\n error_content = \"Content is required\"\n have_errors = True\n\n if have_errors:\n self.render(\"edit_comment.html\",\n comment=comment,\n error_content=error_content,\n user=self.user)\n else:\n comment.put()\n time.sleep(0.1)\n\n self.redirect('/blog/%s' % str(post.key().id()))",
"def edit_post(request, post_id):\n post = Post.objects.get(id=post_id)\n check_post_owner(request, post)\n\n if request.method != 'POST':\n # Initial request; pre-fill form with the current entry.\n form = PostForm(instance=post)\n else:\n # POST data submitted; process data.\n form = PostForm(instance=post, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('blogs:post', post_id=post.id)\n\n context = {'post': post, 'form': form}\n return render(request, 'blogs/edit_post.html', context)",
"def edit(self, comment):\n try:\n self.comment = comment\n self.save()\n except Exception as e:\n raise Exception(\"Failed to save, rolling back transaction.\" \\\n \"Details: %s\" % e)",
"def edit_post(bid, pid):\n # pylint: disable=unused-argument\n pst = Post.query.get(pid)\n form = PostForm(request.form)\n if request.method == 'POST' and current_user.uid == pst.uid:\n if form.validate():\n if pst.name != form.name.data or pst.text != form.desc.data:\n og_name = pst.name\n pst.name = form.name.data\n pst.text = form.desc.data\n DB.session.commit()\n flash('Post ({}) successfully edited!'.format(og_name))\n else:\n flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)",
"def editComment(owner_id=None, comment_id=None, message=None, attachments=None):\n params = {\n 'owner_id': owner_id,\n 'comment_id': comment_id,\n 'message': message,\n 'attachments': attachments\n }\n result = call('wall.editComment', **params)\n return parse_response(result)",
"def edit_mvcomment(request, pk):\n\n comment = get_object_or_404(MovieComment, pk=pk)\n\n if request.method == \"POST\":\n form = MovieCommentForm(request.POST, instance=comment)\n\n if form.is_valid():\n form.save()\n url = '../../' + str(comment.movie.pk)\n return redirect(url)\n else:\n form = MovieCommentForm(instance=comment)\n\n context = {\n 'form': form,\n 'comment': comment,\n }\n return render(request, context)",
"def process_comment(request, comment, post):\n\n if request.user.is_authenticated:\n # We already set auth user's name and email in the form's inital vals.\n comment.author = request.user\n\n # Is this a threaded comment?\n if request.POST.get(\"parent_id\"):\n comment.parent = Comment.objects.get(id=request.POST.get(\"parent_id\"))\n\n # If commenter is logged in, override name and email with stored values from User object\n if request.user.is_authenticated:\n comment.name = request.user.get_full_name()\n comment.email = request.user.email\n\n # Set required relationship to Post object\n comment.post = post\n\n # Get commenter's IP and User-Agent string\n # ip = get_ip(request)\n # if ip is not None:\n # comment.ip_address = ip\n comment.user_agent = request.META.get(\"HTTP_USER_AGENT\", \"\")\n\n # Run spam check\n comment.spam = spam_check(comment)\n\n # Strip disallowed HTML tags. See tangerine docs to customize.\n comment.body = sanitize_comment(comment.body)\n\n # Call comment approval workflow\n comment.approved = get_comment_approval(comment.email, request.user.is_authenticated)\n if comment.approved:\n messages.add_message(request, messages.SUCCESS, \"Your comment has been posted.\")\n else:\n messages.add_message(request, messages.INFO, \"Your comment has been held for moderation.\")\n\n comment.save()\n\n # Alert post author that comment needs moderation, or that it's been auto-published:\n send_comment_moderation_email(comment)",
"def test_editing_comment(self):\n\n data = {\"comment\": \"Edited comment body.\"}\n result = self.client.post(\"/comment/1/edit.json\", data=data)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Edited comment\", result.data)",
"def update_comment(request, course_id, comment_id):\r\n comment = cc.Comment.find(comment_id)\r\n if 'body' not in request.POST or not request.POST['body'].strip():\r\n return JsonError(_(\"Body can't be empty\"))\r\n comment.body = request.POST[\"body\"]\r\n comment.save()\r\n if request.is_ajax():\r\n return ajax_content_response(request, SlashSeparatedCourseKey.from_deprecated_string(course_id), comment.to_dict())\r\n else:\r\n return JsonResponse(utils.safe_content(comment.to_dict()))",
"def _preview(request, context_processors, extra_context, form_class=ThreadedCommentForm):\r\n _adjust_max_comment_length(form_class)\r\n form = form_class(request.POST or None)\r\n context = {\r\n 'next' : _get_next(request),\r\n 'form' : form,\r\n }\r\n if form.is_valid():\r\n new_comment = form.save(commit=False)\r\n context['comment'] = new_comment\r\n else:\r\n context['comment'] = None\r\n return render_to_response(\r\n 'threadedcomments/preview_comment.html',\r\n extra_context, \r\n context_instance = RequestContext(request, context, context_processors)\r\n )",
"def edit_reply(praw_comment, reply_msg):\n try:\n praw_comment.edit(reply_msg)\n except Exception as e:\n logger.exception('Exception while editing')\n return False\n\n logger.info(' => Edit was made!')\n return True",
"def edit_awcomment(request, pk):\n comment = get_object_or_404(AwardComment, pk=pk)\n\n if request.method == \"POST\":\n form = AwardCommentForm(request.POST, instance=comment)\n\n if form.is_valid():\n form.save()\n url = '../../' + str(comment.award.pk)\n return redirect(url)\n else:\n form = AwardCommentForm(instance=comment)\n\n context = {\n 'form': form,\n 'comment': comment,\n }\n return render(request, context)",
"def post(self):\n post_id = int(self.request.get('post_id'))\n post = Posts.get_by_id(post_id)\n comment = self.request.get('comment')\n submitter_id = self.get_active_user().key().id()\n\n if submitter_id:\n comment = Comments(post_id=post_id, content=comment,\n submitter_id=submitter_id)\n comment.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(403)",
"def editComment(owner_id=None, comment_id=None, message=None, attachments=None):\n params = {\n 'owner_id': owner_id,\n 'comment_id': comment_id,\n 'message': message,\n 'attachments': attachments\n }\n result = call('photos.editComment', **params)\n return parse_response(result)",
"def post_comment(id):\n \n form = CommentForm()\n title = 'post comment'\n post = Post.query.filter_by(id=id).first()\n\n if post is None:\n\n abort(404)\n\n if form.validate_on_submit():\n comment = form.comment.data\n new_comment = Comments(opinion = comment, user_id = current_user.id, posts_id = post.id)\n new_comment.save_comment()\n return redirect(url_for('main.view_post', id = post.id))\n\n return render_template('comments.html', form = form, title = title)",
"def show_edit_post_form(user_id, post_id):\n\n post = Post.query.get_or_404(post_id)\n user = post.user\n\n return render_template('edit_post.html', post=post, user=user)",
"def edit_review(review_id):\n form = EditReviewForm()\n try:\n review = Review.from_mongo(**mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)}))\n except Exception as e:\n raise Exception(e)\n else:\n game = Game.from_mongo(**mongo.db.games.find_one({\"_id\": ObjectId(str(review.game_id))}))\n user_name = session.get('username')\n if user_name == review.author_ref['author_name']:\n user = User.from_mongo(**mongo.db.users.find_one({\"name\": user_name}))\n\n if form.validate_on_submit():\n review.name = form.title.data\n review.text = form.review_text.data\n review_ref = review.create_review_ref()\n review.update_review()\n for game_review in game.reviews:\n if game_review.get('review_pub_date') == review.pub_date:\n game.reviews.remove(game_review)\n game.reviews.append(review_ref)\n game.update_game()\n for user_review in user.reviews:\n if user_review.get('review_pub_date') == review.pub_date:\n user.reviews.remove(user_review)\n user.reviews.append(review_ref)\n user.update_user()\n return redirect(url_for('review', review_id=review_id))\n\n elif request.method == \"GET\":\n form.title.data = review.name\n form.review_text.data = review.text\n\n return render_template('edit_review.html.jinja',\n title='Edit Review',\n review_id=review_id,\n form=form\n )",
"def test_editing_post_comment(self):\n\n form_data = {\"comment\": \"Here's my new comment!\"}\n new_comment = edit_post_comment(1, form_data)\n\n self.assertIn(\"my new comment\", new_comment.comment_body)",
"def edit_profile_post(request, pk=None):\n profilepost = get_object_or_404(ProfilePost, pk=pk) \n if (request.user == profilepost.user or\n request.user.is_superuser):\n if request.method == \"POST\":\n profile_post_form = ProfilePostForm(request.POST, request.FILES, instance=profilepost)\n if profile_post_form.is_valid():\n profilepost = profile_post_form.save()\n messages.success(request, 'Your post has been updated!') \n return redirect(reverse('profile'))\n else:\n profile_post_form = ProfilePostForm(instance=profilepost)\n else:\n return HttpResponseForbidden()\n\n return render(request, 'newprofilepost.html', {'profile_post_form': profile_post_form})",
"def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)"
] | [
"0.6931212",
"0.69125295",
"0.6838766",
"0.6713624",
"0.66921437",
"0.64680856",
"0.6270694",
"0.62142336",
"0.61379904",
"0.6110858",
"0.61102504",
"0.59949285",
"0.59053326",
"0.5895494",
"0.5882727",
"0.5831428",
"0.57694393",
"0.5735642",
"0.573556",
"0.56685895",
"0.56433225",
"0.5636594",
"0.56166494",
"0.5588227",
"0.5587442",
"0.5548592",
"0.55422366",
"0.5465059",
"0.54643106",
"0.54386467"
] | 0.72437614 | 0 |
Returns a `mu` value corresponding to an area under the ROC curve. Under the signal detection theory framework, when the positive and negative score distributions are modeled as Gaussians with unit variance, this function finds a separation in means that yields a given AUC. mu is equivalent to the sensitivity index, d' | def auc_to_mu(auc):
return np.sqrt(2) * scipy.stats.norm.ppf(auc) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def roc_auc(y_true, y_score, pos_label=..., ascending_score=...):\n ...",
"def reg_auroc(y_true, y_pred, th=0.5):\n y_true = np.where(y_true < th, 1, 0)\n y_score = np.where(y_pred < th, 1, 0)\n reg_auroc_score = sklearn.metrics.roc_auc_score(y_true, y_score)\n return reg_auroc_score",
"def calculate_mu(return_data):\n return np.array(return_data.mean())",
"def calculate_auc(df, neg, pos):\n\n nc = df[df['Strain ID'].isin(neg)]['Penetrance'].values\n pc = df[df['Strain ID'].isin(pos)]['Penetrance'].values\n\n y_score = np.append(nc, pc)\n y_true = np.append(np.repeat(0, len(nc)), np.repeat(1, len(pc)))\n sample_weights = np.append(np.repeat(float(len(pc)) / len(nc), len(nc)), np.repeat(1, len(pc)))\n aupr = metrics.average_precision_score(y_true, y_score)\n aupr_b = metrics.average_precision_score(y_true, y_score, sample_weight=sample_weights)\n auroc = metrics.roc_auc_score(y_true, y_score)\n\n return aupr, aupr_b, auroc,",
"def calc_auc(y, pred_y):\n fpr, tpr, thresholds = metrics.roc_curve(y, pred_y)\n return metrics.auc(fpr, tpr)",
"def roc_log_auc(y_true, y_score, pos_label=..., ascending_score=..., log_min=..., log_max=...):\n ...",
"def normalized_mean_absolute_error(y_real, y_pred, max_rating, min_rating):\n y_real, y_pred = check_arrays(y_real, y_pred)\n mae = mean_absolute_error(y_real, y_pred)\n return mae / (max_rating - min_rating)",
"def get_auc(self, x, mask):\n # mask the statistic\n stat_vec = np.array(x[mask.astype(bool)])\n\n # mask the ground truth to relevant area\n truth_vec = np.array(self.mask[mask.astype(bool)])\n\n # compute feat, y\n x = stat_vec[truth_vec == 0]\n y = stat_vec[truth_vec == 1]\n try:\n u = mannwhitneyu(x, y, alternative='greater')\n except ValueError:\n # all values are same\n return .5\n auc = u.statistic / (len(x) * len(y))\n auc = max(auc, 1 - auc)\n # pval = min(u.pvalue, 1 - u.pvalue)\n\n return auc",
"def mse(o, r):\r\n\r\n return np.mean(np.square((np.abs(o).astype(float) - np.abs(r).astype(float))))",
"def auc(y_true,y_pred):\n with tf.name_scope(\"RocAucScore\"):\n\n pos = tf.boolean_mask(y_pred, tf.cast(y_true, tf.bool))\n neg = tf.boolean_mask(y_pred, ~tf.cast(y_true, tf.bool))\n\n pos = tf.expand_dims(pos, 0)\n neg = tf.expand_dims(neg, 1)\n\n # original paper suggests performance is robust to exact parameter choice\n gamma = 0.2\n p = 3\n\n difference = tf.zeros_like(pos * neg) + pos - neg - gamma\n\n masked = tf.boolean_mask(difference, difference < 0.0)\n\n return tf.reduce_sum(tf.pow(-masked, p))",
"def _predictive_mean_analytical(self, mu, sigma):\r\n #FIXME: Not correct\r\n return mu",
"def logAUC(points):\n\t# assumes we have previously interpolated to get y-value at x = 0.1% \n\t# generate new points array clamped between 0.1% and 100%\n\n\t# constants\n\t## if you modify also change in plots.py \n\tLOGAUC_MAX = 1.0 ## this should not change\n\tLOGAUC_MIN = 0.001 ## this you may want to change if you database is large and you have strong early enrichment. \n\tRANDOM_LOGAUC = (LOGAUC_MAX-LOGAUC_MIN)/np.log(10)/np.log10(LOGAUC_MAX/LOGAUC_MIN)\n\n\tnpoints = []\n\tfor x in points:\n\t\tif (x[0] >= LOGAUC_MIN*100) and (x[0] <= LOGAUC_MAX*100):\n\t\t\tnpoints.append( [x[0]/100 , x[1]/100] )\n\n\tarea = 0.0\n\tfor point2, point1 in zip(npoints[1:], npoints[:-1]):\n\t\tif point2[0] - point1[0] < 0.000001:\n\t\t\tcontinue\n\t\t# segment area computed as integral of log transformed equation\n\t\tdx = point2[0]-point1[0]\n\t\tdy = point2[1]-point1[1]\n\t\tintercept = point2[1] - (dy)/(dx) * point2[0]\n\t\tarea += dy/np.log(10) + intercept*(np.log10(point2[0])-np.log10(point1[0]))\n\n\treturn area/np.log10(LOGAUC_MAX/LOGAUC_MIN) - RANDOM_LOGAUC",
"def get_auc(data, idx):\n r = data[\"r\"] * 10\n g = data[\"g\"][idx]\n\n min1, _ = find_local_minima(r, data[\"g\"][idx], 0.15 * 10)\n min2, _ = find_local_minima(r, data[\"g\"][idx], 0.34 * 10) # Changed from 3.6 to 3.4\n\n # When this occurs, min2 is usually too low\n if min1 == min2:\n min2 = 0.34 * 10\n\n min1_idx = np.where(np.isclose(r, min1, rtol=0.02))[0][0]\n min2_idx = np.where(np.isclose(r, min2, rtol=0.02))[0][0]\n\n r_peak = r[min1_idx:min2_idx]\n g_peak = g[min1_idx:min2_idx]\n\n auc = np.trapz(g_peak[g_peak > 1] - 1, r_peak[g_peak > 1])\n\n return auc",
"def mean(self):\n return self.mu",
"def avg_iou(self):\n return np.mean(self.ious_[np.arange(len(self.labels_)), self.labels_])",
"def auc_for_result(result_dir):\n rr = ResultReader(result_dir)\n aucs = [rr.auc_from_fn(fold) for fold in rr.present_folds()]\n return np.mean(aucs), np.std(aucs)",
"def auqc(y_true, uplift, treatment):\n warnings.warn(\n 'Metric `auqc` was renamed to `qini_auc_score`'\n 'in version 0.1.0 and will be removed in 0.2.0',\n FutureWarning\n )\n return qini_auc_score(y_true, uplift, treatment)",
"def auuc(y_true, uplift, treatment):\n warnings.warn(\n 'Metric `auuc` was renamed to `uplift_auc_score`'\n 'in version 0.1.0 and will be removed in 0.2.0',\n FutureWarning\n )\n return uplift_auc_score(y_true, uplift, treatment)",
"def reader_score():\n reader_ranef_negative, reader_ranef_positive = sigma_r * rng.randn(2)\n error_term = np.sqrt(1 - sigma_c**2) * rng.randn(num_cases)\n reader_score = (mu + delta_mu) * disease\n reader_score += reader_ranef_negative * (1 - disease)\n reader_score += reader_ranef_positive * disease\n reader_score += sigma_c * case_random_effect\n reader_score += error_term\n return reader_score",
"def optimize_g_mean(self):\n g_means = []\n fpr, tpr, thresholds = metrics.roc_curve(self.target, self.prediction, pos_label=1)\n roc_auc = metrics.auc(fpr, tpr)\n for i in range(len(fpr)):\n g_means.append(sqrt(tpr[i] * (1 - fpr[i])))\n plt.figure()\n idx = argmax(g_means)\n lw = 2\n print('Best Threshold=%f, G-Mean=%.3f' % (thresholds[idx], g_means[idx]))\n plt.plot(fpr, tpr, color='darkorange', lw=lw, label='Curva ROC (area ={0:.2f})'.format(roc_auc))\n plt.scatter(fpr[idx], tpr[idx], marker='o', color='black', label='Melhor Resultado')\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Especificidade')\n plt.ylabel('Sensibilidade')\n plt.title('Curva ROC')\n plt.legend(loc=\"lower right\")\n plt.show()\n matplotlib.use(\"pgf\")\n matplotlib.rcParams.update({\n \"pgf.texsystem\": \"pdflatex\",\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n })\n plt.savefig('ROC_Curve2.pgf')\n self.threshold = thresholds[idx]\n self.set_variables()\n self.eval()\n return self",
"def TestStatistic(self, data):\n group1, group2 = data\n n1, n2 = len(group1), len(group2)\n pred1 = [i/n1 for i in range(n1, 0, -1)] \n pred2 = [i/n2 for i in range(n2, 0, -1)] \n test_stat = abs(\n roc_auc_score(group1, pred1) \n - roc_auc_score(group2, pred2)\n )\n return test_stat",
"def avgmu(self):\n if self._dataframe is DataframeEnum.SkimmedNtuple:\n return self._event.averageIntPerXing\n elif self._dataframe is DataframeEnum.PhysVal:\n return self._event.avgmu\n else:\n self._logger.warning(\"Impossible to retrieve the value of avgmu. Unknow dataframe.\")",
"def DA_AUC(data):\r\n xs = []\r\n ys =[]\r\n for row in data:\r\n xs.append(row[0])\r\n ys.append(row[2]) #DA0 for now idrk\r\n print(auc(xs,ys))",
"def auc(classification_metric):\n y_true = classification_metric.dataset.labels\n y_pred = classification_metric.classified_dataset.scores\n return roc_auc_score(y_true, y_pred)",
"def auc(x, y, session=None, run_kwargs=None):\n check_consistent_length(x, y)\n x = column_or_1d(x)\n y = column_or_1d(y)\n\n if x.shape[0] < 2:\n raise ValueError(\n \"At least 2 points are needed to compute\"\n f\" area under curve, but x.shape = {x.shape}\"\n )\n\n direction = 1\n dx = mt.diff(x)\n any_dx_lt_0 = mt.any(dx < 0)\n all_dx_le_0 = mt.all(dx <= 0)\n mt.ExecutableTuple([x, any_dx_lt_0, all_dx_le_0]).execute(\n session=session, **(run_kwargs or dict())\n )\n if any_dx_lt_0.fetch(session=session):\n if all_dx_le_0.fetch(session=session):\n direction = -1\n else:\n x_data = x.fetch(session=session)\n raise ValueError(f\"x is neither increasing nor decreasing : {x_data}.\")\n\n area = direction * mt.trapz(y, x)\n return area.execute(session=session, **(run_kwargs or dict()))",
"def mean(self, mu, sigma):\n return mu",
"def qini_auc_score(y_true, uplift, treatment):\n # ToDO: Add normalization\n # ToDO: Add baseline\n return auc(*qini_curve(y_true, uplift, treatment))",
"def receiver_operating_characteristic_auc(self):\r\n\r\n labels, scores = self.receiver_operating_characteristic_labels_scores()\r\n # what's the ROC AUC if there is only one class?\r\n if numpy.unique(labels).shape[0] == 1:\r\n return 1\r\n else:\r\n return sklearn.metrics.roc_auc_score(labels, scores)",
"def uplift_auc_score(y_true, uplift, treatment):\n # ToDO: Add normalization\n # ToDO: Add baseline\n return auc(*uplift_curve(y_true, uplift, treatment))",
"def auroc(self, option='average'):\n\t\tif option == 'classwise':\treturn self.class_auroc_\n\t\telif option == 'average':\treturn self.avg_auroc_"
] | [
"0.5588348",
"0.5521685",
"0.5497163",
"0.5482069",
"0.54366106",
"0.5420568",
"0.54193854",
"0.5410214",
"0.53788114",
"0.53528005",
"0.53356016",
"0.5305437",
"0.529486",
"0.52760977",
"0.5267704",
"0.5251772",
"0.52025825",
"0.5185603",
"0.51713854",
"0.5145581",
"0.5145538",
"0.5080318",
"0.5069261",
"0.50675744",
"0.5058177",
"0.5049169",
"0.50400984",
"0.5032585",
"0.5027177",
"0.5025919"
] | 0.59110963 | 0 |
Generates the scores for a single reader. | def reader_score():
reader_ranef_negative, reader_ranef_positive = sigma_r * rng.randn(2)
error_term = np.sqrt(1 - sigma_c**2) * rng.randn(num_cases)
reader_score = (mu + delta_mu) * disease
reader_score += reader_ranef_negative * (1 - disease)
reader_score += reader_ranef_positive * disease
reader_score += sigma_c * case_random_effect
reader_score += error_term
return reader_score | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _scan_scores(self,handle, consumer):\n read_and_call(handle, consumer.scores, start=\"Smith-Waterman\")",
"def update_scores(read_scores, result):\n\n\t# check that there are some results for this read\n\tassert len(result) > 0\n\t# write this read to output\n\tfor sim_match in result.keys():\n\n\t\t# get type (discordant or chimeric)\n\t\tjunc_type = sim_match.split('_')[2]\n\n\t\tfor analysis_match in result[sim_match]:\n\t\t\t\n\t\t\t# get each score type\n\t\t\tfor score_type in read_scores:\n\t\t\t\tscore = analysis_match[score_type]\n\t\t\t\tread_scores[score_type][junc_type][score] += 1",
"def scoring(self):\n pass",
"def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc",
"def find_reader_relations():\n for reader in readers:\n d100 = random.randint(1, 100)\n if d100 <= 50:\n reader_favourite_book[readers[reader]] = random.choice(list(book_ids.values()))\n\n d100 = random.randint(1, 100)\n if d100 <= 5:\n #TODO: fix so that you cannot be friend of yourself\n reader_knows[readers[reader]] = [random.choice(list(authors.values()))] + [random.choice(list(readers.values()))]\n elif d100 > 5 and d100 <= 10:\n reader_knows[readers[reader]] = [random.choice(list(authors.values()))]\n elif d100 > 10 and d100 <= 25:\n reader_knows[readers[reader]] = [random.choice(list(readers.values()))] + [random.choice(list(readers.values()))]\n elif d100 > 25 and d100 <= 50:\n reader_knows[readers[reader]] = [random.choice(list(readers.values()))]",
"def generateScore(self):\n totalFreq = self.lazySum(key=None)\n for file in self._candidate_files:\n filename = os.path.basename(file)\n score_file = os.path.join(self._score_dir, filename)\n with open(score_file, 'w') as ofd:\n with open(file) as ifd:\n for line in ifd:\n words = line.strip().split('\\t')\n if len(words) < 2 or any(map(lambda word:len(word)<2, words)):\n continue\n\n XFreq = self.lazySum(words[0])\n YFreq = self.lazySum(words[1])\n XYFreq = self.lazySum(line.strip())\n # frequences filter\n #if XYFreq < 2 or XYFreq > 24:\n # continue\n if YFreq == 0 or XFreq == 0 or XYFreq == 0:\n # because when generating grams, we last last words' frequency\n continue\n PX = XFreq * 1.0 / totalFreq\n PY = YFreq * 1.0 / totalFreq\n PXY = XYFreq * 1.0 / totalFreq\n score = math.log(PXY/PX/PY, 2) * XYFreq\n #print \"Freq:\", XFreq, YFreq, XYFreq\n result = \"{0}\\t{1:.2f}\\n\".format(line.strip(), score)\n ofd.write(result)",
"def rouge_score(references, generated):\r\n score = rouge(generated, references)\r\n rouge_s = {k: (v * 100) for (k, v) in score.items()}\r\n '''\r\n \"rouge_1/f_score\": rouge_1_f,\r\n \"rouge_1/r_score\": rouge_1_r,\r\n \"rouge_1/p_score\": rouge_1_p,\r\n \"rouge_2/f_score\": rouge_2_f,\r\n \"rouge_2/r_score\": rouge_2_r,\r\n \"rouge_2/p_score\": rouge_2_p,\r\n \"rouge_l/f_score\": rouge_l_f,\r\n \"rouge_l/r_score\": rouge_l_r,\r\n \"rouge_l/p_score\": rouge_l_p,\r\n '''\r\n return rouge_s",
"def score(self):",
"def get_scores(self):\n return self.score",
"def readScore(self):\n return self.zmwMetric(\"ReadScore\")",
"def generate_reader(n):\n counter = 1\n for i in range(n):\n name = generate_reader_name()\n if not name in readers:\n readers[name] = f'Reader/{counter}'\n counter += 1",
"def fit_reader(self, reader):\n return self.fit(line for (_, line) in reader.readsents(silent=False))",
"def get_score(self, student_answers):\r\n pass",
"def compute_scores(self, *scorers):\n if self.nodes[0]:\n list_ = self.nodes\n else:\n list_ = self.reaction_trees\n\n for idx, item in enumerate(list_):\n scores = {repr(scorer): scorer(item) for scorer in scorers}\n self.all_scores[idx].update(scores)\n self._update_route_dict(self.all_scores, \"all_score\")",
"def create_scores(self):\n fout = open('scores','w')\n fout.write(reduce(lambda x, key: '{0}\\n{1:<15}{2}'.format(x, key, str(self.scores[key])), self.scores, ''))\n pass",
"def read_score(self):\n file_path = 'score.txt'\n \n with open(file_path, 'r') as f:\n score = f.read()\n\n if score == '':\n return 0\n else:\n return int(score)",
"def get_score(self):\n for response in self.response_list:\n self.score += response.get_score",
"def get_r_score(self):\n return self.r_score",
"def makeScore(self):\n sc = stream.Score()\n num_voices = 2\n pitches = ['C', 'A-']\n for i in range(num_voices):\n part = stream.Part()\n part.id = 'part %d' % i\n time_sig = meter.TimeSignature('4/4')\n key_sig = key.Key('c')\n\n # Make a note.\n n1 = music21_note.Note(pitches[i])\n n1.duration.quarterLength = 1\n\n # Add full measure.\n full_m = stream.Measure()\n full_m.append(time_sig)\n full_m.append(key_sig)\n full_m.append(n1)\n n2 = n1.transpose('M2')\n full_m.append(n2)\n full_m.repeatAppend(n1, 2)\n part.append(full_m)\n\n # Add another full measure.\n full_m = stream.Measure()\n full_m.append(n1)\n n2 = n1.transpose('M2')\n full_m.append(n2)\n full_m.repeatAppend(n1, 2)\n part.append(full_m)\n\n sc.insert(0, part)\n\n # Show the full score and all score elements in indented text.\n # sc.show('text')\n return sc",
"def score(self):\n return self.client.call('GET', self.name + 'score')",
"def get_score(self):\n files_flare = self.generate_flare_set()\n files_non_flare = self.generate_non_flare_set()\n timeseries = []\n y = []\n scores = {}\n column_mapping = self.__get_column_mapping()\n for col in tqdm(range(1, 25)):\n for file in tqdm(files_flare):\n s = Sample(\"FL\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n\n for file in tqdm(files_non_flare):\n s = Sample(\"NF\", file).get_data().iloc[:, col].values\n y.append(self.mapping[file[0]])\n timeseries.append(s)\n embed = self.get_embed_vector(timeseries)\n\n embed_y = KMeans(n_clusters=5).fit_predict(embed)\n y = np.array(y).flatten()\n scores[column_mapping[col]] = self.relevance_score(embed_y, y)\n timeseries = []\n y = []\n scores_data = pd.DataFrame.from_dict(scores, orient='index', columns=['Relevance Score']).sort_values(\n by='Relevance Score', ascending=False)\n return scores_data",
"def main():\n given_scores = []\n num_grades = int(raw_input())\n for i in xrange(num_grades):\n given_scores.append(int(raw_input()))\n for score in grading_students(given_scores):\n print score",
"def get_score(self):\n return self.score",
"def score_files(generator, bleurt_checkpoint):\n ref_buffer = []\n cand_buffer = []\n scores_buffer = []\n\n if not FLAGS.batch_same_length:\n scorer = score_lib.BleurtScorer(bleurt_checkpoint)\n else:\n logging.warning(\n \"Enabling same length batching. BEWARE: this is an experimental \"\n \"feature.\")\n scorer = score_lib.LengthBatchingBleurtScorer(bleurt_checkpoint)\n\n def _consume_buffer():\n scores = scorer.score(\n references=ref_buffer,\n candidates=cand_buffer,\n batch_size=FLAGS.bleurt_batch_size)\n del ref_buffer[:]\n del cand_buffer[:]\n scores_buffer.extend(scores)\n\n logging.info(\"Computing BLEURT scores...\")\n for ref_sentence, cand_sentence in generator:\n ref_buffer.append(ref_sentence)\n cand_buffer.append(cand_sentence)\n if len(ref_buffer) >= FLAGS.read_buffer_size:\n _consume_buffer()\n if ref_buffer:\n _consume_buffer()\n logging.info(\"BLEURT scores computed.\")\n\n if FLAGS.scores_file:\n logging.info(\"Writing to disk.\")\n with tf.io.gfile.GFile(FLAGS.scores_file, \"w+\") as score_file:\n for s in scores_buffer:\n score_file.write(\"{}\\n\".format(str(s)))\n else:\n for s in scores_buffer:\n print(\"{}\".format(str(s)))\n logging.info(\"Done.\")",
"def test_read(self):\n self.reader._timing = [3, 2, 2, 1, 1, 1]\n score, time = self.reader.read(self.books[0], 0, 3)\n self.assertTrue(self.books[0].id_book not in self.reader._books)\n self.assertEqual(3, score)\n self.assertEqual(6, time)\n self.assertEqual([3, 3, 3, 2, 2, 2], self.reader._timing)\n score, time = self.reader.read(self.books[3], 4, 5)\n self.assertTrue(self.books[3].id_book not in self.reader._books)\n self.assertEqual(0, score)\n self.assertEqual(7, time)\n self.assertEqual([3, 3, 3, 2, 3, 3], self.reader._timing)",
"def update_score():\n pass",
"def _compute_score(self):\n\n sgml_path = str(self.sgml_file.name)\n text_path = sgml_path.replace('.sgm', '.txt')\n ref_path = 'testsets/wmt18.ende.ref.txt'\n\n from sacrebleu import process_to_text, corpus_bleu\n from pathlib import Path\n\n if not Path(text_path).exists():\n process_to_text(sgml_path, text_path)\n\n hyp_stream = [x for x in open(text_path, encoding='utf-8')]\n ref_stream = [r for r in open(ref_path, encoding='utf-8')]\n\n bleu = corpus_bleu(hyp_stream, [ref_stream])\n\n self.score = bleu.score\n self.save()",
"def get_rouge(ref_path, pred_path):\n print(\"###########cal rouge###############\")\n source_lines = [line.strip() for line in codecs.open(ref_path, \"r\", \"utf-8\").readlines()]\n pred_lines = [ pred.strip() for pred in codecs.open(pred_path, \"r\", \"utf-8\").readlines() ]\n assert len(source_lines) == len(pred_lines)\n rou = rouge.Rouge()\n scores = rou.get_scores(pred_lines, source_lines)\n ave_scores = rou.get_scores(pred_lines, source_lines, avg=True)\n result = OrderedDict()\n result[\"ave_scores\"] = ave_scores\n result[\"detail_scores\"] = scores\n print(ave_scores)\n print(\"############end rouge#############\\n\")\n return result",
"def print_scores(self):\n print(\"scores: \", self.get_scores())",
"def write_scores(self, result):\n\n df = pd.read_csv('RPSscores.csv')\n for i in range(2):\n if not str(self.rps_data[i][0]) in df['Name'].to_dict().values():\n df.loc[len(df.index)] = [str(self.rps_data[i][0]),\n 0, 0, 0]\n first_player_index = int(df.loc[df['Name'] == str(self.rps_data[0][0])].index[0])\n second_player_index = int(df.loc[df['Name'] == str(self.rps_data[1][0])].index[0])\n if result == 'Draw':\n df.iloc[first_player_index, 2] += 1\n df.iloc[second_player_index, 2] += 1\n if result == 'First':\n df.iloc[first_player_index, 1] += 1\n df.iloc[second_player_index, 3] += 1\n if result == 'Second':\n df.iloc[first_player_index, 3] += 1\n df.iloc[second_player_index, 1] += 1\n df.to_csv('RPSscores.csv', index=False)"
] | [
"0.6255468",
"0.58662325",
"0.5689173",
"0.5652576",
"0.5598593",
"0.55352265",
"0.55189174",
"0.5504589",
"0.5471471",
"0.5432648",
"0.54194653",
"0.54157305",
"0.5404302",
"0.53856426",
"0.5372259",
"0.533972",
"0.5336711",
"0.53053385",
"0.52998555",
"0.52965283",
"0.52870035",
"0.52719",
"0.5256125",
"0.52279377",
"0.52129",
"0.5197309",
"0.51905453",
"0.5182806",
"0.5165126",
"0.5145826"
] | 0.6479283 | 0 |
Simulates single-modality reader data according to the Roe-Metz model. This is the sort of data you might acquire when seeking to compare the performance of multiple readers with a standalone algorithm. Continuous "suspicion scores" are produced by both a model and the readers, but they can be thresholded to simulate binary predictions. Model scores are simulated by sampling two unit-variance normal distributions separated by mu. Model scores are assumed to be deterministic for a given case. Reader scores are simulated by sampling from two unit-variance normal distributions separated by mu + delta_mu. For any specific reader, the two score distributions also have unit variance. The reader scores for a given case are correlated with those of the algorithm. | def simulate_model_vs_readers(disease,
model_auc,
reader_auc,
sigma_r,
sigma_c,
num_readers,
rng=np.random):
mu = auc_to_mu(model_auc)
delta_mu = auc_to_mu(reader_auc) - mu
return simulate_single_modality(
disease,
mu,
delta_mu,
sigma_r=sigma_r,
sigma_c=sigma_c,
num_readers=num_readers,
rng=rng) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def simulate_single_modality(disease,\n mu,\n delta_mu,\n sigma_r,\n sigma_c,\n num_readers,\n rng=np.random):\n if sigma_c < 0 or sigma_c > 1:\n raise ValueError('sigma_c should be in [0, 1]')\n\n disease = np.array(disease, dtype=np.int32)\n if set(disease) - set([0, 1]):\n raise ValueError('disease indicators must be in {0, 1}')\n\n num_cases = len(disease)\n case_random_effect = rng.randn(num_cases)\n model_score = disease * mu + case_random_effect\n\n def reader_score():\n \"\"\"Generates the scores for a single reader.\"\"\"\n reader_ranef_negative, reader_ranef_positive = sigma_r * rng.randn(2)\n error_term = np.sqrt(1 - sigma_c**2) * rng.randn(num_cases)\n reader_score = (mu + delta_mu) * disease\n reader_score += reader_ranef_negative * (1 - disease)\n reader_score += reader_ranef_positive * disease\n reader_score += sigma_c * case_random_effect\n reader_score += error_term\n return reader_score\n\n reader_scores = np.column_stack([reader_score() for _ in range(num_readers)])\n\n return model_score, reader_scores",
"def reader_score():\n reader_ranef_negative, reader_ranef_positive = sigma_r * rng.randn(2)\n error_term = np.sqrt(1 - sigma_c**2) * rng.randn(num_cases)\n reader_score = (mu + delta_mu) * disease\n reader_score += reader_ranef_negative * (1 - disease)\n reader_score += reader_ranef_positive * disease\n reader_score += sigma_c * case_random_effect\n reader_score += error_term\n return reader_score",
"def run_epochs(self, in_reader, epochs, run_type=0):\n\n # assign variables\n self.log_object = LogObject()\n self.reader = in_reader\n\n # read state and reset variables\n avg_sum = 0\n avg_vector_sum = np.zeros(3, dtype=np.int)\n state = self.reader.read_state()\n self.offset, self.normalizer = (0, 100) if isinstance(state, ChaosState) else self.calculate_offset()\n default_reward_matrix = state.reward_matrix if isinstance(state, ChaosState) else None\n pareto_filter = state.pareto_defense_actions()\n\n # create the DQN\n self.create_model(state)\n\n # start the runs\n print(\"------------------ configuration {} ({} : {}) {} ------------------\".format(\n self.reader.default_file_name, state.config.num_nodes, state.config.sparcity, run_type))\n\n # dry run of the experiment to offset first move bias\n start_time = time.time()\n for i in range(state.size_graph + 1):\n state = self.reader.read_state(None, default_reward_matrix)\n self.run_game(0, state, run_type, pareto_filter, state.size_graph - i)\n print(\"--- run 0 %s seconds ---\" % (time.time() - start_time))\n\n # actual run of the experiment\n start_time = time.time()\n for i in range(epochs):\n\n # play the game\n state = self.reader.read_state(None, default_reward_matrix)\n epsilon = (1 - (i / epochs)) if i < (epochs * 3 / 5) else 0.2\n self.run_game(epsilon, state, run_type, pareto_filter)\n\n # save the output data\n avg_sum += self.log_object.reward_sum\n avg_vector_sum += self.log_object.vector_reward_sum\n if (i % (epochs / 100)) == 0:\n self.log_object.output_string += \"{0}\\n\".format(int((avg_sum * 100) / epochs))\n self.log_object.output_string2 += \"{0!r}\\n\".format(\n np.divide(self.log_object.vector_reward_sum * 100, epochs).astype(int).tolist())\n self.log_object.output_string3 += \"({0}) {1} \\n\".format(\n self.log_object.step_count, self.log_object.chosen_action)\n avg_vector_sum = np.zeros(3, dtype=np.int)\n avg_sum = 0\n\n # output the data\n print(\"--- run 1 %s seconds ---\" % (time.time() - start_time))\n print(self.log_object.output_string)\n print(\"------------------\")\n print(self.log_object.output_string2)\n print(\"------------------\")\n # print (log_object.output_string3)\n # print (\"------------------\")",
"def model_vs_readers_orh(disease,\n model_score,\n reader_scores,\n fom_fn,\n coverage=0.95,\n margin=0):\n if margin < 0:\n raise ValueError('margin parameter should be nonnegative.')\n\n num_cases, num_readers = reader_scores.shape\n if len(disease) != num_cases or len(model_score) != num_cases:\n raise ValueError(\n 'disease, model_score and reader_scores must have the same size '\n 'in the first dimension.')\n\n model_fom = fom_fn(disease, model_score)\n reader_foms = [fom_fn(disease, rad_scores) for rad_scores in reader_scores.T]\n average_reader_fom = np.mean(reader_foms)\n observed_effect_size = model_fom - average_reader_fom\n\n covariances = _jackknife_covariance_model_vs_readers(disease, model_score,\n reader_scores, fom_fn)\n off_diagonals = []\n for offset in range(1, num_readers):\n off_diagonals.extend(np.diag(covariances, k=offset))\n cov2 = np.mean(off_diagonals)\n\n # msr = mean squared reader difference\n msr = np.var(reader_foms - model_fom, ddof=1)\n se = np.sqrt((msr + max(num_readers * cov2, 0)) / num_readers)\n dof = (num_readers - 1) * ((msr + max(num_readers * cov2, 0)) / msr)**2\n\n return _test_result(\n effect=observed_effect_size,\n margin=margin,\n se=se,\n dof=dof,\n coverage=coverage,\n effect_size_constituents=EffectSizeConstituents(\n model_fom=model_fom, average_reader_fom=average_reader_fom))",
"def test_shared_data_as_rv_input(self):\n with pm.Model() as m:\n x = pm.MutableData(\"x\", [1.0, 2.0, 3.0])\n y = pm.Normal(\"y\", mu=x, size=(2, 3))\n assert y.eval().shape == (2, 3)\n idata = pm.sample(\n chains=1,\n tune=500,\n draws=550,\n return_inferencedata=True,\n compute_convergence_checks=False,\n )\n samples = idata.posterior[\"y\"]\n assert samples.shape == (1, 550, 2, 3)\n\n np.testing.assert_allclose(np.array([1.0, 2.0, 3.0]), x.get_value(), atol=1e-1)\n np.testing.assert_allclose(\n np.array([1.0, 2.0, 3.0]), samples.mean((\"chain\", \"draw\", \"y_dim_0\")), atol=1e-1\n )\n\n with m:\n pm.set_data({\"x\": np.array([2.0, 4.0, 6.0])})\n assert y.eval().shape == (2, 3)\n idata = pm.sample(\n chains=1,\n tune=500,\n draws=620,\n return_inferencedata=True,\n compute_convergence_checks=False,\n )\n samples = idata.posterior[\"y\"]\n assert samples.shape == (1, 620, 2, 3)\n\n np.testing.assert_allclose(np.array([2.0, 4.0, 6.0]), x.get_value(), atol=1e-1)\n np.testing.assert_allclose(\n np.array([2.0, 4.0, 6.0]), samples.mean((\"chain\", \"draw\", \"y_dim_0\")), atol=1e-1\n )",
"def test(model, dataloader, params, args, val):\n\n # evaluation mode\n model.eval()\n\n # initialise buffers\n dice_lv_buffer = []\n dice_myo_buffer = []\n dice_rv_buffer = []\n\n mcd_lv_buffer = []\n hd_lv_buffer = []\n mcd_myo_buffer = []\n hd_myo_buffer = []\n mcd_rv_buffer = []\n hd_rv_buffer = []\n\n mean_mag_grad_detJ_buffer = []\n negative_detJ_buffer = []\n\n\n with tqdm(total=len(dataloader)) as t:\n # iterate over validation subjects\n for idx, (image_ed_batch, image_es_batch, label_ed_batch, label_es_batch) in enumerate(dataloader):\n # (data all in shape of (c, N, H, W))\n\n # extend to (N, c, H, W)\n image_ed_batch = image_ed_batch.permute(1, 0, 2, 3).to(device=args.device)\n image_es_batch = image_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n label_es_batch = label_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n\n with torch.no_grad():\n # compute optical flow and warped ED images towards ES\n dvf = model(image_ed_batch, image_es_batch)\n\n # transform label mask of ES frame\n warped_label_es_batch = resample_transform(label_es_batch.float(), dvf, interp='nearest')\n\n\n \"\"\" Move data to device \"\"\"\n if args.cuda:\n # move data to cpu to calculate metrics\n # (the axis permutation is to comply with metric calculation code which takes input shape H, W, N)\n warped_label_es_batch = warped_label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.cpu().numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n else:\n # CPU version of the code\n warped_label_es_batch = warped_label_es_batch.squeeze(1).numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n \"\"\"\"\"\"\n\n \"\"\" Calculate the metrics (only works with SAX images) \"\"\"\n # (optional) extract 3 slices (apical, mid-ventricle and basal)\n if not args.all_slices:\n num_slices = label_ed_batch.shape[-1]\n apical_idx = int(round((num_slices - 1) * 0.75)) # 75% from basal\n mid_ven_idx = int(round((num_slices - 1) * 0.5)) # 50% from basal\n basal_idx = int(round((num_slices - 1) * 0.25)) # 25% from basal\n slices_idx = [apical_idx, mid_ven_idx, basal_idx]\n\n warped_label_es_batch = warped_label_es_batch[:, :, slices_idx]\n label_ed_batch = label_ed_batch[:, :, slices_idx]\n dvf = dvf[slices_idx, :, :, :] # needed for detJac\n\n # dice\n dice_lv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=1)\n dice_myo = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=2)\n dice_rv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=3)\n\n dice_lv_buffer += [dice_lv]\n dice_myo_buffer += [dice_myo]\n dice_rv_buffer += [dice_rv]\n\n # contour distances\n mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=1, dx=params.pixel_size)\n mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=2, dx=params.pixel_size)\n mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=3, dx=params.pixel_size)\n\n # determinant of Jacobian\n mean_grad_detJ, mean_negative_detJ = detJac_stack(dvf)\n\n\n # update buffers\n mcd_lv_buffer += [mcd_lv]\n hd_lv_buffer += [hd_lv]\n mcd_myo_buffer += [mcd_myo]\n hd_myo_buffer += [hd_myo]\n mcd_rv_buffer += [mcd_rv]\n hd_rv_buffer += [hd_rv]\n\n mean_mag_grad_detJ_buffer += [mean_grad_detJ]\n negative_detJ_buffer += 
[mean_negative_detJ]\n\n t.update()\n\n # construct metrics dict\n metrics = {'dice_lv_mean': np.mean(dice_lv_buffer), 'dice_lv_std': np.std(dice_lv_buffer),\n 'dice_myo_mean': np.mean(dice_myo_buffer), 'dice_myo_std': np.std(dice_myo_buffer),\n 'dice_rv_mean': np.mean(dice_rv_buffer), 'dice_rv_std': np.std(dice_rv_buffer),\n\n 'mcd_lv_mean': np.mean(mcd_lv_buffer), 'mcd_lv_std': np.std(mcd_lv_buffer),\n 'mcd_myo_mean': np.mean(mcd_myo_buffer), 'mcd_myo_std': np.std(mcd_myo_buffer),\n 'mcd_rv_mean': np.mean(mcd_rv_buffer), 'mcd_rv_std': np.std(mcd_rv_buffer),\n\n 'hd_lv_mean': np.mean(hd_lv_buffer), 'hd_lv_std': np.std(hd_lv_buffer),\n 'hd_myo_mean': np.mean(hd_myo_buffer), 'hd_myo_std': np.std(hd_myo_buffer),\n 'hd_rv_mean': np.mean(hd_rv_buffer), 'hd_rv_std': np.std(hd_rv_buffer),\n\n 'mean_mag_grad_detJ_mean': np.mean(mean_mag_grad_detJ_buffer),\n 'mean_mag_grad_detJ_std': np.std(mean_mag_grad_detJ_buffer),\n\n 'negative_detJ_mean': np.mean(negative_detJ_buffer),\n 'negative_detJ_std': np.std(negative_detJ_buffer)\n }\n\n\n if not val:\n # testing only: save all metrics evaluated for all test subjects in pandas dataframe\n test_result_dir = os.path.join(args.model_dir, \"test_results\")\n if not os.path.exists(test_result_dir):\n os.makedirs(test_result_dir)\n\n # save metrics results mean & std\n xutils.save_dict_to_json(metrics,\n f\"{test_result_dir}/test_results_3slices_{not args.all_slices}.json\")\n\n # save accuracy metrics of every subject\n subj_id_buffer = dataloader.dataset.dir_list\n df_buffer = []\n column_method = ['DL'] * len(subj_id_buffer)\n for struct in ['LV', 'MYO', 'RV']:\n if struct == 'LV':\n ls_dice = dice_lv_buffer\n ls_mcd = mcd_lv_buffer\n ls_hd = hd_lv_buffer\n elif struct == 'MYO':\n ls_dice = dice_myo_buffer\n ls_mcd = mcd_myo_buffer\n ls_hd = hd_myo_buffer\n elif struct == 'RV':\n ls_dice = dice_rv_buffer\n ls_mcd = mcd_rv_buffer\n ls_hd = hd_rv_buffer\n\n ls_struct = [struct] * len(subj_id_buffer)\n data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'Structure': ls_struct,\n 'Dice': ls_dice,\n 'MCD': ls_mcd,\n 'HD': ls_hd}\n df_buffer += [pd.DataFrame(data=data)]\n # concatenate df and save\n metrics_df = pd.concat(df_buffer, axis=0)\n metrics_df.to_pickle(f\"{test_result_dir}/test_accuracy_results_3slices_{not args.all_slices}.pkl\")\n\n # save detJac metrics for every subject\n jac_data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'GradDetJac': mean_mag_grad_detJ_buffer,\n 'NegDetJac': negative_detJ_buffer}\n jac_df = pd.DataFrame(data=jac_data)\n jac_df.to_pickle(f\"{test_result_dir}/test_Jacobian_results_3slices{not args.all_slices}.pkl\")\n\n return metrics",
"def test_dataset(ray_start_4_cpus, use_local):\n\n model_creator = mlp_identity.model_creator\n optimizer_creator = mlp_identity.optimizer_creator\n dataset_creator = mlp_identity.dataset_creator\n\n DatasetOperator = TrainingOperator.from_creators(\n model_creator=model_creator,\n optimizer_creator=optimizer_creator,\n loss_creator=nn.MSELoss)\n\n trainer = TorchTrainer(\n training_operator_cls=DatasetOperator,\n use_local=use_local,\n num_workers=2,\n )\n\n dataset = dataset_creator()\n for i in range(5):\n trainer.train(dataset=dataset, num_steps=100)\n\n x = mlp_identity.to_mat(0.5)\n prediction = float(trainer.get_model()(x)[0][0])\n assert 0.4 <= prediction <= 0.6\n trainer.shutdown()",
"def evaluate():\n\tmodel.eval()\n\tstddev = 1 # And mean=0\n\tfor batch_idx, (data, _) in enumerate(syn_test_loader):\n\t\tdata = data.cuda()\n\t\tif batch_idx == 0:\n\t\t\tnoise = torch.autograd.Variable(torch.randn(batch_size, bottleneck).cuda() * stddev)\n\t\t\tsample_representation(\"orig_nat\", data, noise)\n\t\t\tsample_representation(\"natural\", data, noise)\n\t\t\tsample_representation(\"orig_syn\", data, noise)\n\t\t\tsample_representation(\"synth\", data, noise)",
"def get_synthetic_data_and_answer_v9(num_rows, num_columns, num_preds, corr, random_seed,\n data_cache_file, print_details=False,\n min_meta=False, ignore_data_cache=False,\n target_fld='target_fld',\n\n # this is the percent of rows outside of answer\n # tuples to throw off individ corr\n percent_non_answer_used=0.7,\n\n # what percent of the above percent goes to\n # answer1; remainder to answer2\n ans1_percent_non_answer_used=0.5,\n\n # percent of answer and non-answer used by\n # noise column\n noise_percent_answer_used=0.7,\n noise_percent_non_answer_used=0.1):\n if not ignore_data_cache and os.path.exists(data_cache_file):\n with open(data_cache_file) as f:\n df, preds, meta = pickle.load(f)\n else:\n if ignore_data_cache:\n print \"WARNING: data cache being ignored!\"\n\n start = timer()\n np_random = np.random.RandomState(random_seed)\n meta = {}\n\n # 1. create means, variances, covariance matrix\n num_corrs = int(round((num_columns**2 - num_columns) / 2.0))\n means = np.zeros(num_columns)\n cov = squareform([corr] * num_corrs)\n for i in xrange(num_columns):\n cov[i, i] = 1.0\n\n # 2. creating dataset via normal distrib\n data = np_random.multivariate_normal(means, cov, size=(num_rows,), check_valid='warn')\n df = pd.DataFrame(data)\n df.columns = ['c{}'.format(i) for i in xrange(num_columns)]\n if print_details:\n get_corr_distrib(df)\n\n # 3. getting synthetic answer and meta\n num_rows = len(df)\n target_fld_bin_vals, bin_counts, adjusted_bin_edges = get_target_fld_vals(num_rows)\n target_counts, target_fld_vals, answer1_vals, answer2_vals, noise1_vals, ans_preds, noise_preds = \\\n get_synthetic_answer_v2(df, target_fld_bin_vals, bin_counts,\n percent_non_answer_used=percent_non_answer_used,\n ans1_percent_non_answer_used=ans1_percent_non_answer_used,\n noise_percent_answer_used=noise_percent_answer_used,\n noise_percent_non_answer_used=noise_percent_non_answer_used,\n print_details=print_details)\n c_preds = ans_preds + noise_preds\n\n # 6. add target/answer/noise fields to dataframe\n df.columns = [target_fld, 'answer1', 'answer2', 'noise1'] + list(df.columns[4:])\n df[target_fld] = target_fld_vals\n df['answer1'] = answer1_vals\n df['answer2'] = answer2_vals\n df['noise1'] = noise1_vals\n\n # 7. 
print correlations\n if print_details:\n ans1_signal = np.histogram(df.query(Query.get_pandas_query_from_preds([c_preds[0]]))['target_fld'])[0]\n print \"ans1 signal: {}, corr={:.3f}, sig_dist={:.3f}\"\\\n .format(ans1_signal, pearsonr(target_counts, ans1_signal)[0], get_signal_distance(target_counts, ans1_signal))\n\n ans2_signal = np.histogram(df.query(Query.get_pandas_query_from_preds([c_preds[1]]))['target_fld'])[0]\n print \"ans2 signal: {}, corr={:.3f}, sig_dist={:.3f}\"\\\n .format(ans2_signal, pearsonr(target_counts, ans2_signal)[0], get_signal_distance(target_counts, ans2_signal))\n\n ans_signal = np.histogram(df.query(Query.get_pandas_query_from_preds(c_preds[0:2]))['target_fld'])[0]\n print \"ans signal: {}, corr={:.3f}, sig_dist={:.3f}\"\\\n .format(ans_signal, pearsonr(target_counts, ans_signal)[0], get_signal_distance(target_counts, ans_signal))\n\n print('---------------------')\n\n noise1_signal = np.histogram(df.query(Query.get_pandas_query_from_preds([c_preds[2]]))['target_fld'])[0]\n print \"noise1 signal: {}, corr={:.3f}, sig_dist={:.3f}\"\\\n .format(noise1_signal, pearsonr(target_counts, noise1_signal)[0], get_signal_distance(target_counts, noise1_signal))\n\n ans1_noise1_signal = np.histogram(df.query(Query.get_pandas_query_from_preds([c_preds[0], c_preds[2]]))['target_fld'])[0]\n print \"ans1 noise1 signal: {}, corr={:.3f}, sig_dist={:.3f}\"\\\n .format(ans1_noise1_signal, pearsonr(target_counts, ans1_noise1_signal)[0], get_signal_distance(target_counts, ans1_noise1_signal))\n\n ans2_noise1_signal = np.histogram(df.query(Query.get_pandas_query_from_preds([c_preds[1], c_preds[2]]))['target_fld'])[0]\n print \"ans2 noise1 signal: {}, corr={:.3f}, sig_dist={:.3f}\"\\\n .format(ans2_noise1_signal, pearsonr(target_counts, ans2_noise1_signal)[0], get_signal_distance(target_counts, ans2_noise1_signal))\n\n print(\"Avg column corr:\")\n print_avg_column_corr(df)\n\n # 8. generate predicates\n # generate predicates {eq, gt, lt} for all unique columns+values; take random subset\n rand_preds = get_rand_predicates(np_random, df, c_preds, target_fld, num_preds-len(c_preds))\n preds = c_preds + rand_preds\n if print_details:\n print(\"Num preds: {}\".format(len(preds)))\n print preds[0:10]\n\n # 9. setting meta and caching results\n meta['target_fld'] = target_fld\n meta['target_counts'] = target_counts\n meta['adjusted_bin_edges'] = adjusted_bin_edges\n meta['bin_counts'] = bin_counts\n meta['answer_preds'] = preds[0:2]\n meta['answer_pids'] = [0, 1]\n meta['answer_query'] = Query.get_pandas_query_from_preds(meta['answer_preds'])\n meta['answer_counts'] = np.histogram(df.query(meta['answer_query'])[meta['target_fld']], meta['adjusted_bin_edges'])[0]\n meta['data_gen_runtime'] = timer() - start\n\n # 9. caching results\n meta['data_gen_runtime'] = timer() - start\n with open(data_cache_file, \"w\") as f:\n pickle.dump((df, preds, meta), f, -1)\n\n if print_details:\n print(\"Done building syn data v9: {} sec\".format(meta['data_gen_runtime']))\n return df, preds, meta",
"def quickstart_example():\n mu_values = np.random.randint(80, 110, 100)\n mu1, mu2 = map(np.array, zip(*combinations(mu_values, 2)))\n labels1, labels2 = [mu.astype(str) for mu in [mu1, mu2]]\n spreads = skellam.rvs(mu1=mu1, mu2=mu2)\n times = np.arange(spreads.size).astype('datetime64[s]')\n\n # MELO class arguments (explained in docs)\n lines = np.arange(-59.5, 60.5)\n k = .15\n\n # train the model on the list of comparisons\n melo = Melo(lines=lines, k=k)\n melo.fit(times, labels1, labels2, spreads)\n\n # predicted and true (analytic) comparison values\n pred_times = np.repeat(melo.last_update, times.size)\n pred = melo.mean(pred_times, labels1, labels2)\n true = skellam.mean(mu1=mu1, mu2=mu2)\n\n # plot predicted means versus true means\n plt.scatter(pred, true)\n plt.plot([-20, 20], [-20, 20], color='k')\n plt.xlabel('predicted mean')\n plt.ylabel('true mean')",
"def smzscore( raw, licks, rewards, success_rate, rz, default, shuffles, tracklength, mode=-1 ):\n\n # select trial type\n rewards = select_trials(rewards, 'reward', mode)\n licks = select_trials(licks, 'licks', mode)\n\n if licks.shape[0] > 0:\n\n # determine which trials were successful and only analyse those\n rewards_success = rewards[np.where(rewards[:,5] == 1),:][0]\n # create the 2-d vector that will hold the shuffled datasets\n shuffle_dist = np.zeros((np.shape(licks)[0], np.shape(licks)[1], shuffles))\n shuffled_sr = np.zeros((shuffles,))\n\n # create shuffled datset\n for i in range(shuffles):\n shuffle_dist[:,:,i] = shuffle_dset( raw, licks, tracklength )\n\n # loop through all shuffled lick-datasets\n shuffled_sr_all = 0\n for s in range(shuffles):\n # loop through each trial of the current shuffled lick dataset and check if there was at least one lick within the rewarded zone\n shuffled_sr_current = 0\n for t in np.unique(rewards_success[:,3]):\n cur_trial_ind = np.where(shuffle_dist[:,2,s]==t)[0]\n cur_trial = shuffle_dist[cur_trial_ind,:,s]\n if np.size(np.where( (cur_trial[:,1] > rz[0]) & (cur_trial[:,1] < default ) )[0]):\n shuffled_sr_current += 1\n # calculate success rate for this shuffled licks dataset and add to overall success rate\n try:\n shuffled_sr[s] = shuffled_sr_current/np.shape(np.unique(rewards_success[:,3]))[0]\n shuffled_sr_all += shuffled_sr_current/np.shape(np.unique(rewards_success[:,3]))[0]\n except ZeroDivisionError:\n # if the mouse didn't have a successful trial at all, set SMI to 1\n shuffled_sr_all = 1\n shuffled_sr[s] = 0\n\n shuffled_sr_all\t= shuffled_sr_all/shuffles\n\n if shuffled_sr_all > 0:\n if success_rate == 0:\n return 0, shuffled_sr\n else:\n return (success_rate - np.mean(shuffled_sr)) / np.std(shuffled_sr), shuffled_sr\n else:\n return (success_rate - 0.1) / 1, shuffled_sr\n else:\n return 0, shuffled_sr",
"def main():\n\n\t'''Calling CommandLine class'''\n\tcommand_line = CommandLine()\n\t\n\t'''Calling FastAreader class'''\n\treading = FastAreader()\n\t'''Calling the readFasta method '''\n\thead_seq = reading.readFasta()\n\n\t'''Passing command line arguments, minimum kmer size, maximum kmer size, and z-score cutoff, in the Motif class.'''\n\tres = Motif(command_line.args.minMotif, command_line.args.maxMotif, command_line.args.cutoff)\n\n\t'''Iteration through the head, sequence generator of the FastaReader class.'''\n\tfor h, s in head_seq:\n\t\t#cleaning sequence of newline chars and N bases\n\t\tclean_seq = s.replace('\\\\', \"\")\n\t\tcleanDubBases = clean_seq.replace('N', \"\")# N bases are assumed to be sequencing errors, and therefore removed from sequence, and not counted in as part of the genome length\n\t\t#calling Motif class methods\n\t\tres.lengthOfSequence(cleanDubBases)\n\t\tres.kmerComposition(cleanDubBases)\n\tres.addData()\n\tres.expectedCounts()\n\tres.sdScore()\n\tres.zScore()\n\tres.results()\n\n\t#sorting results list based on z-score\n\tsort_1 = sorted(res.res_lst, key=lambda tup: tup[3])\n\t#sorting results list a second time based on motif size\n\tsort_2 = sorted(sort_1, key=lambda k: len(k[0]), reverse=True)\n\n\t#printing of headers\n\tprint('Motif','\\t','Actual_Count','\\t','Expected_Count','\\t','Zscore')\n\t#printing of sorted results\n\tfor i in sort_2:\n\t\tprint('{0:8}\\t{1:0d}\\t{2:0.2f}\\t{3:0.2f}'.format(i[0], i[1], i[2], i[3]))",
"def netflix_solve(reader, writer):\n movie_id = -1\n actual_rating_list = []\n predict_rating_list = []\n assert len(CACHE_ANSWERS.keys()) > 0\n for line in reader:\n u_id, movie_flag = netflix_read(line)\n if movie_flag == 1:\n netflix_print(writer, u_id)\n movie_id = u_id\n elif movie_flag == 0:\n res = netflix_predict(movie_id, u_id) + related_movie_offset(movie_id, u_id)\n predict_rating_list.append(res)\n actual_rating_list.append(CACHE_ANSWERS[movie_id][u_id])\n netflix_print(writer, res)\n else:\n assert False\n\n rmse_res = netflix_rmse(actual_rating_list, predict_rating_list)\n netflix_print(writer, \"RMSE: \" + str(rmse_res))",
"def test_sampling1 ():\n cpus = list(range(C.N_PARALLEL))\n affinity = dict(cuda_idx=C.CUDA_IDX, workers_cpus=cpus)\n agent_ = findOptimalAgent(reward=None)\n agent = CategoricalPgAgent(AcrobotNet, \n initial_model_state_dict=agent_.state_dict())\n s0 = np.array([1, 0, 1/np.sqrt(2), 1/np.sqrt(2), 4, 2], dtype=np.float)\n sampler = SerialSampler(\n EnvCls=rlpyt_make,\n env_kwargs=dict(id=C.ENV, reward=None, internalStateFn=C.INTERNAL_STATE_FN, s0=s0),\n batch_T=500,\n batch_B=16,\n max_decorrelation_steps=0,\n )\n sampler.initialize(\n agent=agent,\n affinity=affinity,\n seed=0\n )\n _, traj_info = sampler.obtain_samples(0)\n print(np.mean([t['DiscountedReturn'] for t in traj_info]))",
"def test(self, filename: str, info_extractor: Optional[InfoExtractor]):\r\n if self.model is None:\r\n raise RatingModel.RatingModelError(\"model is not loaded or trained yet\")\r\n doc, _ = loadDocumentIntoSpacy(filename, self.parser, self.nlp)\r\n\r\n print(\"Getting rating...\")\r\n if self._type == \"fixed\":\r\n print(\"working on fixed model\")\r\n if self.keywords is None:\r\n raise RatingModel.RatingModelError(\"Keywords not found\")\r\n\r\n seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)\r\n\r\n # scoring\r\n temp_out = self.__trainKMWM(list(seen_chunks_words), list(all_tokens_chunks), self.keywords)\r\n if temp_out is None:\r\n raise RatingModel.RatingModelError(\r\n \"Either parser cannot detect text or too few words in resume for analysis. Most usually the former.\"\r\n )\r\n km_scores, wm_scores = temp_out\r\n # average of km/wm scores for all keywords\r\n km_score = np.mean(km_scores)\r\n wm_score = np.mean(wm_scores)\r\n final_score = km_score * wm_score\r\n elif self._type == \"lda\":\r\n if self.lda is None or self.dictionary is None or self.top_k_words is None:\r\n raise RatingModel.RatingModelError(\"No LDA found\")\r\n\r\n seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)\r\n seen_chunks_words, all_tokens_chunks = (\r\n list(seen_chunks_words),\r\n list(all_tokens_chunks),\r\n )\r\n\r\n # scoring\r\n new_seen_chunks_words = self.__keep_top_k_words(seen_chunks_words)\r\n bow = self.dictionary.doc2bow(new_seen_chunks_words)\r\n doc_distribution = np.array(\r\n [tup[1] for tup in self.lda.get_document_topics(bow=bow)]\r\n )\r\n # get keywords and weights\r\n keywords = []\r\n all_pair_scores = []\r\n all_topic_scores = []\r\n all_diff_scores = []\r\n # take top 5 topics\r\n for j in doc_distribution.argsort()[-5:][::-1]:\r\n topic_prob = doc_distribution[j]\r\n # take top 5 words for each topic\r\n st = self.lda.show_topic(topicid=j, topn=5)\r\n sum_st = np.sum(list(map(lambda x: x[1], st)))\r\n pair_scores = []\r\n for pair in st:\r\n keywords.append(pair[0])\r\n pair_scores.append(pair[1])\r\n all_pair_scores.append(np.array(pair_scores))\r\n all_topic_scores.append(np.array(topic_prob))\r\n\r\n all_pair_scores = np.array(all_pair_scores)\r\n norm_all_pair_scores = all_pair_scores.T / np.sum(all_pair_scores, axis=1)\r\n norm_all_topic_scores = all_topic_scores / np.sum(all_topic_scores)\r\n all_diff_scores = (norm_all_pair_scores * norm_all_topic_scores).flatten()\r\n weights = pd.Series(all_diff_scores, index=keywords)\r\n weights.sort_values(ascending=False, inplace=True)\r\n\r\n temp_out = self.__trainKMWM(seen_chunks_words, all_tokens_chunks, keywords)\r\n if temp_out is None:\r\n print(\r\n \"Either parser cannot detect text or too few words in resume for analysis. Most usually the former. 
Skip document.\"\r\n )\r\n km_scores, wm_scores = temp_out\r\n\r\n # average of km/wm scores for all keywords\r\n km_score = np.dot(weights.values, km_scores)\r\n wm_score = np.dot(weights.values, wm_scores)\r\n\r\n final_score = km_score * wm_score\r\n\r\n # max_score = self.model[\"score\"].iloc[0] - np.std(self.model[\"score\"])\r\n # min_score = self.model[\"score\"].iloc[-1]\r\n mean = np.mean(self.model[\"score\"])\r\n sd = np.std(self.model[\"score\"])\r\n\r\n rating = min(10, max(0, round(5 + (final_score-mean)/sd, 2)))\r\n if info_extractor is not None:\r\n print(\"-\" * 20)\r\n\r\n # info_extractor.extractFromFile(filename)\r\n output= info_extractor.extractFromFile(filename)\r\n\r\n print(\"output:----\",output)\r\n print(\"-\" * 20)\r\n print(\"Rating: %.1f\" % rating)\r\n # if info_extractor is not None:\r\n # print(\"info extractor is not working\")\r\n # env = os.environ\r\n # subprocess.call([sys.executable, filename], env=env)\r\n return output",
"def test_marketsim(description, group, inputs, outputs, grader):\n\n points_earned = 0.0 # initialize points for this test case\n try:\n # Try to import student code (only once)\n if not main_code in globals():\n import importlib\n # * Import module\n mod = importlib.import_module(main_code)\n globals()[main_code] = mod\n # * Import methods to test\n for m in ['compute_portvals']:\n globals()[m] = getattr(mod, m)\n\n incorrect = False\n msgs = []\n\n if group == 'author':\n try:\n # globals()['author'] = getattr(marketsim, 'author')\n auth_string = run_with_timeout(marketsim.author,seconds_per_test_case,(),{})\n if auth_string == 'tb34':\n incorrect = True\n msgs.append(\" Incorrect author name (tb34)\")\n points_earned = -20\n elif auth_string == '':\n incorrect = True\n msgs.append(\" Empty author name\")\n points_earned = -20\n except Exception as e:\n incorrect = True\n msgs.append(\" Exception occured when calling author() method: {}\".format(e))\n points_earned = -20\n else:\n # Unpack test case\n orders_file = inputs['orders_file']\n start_val = inputs['start_val']\n\n portvals = None\n fullpath_orders_file = orders_file#get_orders_data_file(orders_file)\n portvals = run_with_timeout(compute_portvals,seconds_per_test_case,(fullpath_orders_file,start_val),{})\n # Verify against expected outputs and assign points\n\n # * Check return type is correct, coax into Series\n assert (type(portvals) == pd.Series) or (type(portvals) == pd.DataFrame and len(portvals.columns) == 1), \"You must return a Series or single-column DataFrame!\"\n if type(portvals) == pd.DataFrame:\n portvals = portvals[portvals.columns[0]] # convert single-column DataFrame to Series\n if group == 'basic':\n if len(portvals) != outputs['num_days']:\n incorrect=True\n msgs.append(\" Incorrect number of days: {}, expected {}\".format(len(portvals), outputs['num_days']))\n else:\n points_earned += 2.0\n if abs(portvals[-1]-outputs['last_day_portval']) > (0.001*outputs['last_day_portval']):\n incorrect=True\n msgs.append(\" Incorrect final value: {}, expected {}\".format(portvals[-1],outputs['last_day_portval']))\n else:\n points_earned += 5.0\n adr,sr = get_stats(portvals)\n if abs(sr-outputs['sharpe_ratio']) > abs(0.001*outputs['sharpe_ratio']):\n incorrect=True\n msgs.append(\" Incorrect sharpe ratio: {}, expected {}\".format(sr,outputs['sharpe_ratio']))\n else:\n points_earned += 1.5\n if abs(adr-outputs['avg_daily_ret']) > abs(0.001*outputs['avg_daily_ret']):\n incorrect=True\n msgs.append(\" Incorrect avg daily return: {}, expected {}\".format(adr,outputs['avg_daily_ret']))\n else:\n points_earned += 1.0\n elif group=='leverage':\n if abs(portvals[-1]-outputs['last_day_portval']) > (0.001*outputs['last_day_portval']):\n incorrect = True\n msgs.append(\" Incorrect final value: {}, expected {}\".format(portvals[-1],outputs['last_day_portval']))\n else:\n points_earned += 1.0\n if incorrect:\n # inputs_str = \" orders_file: {}\\n\" \\\n # \" start_val: {}\\n\".format(orders_file, start_val)\n raise IncorrectOutput, \"Test failed on one or more output criteria.\\n Inputs:\\n{}\\n Failures:\\n{}\".format(inputs, \"\\n\".join(msgs))\n except Exception as e:\n # Test result: failed\n msg = \"Test case description: {}\\n\".format(description)\n \n # Generate a filtered stacktrace, only showing erroneous lines in student file(s)\n \n tb_list = tb.extract_tb(sys.exc_info()[2])\n if 'grading_traceback' in dir(e):\n tb_list = e.grading_traceback\n for i in xrange(len(tb_list)):\n row = tb_list[i]\n tb_list[i] = 
(os.path.basename(row[0]), row[1], row[2], row[3]) # show only filename instead of long absolute path\n tb_list = [row for row in tb_list if row[0] == 'marketsim.py']\n if tb_list:\n msg += \"Traceback:\\n\"\n msg += ''.join(tb.format_list(tb_list)) # contains newlines\n msg += \"{}: {}\".format(e.__class__.__name__, e.message)\n\n # Report failure result to grader, with stacktrace\n grader.add_result(GradeResult(outcome='failed', points=points_earned, msg=msg))\n raise\n else:\n # Test result: passed (no exceptions)\n grader.add_result(GradeResult(outcome='passed', points=points_earned, msg=None))",
"def run(num, similarity_name=None, reward_name='acc', alpha=0.3):\n \n # --\n # Get subject's data\n sdata = get_behave_data(num)\n sdata.update(get_similarity_data(num))\n \n responses = np.array(sdata['resp'])\n\n rewards = None\n if reward_name == 'acc':\n rewards = np.array(sdata['acc'],dtype=np.float32)\n elif reward_name == 'gl':\n rewards = np.array(sdata['gl'],dtype=np.float32)\n\n trials = np.array(fmri.catreward.roi.data.get_trials())\n conds = list(set(trials))\n ## conds are the unqie entries in trials\n \n values = np.zeros_like(trials,dtype=np.float32)\n rpes = np.zeros_like(trials,dtype=np.float32)\n sim_rewards = np.zeros_like(trials,dtype=np.float32)\n ## Returned....\n\n # Each cond has n states, \n # matching the number of \n # responses (approx 2: {1,6}).\n #\n # Wrong button presses \n # are included, however these\n # are never rewarded so stay at 0.\n for cond in conds:\n if cond == 0: continue\n ## Drop jitter.\n\n # Create states and their rewards.\n mask = trials == cond\n states_c = responses[mask]\n rewards_c = rewards[mask] ## _c for cond...\n\n # Get the RL alg we want to run.\n # based on similarity_name\n if similarity_name == None:\n # No similarity:\n values_c, rpes_c = rl.reinforce.b_delta(\n rewards_c, states_c, alpha)\n sim_rewards_c = rewards_c\n ## To give a consistent return\n ## just map rewards to sim_rewards\n else:\n # Get the similarity data, filter it by mask, and run RL.\n similarity_c = np.array(sdata[similarity_name])[mask]\n values_c, rpes_c, sim_rewards_c = rl.reinforce.b_delta_similarity(\n rewards_c, states_c, similarity_c, alpha)\n \n # sim_rewards_c does not need to be\n # unpacked when similarity_name is None\n sim_rewards_c = rl.misc.unpack(sim_rewards_c, states_c)\n\n # Unpack values and rpes \n # based on states_c\n values_c = rl.misc.unpack(values_c, states_c)\n rpes_c = rl.misc.unpack(rpes_c, states_c)\n \n # Now use the mask to map values_c, etc,\n # into trials space\n values[mask] = values_c\n rpes[mask] = rpes_c\n sim_rewards[mask] = sim_rewards_c\n \n return values.tolist(), rpes.tolist(), sim_rewards.tolist()",
"def model_test(epo, natural):\n\tmodel.eval()\n\twith torch.no_grad():\n\t\tn = batch_size\n\n\t\tif natural:\n\t\t\tloader = nat_test_loader\n\t\t\tprefix = \"nat\"\n\t\telse:\n\t\t\tloader = syn_test_loader\n\t\t\tprefix = \"syn\"\n\n\t\tlog_cor_file = open(directory + \"/logs/test_\" + prefix + \"_cor_log.txt\", \"a\") # Correct\n\t\tlog_mae_file = open(directory + \"/logs/test_\" + prefix + \"_mae_log.txt\", \"a\") # MAE\n\t\tlog_dev_file = open(directory + \"/logs/test_\" + prefix + \"_dev_log.txt\", \"a\") # DEV\n\t\tlog_sam_file = open(directory + \"/logs/test_\" + prefix + \"_sam_log.txt\", \"a\") # Sample\n\n\t\tccs = []\n\t\tlabls = []\n\t\tnum_unlabeled = 0\n\t\tfor batch_idx, (data, labels) in enumerate(loader):\n\t\t\tdata = data.cuda()\n\t\t\tlabels = labels.float().cuda()\n\n\t\t\tmodel.mode = 'natural' if natural else 'synth'\n\t\t\trecon_batch, mu, logvar, cc = model(data)\n\n\t\t\tcc[labels == 0] = 0 # Sets the counted cells to 0 for unlabeled data, so that regressor_loss=0\n\t\t\tnum_unlabeled += (labels == 0).sum()\n\t\t\t_, _, _ = loss_function(recon_batch, data, mu, logvar, cc, labels, natural)\n\n\t\t\tccs.append(cc.cpu().detach().numpy())\n\t\t\tlabls.append(labels.cpu().detach().numpy())\n\n\t\t\tif batch_idx == 0 and epo % 1000 == 0:\n\t\t\t\t# Save test sample\n\t\t\t\tcomparison = torch.cat([data[:n], recon_batch.view(batch_size, 1, img_size, img_size)[:n]])\n\t\t\t\tsave_image(comparison.cpu(), directory + \"/\" + prefix + \"_\" + str(epo) + \".png\", nrow=n)\n\n\t\t\t\t# Save switch sample\n\t\t\t\tmodel.mode = 'synth' if natural else 'natural'\n\t\t\t\trecon_batch, _, _, _ = model(data)\n\t\t\t\tcomparison = torch.cat([data[:n], recon_batch.view(batch_size, 1, img_size, img_size)[:n]])\n\t\t\t\tsave_image(comparison.cpu(), directory + \"/switch_\" + prefix + \"_\" + str(epo) + \".png\", nrow=n)\n\n\t\tpreds = np.concatenate(ccs, axis=None) # Elementwise round of cellcounts\n\t\tlbls = np.concatenate(labls, axis=None) # Elementswise round of labels\n\n\t\tlog_sam_file.write(str(np.round(preds, 2)) + \"\\n\" + str(lbls) + \"\\n\")\n\t\tpreds = np.around(preds)\n\t\t#lbls = np.around(lbls)\n\n\t\tcorrect = np.sum(preds == lbls) # Count elementwise equality of predictions and labels\n\t\tlen_set = len(loader.dataset)\n\t\tcorrect -= num_unlabeled # Remove zero_indices from numerator\n\t\tcorrect = float(correct) / float(len_set - num_unlabeled) # Remove zero_indices from denominator\n\n\t\tdist_sum = np.sum(np.abs(np.subtract(preds, lbls))) # Elementwise addition of dist between preds and lbls\n\t\tMAE = dist_sum / float(len_set - num_unlabeled)\n\n\t\tlen_labeled = float(len_set - num_unlabeled)\n\t\tdev = np.ones(len_set) - np.divide(preds, lbls) # Deviation contains NaNs because syn data has lbl=0\n\t\tavg_dev = np.sum(np.abs(np.where(np.isnan(dev), 0, dev))) / len_labeled # Take the avg only of those deviations that weren't NaN\n\n\t\tlog_cor_file.write(str(correct)+\"\\n\")\n\t\tlog_mae_file.write(str(MAE)+\"\\n\")\n\t\tlog_dev_file.write(str(avg_dev)+\"\\n\")\n\n\t\t#logfile.write(str(correct) + \" correct, MAE: \" + str(MAE) + \", DEV: \" + str(avg_dev) + \" in \" + prefix + \" set in epoch \" + str(epoch) + \"\\n\\n\")\n\t\tlog_cor_file.close()\n\t\tlog_mae_file.close()\n\t\tlog_dev_file.close()\n\t\tlog_sam_file.close()\n\n\t\tglobal distance_sum\n\t\tdistance_sum = dist_sum\n\t\treturn correct, MAE",
"def test_read(self):\n self.reader._timing = [3, 2, 2, 1, 1, 1]\n score, time = self.reader.read(self.books[0], 0, 3)\n self.assertTrue(self.books[0].id_book not in self.reader._books)\n self.assertEqual(3, score)\n self.assertEqual(6, time)\n self.assertEqual([3, 3, 3, 2, 2, 2], self.reader._timing)\n score, time = self.reader.read(self.books[3], 4, 5)\n self.assertTrue(self.books[3].id_book not in self.reader._books)\n self.assertEqual(0, score)\n self.assertEqual(7, time)\n self.assertEqual([3, 3, 3, 2, 3, 3], self.reader._timing)",
"def main():\n users = [i.id for i in list(User.select())]\n sample_users = random.sample(users, _SAMPLE_NUMBER)\n actual_result = []\n average_result = []\n nearest_neighbour_result = []\n slope_one_result = []\n hybird_result = []\n for user_id in sample_users:\n print('Current user:', get_user_by_id(user_id))\n movie_id = random.choice(get_movie_rating_by_user(user_id)).movie_id\n print('Current movie:', get_movie_by_id(movie_id))\n actual = get_user_movie_rating(user_id, movie_id)\n print('Actual Rating:', actual)\n actual_result.append(actual)\n avg = average_rating(movie_id, True)\n print('Average Rating:', avg)\n average_result.append(avg)\n nearest = nearest_neighbour(user_id, movie_id, True)\n print('Nearest Neighbour Rating:', nearest)\n nearest_neighbour_result.append(nearest)\n slope = slope_one(user_id, movie_id, True)\n print('Slope One Rating:', slope)\n slope_one_result.append(parse_result(slope))\n hybrid = hybrid_algorithm(avg, nearest, slope, True)\n print('Hybrid Algorithm Rating:', hybrid)\n hybird_result.append(parse_result(hybrid))\n print()\n\n if _DEBUG:\n print(actual_result)\n print(average_result)\n print(nearest_neighbour_result)\n print(slope_one_result)\n print(hybird_result)\n\n print('RMSD of each recommender system')\n print(' Average Rating '.center(80, '#'))\n print(rmsd(actual_result, average_result))\n print(' Nearest Neighbour '.center(80, '#'))\n print(rmsd(actual_result, nearest_neighbour_result))\n print(' Slope One '.center(80, '#'))\n print(rmsd(actual_result, slope_one_result))\n print(' Hybrid Algorithm '.center(80, '#'))\n print(rmsd(actual_result, hybird_result))",
"def get_synthetic_data_and_answer_v1(num_rows, num_columns, num_preds, corr, random_seed,\n data_cache_file, print_details=False,\n min_meta=False):\n if os.path.exists(data_cache_file):\n with open(data_cache_file) as f:\n df, preds, meta = pickle.load(f)\n else:\n start = timer()\n np_random = np.random.RandomState(random_seed)\n\n # 1. getting synthetic data\n df, preds = get_synthetic_data_v5(np_random, num_rows, num_columns, corr,\n print_details=print_details)\n\n # 2. randomly choosing answer preds\n answer_preds = []\n answer_pids = []\n for i in xrange(100):\n pid1 = np_random.randint(0, len(preds))\n pid2 = np_random.randint(0, len(preds))\n if pid1 != pid2:\n answer_preds = [preds[pid1], preds[pid2]]\n answer_pids = sorted([pid1, pid2])\n break\n if not answer_preds:\n raise Exception(\"Problem getting unique answer preds.. 100 iterations matched.\")\n if print_details:\n print \"answer preds: {}\".format(answer_preds)\n print \"answer pids: {}\".format(answer_pids)\n\n # 3. adding synthetic answer to df\n df, meta = add_target_to_df_v3(np_random, df, answer_preds, corr,\n print_details=print_details, min_meta=min_meta)\n\n # 4. noting some meta data\n meta['answer_preds'] = answer_preds\n meta['answer_pids'] = answer_pids\n meta['answer_query'] = Query.get_pandas_query_from_preds(answer_preds)\n qs = Query.get_pandas_query_from_preds(answer_preds)\n meta['answer_counts'] = np.histogram(df.query(qs)[meta['target_fld']], meta['adjusted_bin_edges'])[0]\n\n # 5. caching results\n meta['data_gen_runtime'] = timer() - start\n with open(data_cache_file, \"w\") as f:\n pickle.dump((df, preds, meta), f, -1)\n\n return df, preds, meta",
"def test_full(args, model, device): \n test_path = '../data/full/original/'\n generate_path = '../data/full/generate/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))])\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n ind = 0\n for name in os.listdir(test_path):\n if os.path.isfile(os.path.join(test_path, name)):\n ind += 1\n test_original, test_style, image_height, image_width = load_test_dataset(name)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n y_real = y_real.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n \n y_fake = model.gen_g(x.view(-1, config.channels, image_height, image_width))\n y_fake = y_fake.view(config.channels, image_height, image_width)\n \n # Calculate PSNR & SSIM scores\n score_psnr += psnr_full(y_fake, y_real)\n \n y_fake_np = y_fake.detach().cpu().numpy().transpose(1, 2, 0)\n y_real_np = y_real.cpu().numpy().transpose(1, 2, 0)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += temp_ssim\n \n temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim\n \n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n print('PSNR & SSIM scores of {} images are calculated.'.format(ind))\n \n utils.save_image(y_fake, os.path.join(generate_path, '{}-x.jpg'.format(name[:5] + args.model_type)))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))",
"def test_predictor():",
"def readOneData(self):\n\n\t\tif self._mt5Client is not None:\n\t\t\tdatas = self._mt5Client.getData()\n\n\t\t\tif datas is not None:\n\t\t\t\tPERIOD = int(self._config['data']['predict'])\n\t\t\t\tHALF_PERIOD = int(PERIOD/2)\n\n\t\t\t\tdata = []\n\n\t\t\t\t#Time Got\n\t\t\t\tself._LAST_PERIOD_PREDICTED_END = datas['time']\n\n\t\t\t\t#time open high low close tick_volume spread real_\n\t\t\t\t#Switch the price type calucation\n\n\t\t\t\tw_p = self._config['data']['price']\n\t\t\t\tv = 0\n\n\t\t\t\tif(w_p == CHART_PRICES_TYPE['O']):\n\t\t\t\t\tv = float(datas['open']) \n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['C']):\n\t\t\t\t\t\n\t\t\t\t\tv = float(datas['close']) \n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['H']):\n\t\t\t\t\t\n\t\t\t\t\tv = float(datas['high'])\n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['L']):\n\n\t\t\t\t\tv = float(datas['low']) \n\n\t\t\t\telif(w_p == CHART_PRICES_TYPE['HL/2']):\n\t\t\t\t\tv = ( float(datas['low']) + float(datas['high']) ) /2\n\t\t\t\t\n\t\t\t\tself.notify(msg={\n\t\t\t\t\t\t\t\t\t'prices': {\n\t\t\t\t\t\t\t\t\t\t'values': {\n\t\t\t\t\t\t\t\t\t\t\t'RP': str(v)\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t} \n\t\t\t\t\t\t\t\t} \n\t\t\t\t)\n\n\t\t\t\tdata.append(100000 * v ) \n\n\t\t\t\tself._TEMPORARY_GLOBAL_DATA.append(data[-1])\n\n\t\t\t\tself._GLOBAL_DATA.append(data[-1])\n\n\t\t\t\treturn data",
"def test_randomreader():\n from .context import readersender\n\n rr = readersender.readers.RandomReader()\n\n rr.connect()\n rr.read()\n rr.disconnect()",
"def main(um_file, ptl_file, wl_min_r=0.08, wl_max_r=50.0, wl_n_bins=22, verbose=True):\n # Read in the UM mock catalog\n um_mock = Table(np.load(um_file))\n if verbose:\n print(\"# Load in UM mock catalog: {}\".format(um_file))\n print(\"# Dealing with {} galaxies\".format(len(um_mock)))\n # Read in the particle table\n sim_particles = Table(np.load(ptl_file))\n if verbose:\n print(\"# Load in particle table: {}\".format(ptl_file))\n print(\"# Dealing with {} particles\".format(len(sim_particles)))\n\n # Output file name\n um_pre, _ = os.path.splitext(um_file)\n ptl_pre, _ = os.path.splitext(ptl_file)\n n_ptl = ptl_pre.split('_')[-1]\n precompute_out = \"{}_{}_r_{:4.2f}_{:4.1f}_{:2d}bins.npy\".format(\n um_pre, n_ptl, wl_min_r, wl_max_r, wl_n_bins\n )\n if verbose:\n print(\"# Output file name : {}\".format(precompute_out))\n\n # Run precompute\n if 'smdpl' in ptl_file:\n mass_encl = vagc.precompute_wl_smdpl(\n um_mock, sim_particles, wl_min_r=wl_min_r, wl_max_r=wl_max_r,\n wl_n_bins=wl_n_bins)\n elif 'mdpl2' in ptl_file:\n mass_encl = vagc.precompute_wl_mdpl2(\n um_mock, sim_particles, wl_min_r=wl_min_r, wl_max_r=wl_max_r,\n wl_n_bins=wl_n_bins)\n else:\n raise NameError(\"# Wrong simulation: [smdpl/mdpl2]\")\n\n np.save(precompute_out, mass_encl)",
"def computeRmse(model, data, n , sc):\n truth = data.map( lambda x: ((x[0], x[1]), x[2]) )\n truth.cache()\n ##print 'test zhou 0.....', truth.count() , '............', truth.take(10)\n\n predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))\n predictions.cache()\n # here let's rescale predicted ratings to 0-10 scale\n maxPrediction = predictions.map(lambda x: x[2]).max()\n minPrediction = predictions.map(lambda x: x[2]).min()\n maxRate = RatingScale\n minRate = RatingScaleMin\n ##print 'test zhou 1......', predictions.count(), '............', predictions.take(10)\n\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate )).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n\n\n #predictedRating = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate ) )\n predictedRating = predictions.map(lambda x: ((x[0], x[1]), x[2] ) )\n predictedRating.cache()\n ##predictedRating.checkpoint()\n ##print 'test zhou 2.......', predictedRating.count(), '............', predictedRating.take(10)\n\n\n \n\n\n predictionsAndRatings = predictedRating.join(truth).values()\n #predictionsAndRatings = sc.union(predictedRating, truth)\n predictionsAndRatings.cache()\n #print 'test zhou 3........', predictionsAndRatings.count(), '............', predictionsAndRatings.take(10)\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), x[2])).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n \n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))\n #return 1.0",
"def simulate_run(run, maker, all_data, train_mask, test_mask, instances, independent, mixture):\n\n train_data = all_data.masked(train_mask)\n test_data = all_data.masked(test_mask)\n\n if instances is not None:\n ids = sorted(train_data.run_lists, key = lambda _: numpy.random.rand())[:instances]\n train_data = train_data.filter(*ids)\n\n if independent:\n train_data = train_data.collect_independent(mixture).only_nonempty()\n else:\n train_data = train_data.collect_systematic(mixture).only_nonempty()\n\n budget = test_data.common_budget\n #budget = test_data.common_budget / 2 # XXX\n suite = borg.fake.FakeSuite(test_data)\n\n if maker.subname == \"preplanning-dir\":\n model_kwargs = {\"K\": 64}\n\n if \"set_alpha\" in maker.variants:\n model_kwargs[\"alpha\"] = 1e-2\n else:\n model_kwargs = {}\n\n solver = maker(suite, train_data, model_kwargs = model_kwargs)\n successes = []\n\n for (i, instance_id) in enumerate(test_data.run_lists):\n logger.info(\"simulating run %i/%i on %s\", i, len(test_data), instance_id)\n\n with suite.domain.task_from_path(instance_id) as instance:\n with borg.accounting() as accountant:\n answer = solver.start(instance).run_then_stop(budget)\n\n succeeded = suite.domain.is_final(instance, answer)\n\n logger.info(\n \"%s %s on %s (%.2f CPU s)\",\n maker.name,\n \"succeeded\" if succeeded else \"failed\",\n os.path.basename(instance),\n accountant.total.cpu_seconds,\n )\n\n if succeeded:\n successes.append(accountant.total.cpu_seconds)\n\n logger.info(\n \"%s had %i successes over %i instances\",\n maker.name,\n len(successes),\n len(test_data),\n )\n\n description = \"{0} ({1})\".format(mixture, \"Sep.\" if independent else \"Sys.\")\n\n return (\n description,\n maker.name,\n instances,\n len(successes),\n numpy.mean(successes),\n numpy.median(successes),\n )",
"def testingData(self):\n\n #import SampleData\n #sampleDataLogic = SampleData.SampleDataLogic()\n #mrHead = sampleDataLogic.downloadMRHead()\n #dtiBrain = sampleDataLogic.downloadDTIBrain()\n \n # w = slicer.modules.SteeredFluidRegistrationWidget\n # w.fixedSelector.setCurrentNode(mrHead)\n # w.movingSelector.setCurrentNode(dtiBrain)\n \n if not slicer.util.getNodes('testbrain1*'):\n import os\n fileName = \"C:\\\\Work\\\\testbrain1.nrrd\"\n vl = slicer.modules.volumes.logic()\n brain1Node = vl.AddArchetypeVolume(fileName, \"testbrain1\", 0)\n else:\n nodes = slicer.util.getNodes('testbrain1.nrrd')\n brain1Node = nodes[0]\n\n if not slicer.util.getNodes('testbrain2*'):\n import os\n fileName = \"C:\\\\Work\\\\testbrain2.nrrd\"\n vl = slicer.modules.volumes.logic()\n brain2Node = vl.AddArchetypeVolume(fileName, \"testbrain2\", 0)\n #TODO else assign from list\n\n # if not slicer.util.getNodes('movingToFixed*'):\n # # Create transform node\n # transform = slicer.vtkMRMLLinearTransformNode()\n # transform.SetName('movingToFixed')\n # slicer.mrmlScene.AddNode(transform)\n\n # transform = slicer.util.getNode('movingToFixed')\n \n # ###\n # # neutral.SetAndObserveTransformNodeID(transform.GetID())\n # ###\n \n compositeNodes = slicer.util.getNodes('vtkMRMLSliceCompositeNode*')\n for compositeNode in compositeNodes.values():\n compositeNode.SetBackgroundVolumeID(brain1Node.GetID())\n compositeNode.SetForegroundVolumeID(brain2Node.GetID())\n compositeNode.SetForegroundOpacity(0.5)\n applicationLogic = slicer.app.applicationLogic()\n applicationLogic.FitSliceToAll()",
"def main():\n args = parse_argument()\n train_file = args['train'][0]\n test_file = args['test'][0]\n print train_file, test_file\n \n user_ratings_train, movie_ratings_train=parse_file(train_file)\n ave_ratings=compute_average_user_ratings(user_ratings_train)\n user_train=list(user_ratings_train.keys())\n with open(test_file,'r') as test:\n with open('predictions.txt', 'w') as pred:\n writer = csv.writer(pred)\n prediction=list()\n actual=list()\n for row in csv.reader(test):\n num_sum=0.0\n sim_sum=0.0\n user=int(row[1])\n movie=int(row[0])\n other_users_ratings=movie_ratings_train[movie]\n other_users=other_users_ratings.keys()\n for i in range(len(other_users)): \n other_user=other_users[i]\n similar=compute_user_similarity(user_ratings_train[user],user_ratings_train[other_user],ave_ratings[user],ave_ratings[other_user])\n num_sum=num_sum+similar*(float(movie_ratings_train[movie][other_user])-float(ave_ratings[other_user]))\n sim_sum = sim_sum+abs(similar)\n #No similar users\n try:\n pred_rating=ave_ratings[user]+num_sum/sim_sum\n except ZeroDivisionError:\n pred_rating=ave_ratings[user]\n prediction.append(pred_rating)\n actual.append(row[2])\n writer.writerow(row+[pred_rating])\n actual_np=np.array(map(float, actual))\n prediction_np=np.array(prediction)\n rmse=np.sqrt(((prediction_np - actual_np)** 2).mean())\n mae=np.absolute(prediction_np - actual_np).mean()\n print \"RMSE\",round(rmse,4)\n print \"MAE\",round(mae,4)"
] | [
"0.58857113",
"0.58756906",
"0.55608004",
"0.5548108",
"0.54858565",
"0.5459243",
"0.54199636",
"0.5406073",
"0.5349795",
"0.5342734",
"0.5308023",
"0.5295146",
"0.5277245",
"0.5275046",
"0.52688444",
"0.52575064",
"0.5236231",
"0.52356595",
"0.5231033",
"0.52271736",
"0.5221161",
"0.5217811",
"0.5215468",
"0.5207171",
"0.5192097",
"0.51890993",
"0.51806074",
"0.5176913",
"0.5166307",
"0.5153947"
] | 0.64828056 | 0 |
Computes the 2-sided p-value for a t-statistic with the specified d.o.f. | def _two_sided_p_value(t, df):
return 2 * scipy.stats.t.cdf(-np.abs(t), df=df) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _one_sided_p_value(t, df):\n return scipy.stats.t.sf(t, df=df)",
"def _p_value(self):\n p_value = chi2.sf(self.test_statistic, 2)\n\n return p_value",
"def _tstat_generic(value1, value2, std_diff, dof, alternative, diff=0):\n\n tstat = (value1 - value2 - diff) / std_diff\n if alternative in [\"two-sided\", \"2-sided\", \"2s\"]:\n pvalue = stats.t.sf(np.abs(tstat), dof) * 2\n elif alternative in [\"larger\", \"l\"]:\n pvalue = stats.t.sf(tstat, dof)\n elif alternative in [\"smaller\", \"s\"]:\n pvalue = stats.t.cdf(tstat, dof)\n else:\n raise ValueError(\"invalid alternative\")\n return tstat, pvalue",
"def pvalue(data, control_label=None, *args, **kwargs):\n def fn(control, test):\n if _is_proportion(control, test):\n return ztest(control, test, alternative='two-sided')[1]\n else:\n return ttest_ind(control, test, alternative='two-sided')[1]\n\n return _apply(data, fn, control_label)",
"def _p_value(self):\n pval = chi2.sf(self.chi_square, self.degrees_of_freedom)\n\n return pval",
"def compute_pvalue(self):\n # Run permutation test\n self.PermutationTest()\n # TS obtained from the original B,T samples\n self.compute_obs_TS()\n \n # Mean and std of the TS distribution\n self.mu = np.mean(self.TS_tilde)\n self.sigma = np.std(self.TS_tilde)\n \n # Standardized test statistic (zero mean, unit variance)\n self.TS_prime = (self.TS_tilde - self.mu)/self.sigma\n self.TS_prime_obs = (self.TS_obs - self.mu)/self.sigma\n \n # Two-sided p-value from TS' distribution\n self.p_value = 2*(1 - 0.01 * stats.percentileofscore(self.TS_prime,\n abs(self.TS_prime_obs)) )\n \n # if 0, compute it from standard normal\n if self.p_value == 0.0:\n self.p_value = self.pvalue_gaussian()\n \n print(\"\")\n print(\"p-value = {:e}\".format(self.p_value))",
"def p_value(set1, set2):\n\ts, p = stats.ttest_ind(set1, set2)\n\treturn p",
"def get_pvalue(self, independent, dependent, second_indep=None):\n\n try:\n if second_indep is None:\n if isinstance(independent, str) and isinstance(dependent, str):\n x = self.df_input[[independent]]\n y = self.df_input[[dependent]]\n elif isinstance(independent, pd.DataFrame) and isinstance(dependent, pd.DataFrame):\n x = independent\n y = dependent \n else:\n x = self.df_input[[independent, second_indep]]\n y = self.df_input[[dependent]]\n\n x = sm.add_constant(x) \n model = sm.OLS(y, x).fit()\n pvalue = model.pvalues\n return pvalue\n except Exception as e:\n print(e)",
"def f_test_var(data1,data2):\n var1, var2 = np.var(data1,ddof = 1),np.var(data2,ddof = 1)\t# compute variance\n df1, df2, = len(data1) - 1, len(data2) - 1\t\t# compute degrees of freedom\n if var1 > var2:\n\tprob = 2. * f.cdf(var1/var2,df1,df2)\n else:\n\tprob = 2. * f.cdf(var2/var1,df2,df1)\n if prob > 1.:\n\treturn 2. - prob\n else:\n\treturn prob",
"def p_value(beta_hat_j, sigma_hat_j):\n if beta_hat_j > 0:\n return 2 - (1 * norm.cdf(beta_hat_j / sigma_hat_j))\n else:\n return 2 * norm.cdf(beta_hat_j / sigma_hat_j)",
"def p_obs(obs, mean, sd, two_tailed=True):\n x = (obs - mean) / sd\n if two_tailed:\n return 2 * (1 - phi(abs(x)))\n return 1 - phi(abs(x))",
"def d2(self):\r\n return self.d1() - self.sigma*self.t**0.5",
"def _compute_f(self, p, dh, dv):\n return dh / (self.beta * p * dv)",
"def fD(self, vpd):\n\t if vpd < 0.1:\n\t return 1.\n\t else:\n\t return 3/13./sqrt(vpd/1000.)",
"def _calc_pval(self):\n t = self.beta / self.stderr_beta\n return (2. * (1. - stats.t.cdf(np.abs(t), self.n - 2)))[0]",
"def hypothesis_test_two_means_testvalue(datae,dataf,test_value,alpha):\n \n # Dataset E\n data_e = 1.0*np.array(datae)\n n_e = data_e.shape[0]*data_e.shape[1]\n mean_e = np.array(data_e).mean()\n var_e = np.array(data_e).var(ddof=1)\n df_e = n_e-1\n \n # Dataset F\n data_f = 1.0*np.array(dataf)\n n_f = dataf.shape[0]*dataf.shape[1]\n mean_f = np.array(data_f).mean()\n var_f = np.array(data_f).var(ddof=1)\n df_f = n_f-1\n \n # Sp,t and pvalue\n Sp = np.sqrt((((df_e*var_e) + (df_f*var_f))/(df_e+df_f)))\n t = ((mean_e-mean_f)-test_value)/(Sp*np.sqrt(1/n_e+1/n_f))\n pvalue = 1-scs.t.cdf(t,df_e+df_f,)\n \n # Decision\n if pvalue > alpha:\n decision = 'Fail to Reject H0'\n return t,pvalue,decision\n else:\n decision = 'Reject H0'\n return t,pvalue,decision",
"def pofd(self):\n return self.table[0, 1] / (self.table[0, 1] + self.table[1, 1])",
"def ttest_ind(self, alternative=\"two-sided\", usevar=\"pooled\", value=0):\n d1 = self.d1\n d2 = self.d2\n\n if usevar == \"pooled\":\n stdm = self.std_meandiff_pooledvar\n dof = d1.nobs - 1 + d2.nobs - 1\n elif usevar == \"unequal\":\n stdm = self.std_meandiff_separatevar\n dof = self.dof_satt()\n else:\n raise ValueError('usevar can only be \"pooled\" or \"unequal\"')\n\n tstat, pval = _tstat_generic(\n d1.mean, d2.mean, stdm, dof, alternative, diff=value\n )\n\n return tstat, pval, dof",
"def getZScorePvalue(zscore=None, twoSided=False):\n\timport scipy.stats as stats\n\tpvalue = stats.norm.sf(zscore)\n\tif twoSided:\n\t\tpvalue = pvalue* 2\n\treturn pvalue",
"def _pvalues_all(self):\n return 2.0 * (1.0 - scs.t.cdf(np.abs(self._tstat_all), self._df_err))",
"def d2(self):\n d1 = self.d1()\n return d1 - self.sigma * (self.t **(0.5))",
"def value(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][0]",
"def std_meandiff_pooledvar(self):\n # this uses ``_var`` to use ddof=0 for formula\n\n d1 = self.d1\n d2 = self.d2\n # could make var_pooled into attribute\n var_pooled = (\n (d1.sumsquares + d2.sumsquares)\n /\n # (d1.nobs - d1.ddof + d2.nobs - d2.ddof))\n (d1.nobs - 1 + d2.nobs - 1)\n )\n return np.sqrt(var_pooled * (1.0 / d1.nobs + 1.0 / d2.nobs))",
"def test_repeated_two_tailed(self):\n rng = np.random.default_rng(6464584234)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)",
"def p2(self) -> float:\n return self.distortion_coefficients[4]",
"def _summarize_t(noncentrality, df, alpha=0.05):\n\n x = np.arange(-7.5, 7.6, 0.1)\n y1 = scipy.stats.t.pdf(x, loc=0, scale=1, df=df)\n y2 = scipy.stats.t.pdf(x, loc=noncentrality, scale=1, df=df)\n\n crit = scipy.stats.t.ppf(1 - alpha/2, df=df)\n\n return x, y1, y2, crit",
"def f_test(chi1,df1,chi2,df2,red_chi = True):\n\n# if chi1/df1 > chi2/df2:\n#\tprob = 2. * f.cdf(chi1/df1, chi2/df2, df1, df2)\n# else:\n#\tprob = 2. * f.cdf(chi2/df2, chi1/df1, df2, df1)\n if red_chi:\n\tfval = (chi1/df1) / (chi2/df2)\n else:\n\tfval = chi1 / chi2\n prob = 2. * f.cdf((chi1/df1) / (chi2/df2), df1, df2)\n if prob > 1.: \n\treturn 2. - prob\n else:\n\treturn prob",
"def es(d1, d2, verbose=False):\n\n d1 = assure_numpy_array(d1)\n d2 = assure_numpy_array(d2)\n\n es, pvalue = stats.epps_singleton_2samp(d1, d2)\n\n return es, pvalue",
"def test_stat(df,ivar,tvar,equal_var=True,ddof=0):\n ivar_uniques = df[ivar].unique().shape[0]\n tvar_uniques = df[tvar].unique().shape[0]\n if tvar_uniques < 2:\n print \"Only one sample can be generated\"\n return None\n if ivar_uniques <= 10: #This the case of a categorical independant variable. We use chisquare\n ss = pd.crosstab(df[ivar],df[tvar])\n ss = (ss.T/ss.sum(axis=1)).T\n s0,s1 = ss[0].values,ss[1].values\n\n return chisquare(s1,s0,ddof=ddof)\n\n if ivar_uniques >10: #Consider using ttest\n s0 = df[ivar][df[tvar] == 0]\n s1 = df[ivar][df[tvar] == 1]\n return ttest_ind(s1,s0,equal_var=equal_var)",
"def d2P_dt2(P, t=0):\n return np.array([[ -self.d + self.s - (2 * P[0]) * self.s , self.r ],\n [ self.d, -self.r ] ])"
] | [
"0.68148166",
"0.635963",
"0.59294915",
"0.59291756",
"0.5869865",
"0.5867593",
"0.58343273",
"0.58018786",
"0.5522177",
"0.54848075",
"0.5471973",
"0.5449782",
"0.54324764",
"0.54098445",
"0.54015267",
"0.5346402",
"0.53168315",
"0.531237",
"0.53113824",
"0.5309784",
"0.5298294",
"0.5279564",
"0.52611125",
"0.5256313",
"0.5224725",
"0.5212022",
"0.52021986",
"0.5198574",
"0.5195707",
"0.5183506"
] | 0.7478272 | 0 |
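As a quick editorial usage sketch of the row above (not part of the dataset itself; it only assumes numpy and scipy, as used in the snippet), the two-sided p-value doubles the Student-t tail mass beyond |t|:

import numpy as np
import scipy.stats

def _two_sided_p_value(t, df):
    # Twice the lower-tail probability beyond -|t| for a Student-t with df degrees of freedom.
    return 2 * scipy.stats.t.cdf(-np.abs(t), df=df)

# With 15 degrees of freedom the two-sided critical value at alpha = 0.05 is about 2.131,
# so t = 2.1 yields a p-value just above 0.05 (roughly 0.053).
print(_two_sided_p_value(2.1, df=15))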
Computes the 1-sided p-value for a t-statistic with the specified d.o.f. | def _one_sided_p_value(t, df):
return scipy.stats.t.sf(t, df=df) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _two_sided_p_value(t, df):\n return 2 * scipy.stats.t.cdf(-np.abs(t), df=df)",
"def fD(self, vpd):\n\t if vpd < 0.1:\n\t return 1.\n\t else:\n\t return 3/13./sqrt(vpd/1000.)",
"def pofd(self):\n return self.table[0, 1] / (self.table[0, 1] + self.table[1, 1])",
"def _p_value(self):\n p_value = chi2.sf(self.test_statistic, 2)\n\n return p_value",
"def _pvalues_all(self):\n return 2.0 * (1.0 - scs.t.cdf(np.abs(self._tstat_all), self._df_err))",
"def p_value(set1, set2):\n\ts, p = stats.ttest_ind(set1, set2)\n\treturn p",
"def _get_p_value_for_t_value_from_dist(t_value: Tensor) ->Tensor:\n device = t_value\n normal_dist = torch.distributions.normal.Normal(torch.tensor([0.0]), torch.tensor([1.0]))\n is_nan = t_value.isnan()\n t_value = t_value.nan_to_num()\n p_value = normal_dist.cdf(t_value)\n return p_value.where(~is_nan, torch.tensor(float('nan'), dtype=p_value.dtype, device=p_value.device))",
"def pvalue(data, control_label=None, *args, **kwargs):\n def fn(control, test):\n if _is_proportion(control, test):\n return ztest(control, test, alternative='two-sided')[1]\n else:\n return ttest_ind(control, test, alternative='two-sided')[1]\n\n return _apply(data, fn, control_label)",
"def value(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][0]",
"def ppf(self,x):\n ppfValue = self.invCDF(x)\n return ppfValue",
"def d1(self):\n f = (self.rf + (self.sigma ** (2)) / 2 ) * self.t\n return (1/(self.sigma * (self.t ** (0.5)))) *(math.log(self.s/self.x) + f)",
"def d1(self):\r\n numerator = math.log(self.s/self.x) + (self.rf-self.div+self.sigma**2*0.5)*self.t # Numerator of d1\r\n denominator = self.sigma * self.t**0.5 # Denominator of d1\r\n \r\n return numerator/denominator",
"def evaluate_one(self, x):\n # p = 1. / (np.sqrt(2. * np.pi) * self.sigma) * \\\n # np.exp(-0.5 * (self.mean - x) * self.invvar * (self.mean - x))\n p = self.dist.probability(x)\n return p",
"def compute_pvalue(self):\n # Run permutation test\n self.PermutationTest()\n # TS obtained from the original B,T samples\n self.compute_obs_TS()\n \n # Mean and std of the TS distribution\n self.mu = np.mean(self.TS_tilde)\n self.sigma = np.std(self.TS_tilde)\n \n # Standardized test statistic (zero mean, unit variance)\n self.TS_prime = (self.TS_tilde - self.mu)/self.sigma\n self.TS_prime_obs = (self.TS_obs - self.mu)/self.sigma\n \n # Two-sided p-value from TS' distribution\n self.p_value = 2*(1 - 0.01 * stats.percentileofscore(self.TS_prime,\n abs(self.TS_prime_obs)) )\n \n # if 0, compute it from standard normal\n if self.p_value == 0.0:\n self.p_value = self.pvalue_gaussian()\n \n print(\"\")\n print(\"p-value = {:e}\".format(self.p_value))",
"def pc1(d):\n\timport numpy as np\n\tfrom sklearn.decomposition import TruncatedSVD\n\t# Normalize data\n\tt1 = d.T\n\tt1 = t1 - t1.mean(axis=0)\n\tt1 = t1 / (np.sqrt((t1**2).mean(axis=0)) + 1E-200)\n\tt0 = TruncatedSVD(n_components=1)\n\tt1 = t0.fit_transform(t1).T.astype(d.dtype, copy=False).ravel()\n\tassert t1.shape == (d.shape[1], )\n\treturn t1",
"def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)",
"def ppf(self,x):\n if x > 1.0 or x < 0:\n self.raiseAnError(IOError,'Categorical distribution cannot calculate ppf for', str(x), '! Valid value should within [0,1]!')\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == 1.0:\n return float(sortedMapping[-1][0]) if self.isFloat else sortedMapping[-1][0]\n else:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if cumulative >= x:\n return float(element[0]) if self.isFloat else element[0]",
"def p_value(beta_hat_j, sigma_hat_j):\n if beta_hat_j > 0:\n return 2 - (1 * norm.cdf(beta_hat_j / sigma_hat_j))\n else:\n return 2 * norm.cdf(beta_hat_j / sigma_hat_j)",
"def is_one(f):\n return dmp_one_p(f.rep, f.lev, f.dom)",
"def _compute_f(self, p, dh, dv):\n return dh / (self.beta * p * dv)",
"def _tstat_generic(value1, value2, std_diff, dof, alternative, diff=0):\n\n tstat = (value1 - value2 - diff) / std_diff\n if alternative in [\"two-sided\", \"2-sided\", \"2s\"]:\n pvalue = stats.t.sf(np.abs(tstat), dof) * 2\n elif alternative in [\"larger\", \"l\"]:\n pvalue = stats.t.sf(tstat, dof)\n elif alternative in [\"smaller\", \"s\"]:\n pvalue = stats.t.cdf(tstat, dof)\n else:\n raise ValueError(\"invalid alternative\")\n return tstat, pvalue",
"def _correct_p(self, f0, f1):\n return self.p * np.exp(self.dbeta * (f0 + f1) / 2)",
"def _p_value(self):\n pval = chi2.sf(self.chi_square, self.degrees_of_freedom)\n\n return pval",
"def secant1d(f, df, x0, x1, niter=10):\n for i in xrange(niter):\n x_new = x1 - df(x1)*(x1 - x0)/(df(x1)-df(x0))\n x0 = x1\n x1 = x_new\n return x_new",
"def ppf(self,x):\n return self.categoricalDist.ppf(x)",
"def pvalue_gaussian(self):\n \n pv = 2 * stats.norm.sf(abs(self.TS_prime_obs), loc=0, scale=1)\n return(pv)",
"def ppf(self, q):\n self.value = self.rv.ppf(\n q, *self._pymc_dists_to_value(self.args), **self.kwds\n )\n return self.value",
"def f_P_T(u, P_0, r_f, d, s, T):\n sigma = s * math.sqrt(T)\n sigma_2 = math.pow(sigma, 2)\n mu = math.log(P_0) + (r_f - d) * T - sigma_2 / 2\n P_T = math.exp(mu + u * sigma)\n return P_T",
"def _summarize_t(noncentrality, df, alpha=0.05):\n\n x = np.arange(-7.5, 7.6, 0.1)\n y1 = scipy.stats.t.pdf(x, loc=0, scale=1, df=df)\n y2 = scipy.stats.t.pdf(x, loc=noncentrality, scale=1, df=df)\n\n crit = scipy.stats.t.ppf(1 - alpha/2, df=df)\n\n return x, y1, y2, crit",
"def t_test_(x):\n assert np.ndim(x) == 1 and (not np.any(np.isnan(x)))\n\n if (len(x) <= 1) or (not np.all(np.isfinite(x))):\n return 1.0 # Can't say anything about scale => p=1\n\n _, pval = sst.ttest_1samp(x, 0.0)\n if np.isnan(pval):\n # Should only be possible if scale underflowed to zero:\n assert np.var(x, ddof=1) <= 1e-100\n # It is debatable if the condition should be ``np.mean(x) == 0.0`` or\n # ``np.all(x == 0.0)``. Should not matter in practice.\n pval = np.float(np.mean(x) == 0.0)\n assert 0.0 <= pval and pval <= 1.0\n return pval"
] | [
"0.66497636",
"0.6062942",
"0.59336257",
"0.58911395",
"0.58727777",
"0.5768678",
"0.5765754",
"0.57613516",
"0.5737039",
"0.57043195",
"0.56871516",
"0.56455255",
"0.5643148",
"0.5641853",
"0.56283367",
"0.55456716",
"0.55343145",
"0.5526232",
"0.54990625",
"0.54945636",
"0.5490515",
"0.54829764",
"0.54561394",
"0.53979987",
"0.53865534",
"0.5374897",
"0.5359822",
"0.5353227",
"0.53426814",
"0.5334705"
] | 0.7691578 | 0 |
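A related editorial sketch (illustrative only, with the same numpy/scipy assumptions as above): by symmetry of the Student-t distribution, sf(t) equals cdf(-t) for non-negative t, so the two-sided p-value is exactly twice the one-sided value returned by the row above.

import numpy as np
import scipy.stats

def _one_sided_p_value(t, df):
    # Upper-tail probability of a Student-t with df degrees of freedom.
    return scipy.stats.t.sf(t, df=df)

def _two_sided_p_value(t, df):
    return 2 * scipy.stats.t.cdf(-np.abs(t), df=df)

t, df = 1.8, 20
assert np.isclose(_two_sided_p_value(t, df), 2 * _one_sided_p_value(t, df))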
Wraps index_fom_fn to accept (disease, score) vectors. | def fom_fn(indices, reader_idx_vector):
idx_values = set(reader_idx_vector)
assert len(idx_values) == 1
idx = idx_values.pop()
assert idx in ([-1] + list(reader_indices)), idx
unrecognized = set(indices) - set(example_indices)
assert unrecognized == set(), unrecognized
return index_fom_fn(indices, idx) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def model_vs_readers_orh_index(example_indices,\n reader_indices,\n index_fom_fn,\n coverage=0.95,\n margin=0):\n\n def fom_fn(indices, reader_idx_vector):\n \"\"\"Wraps index_fom_fn to acccept (disease, score) vectors.\"\"\"\n idx_values = set(reader_idx_vector)\n assert len(idx_values) == 1\n idx = idx_values.pop()\n assert idx in ([-1] + list(reader_indices)), idx\n unrecognized = set(indices) - set(example_indices)\n assert unrecognized == set(), unrecognized\n\n return index_fom_fn(indices, idx)\n\n assert -1 not in reader_indices\n disease = np.array(example_indices)\n model_score = -1 * np.ones_like(disease)\n reader_scores = np.column_stack(\n [idx * np.ones_like(disease) for idx in reader_indices])\n\n return model_vs_readers_orh(\n disease,\n model_score,\n reader_scores,\n fom_fn,\n coverage=coverage,\n margin=margin)",
"def _foi_average(conn, foi_idx):\n # get the number of foi\n n_foi = foi_idx.shape[0]\n\n # get input shape and replace n_freqs with the number of foi\n sh = list(conn.shape)\n sh[-2] = n_foi\n\n # compute average\n conn_f = np.zeros(sh, dtype=conn.dtype)\n for n_f, (f_s, f_e) in enumerate(foi_idx):\n conn_f[..., n_f, :] = conn[..., f_s:f_e, :].mean(-2)\n return conn_f",
"def create_index_tfidf(lines, numDocuments):\n \n index=collections.defaultdict(list)\n tf=collections.defaultdict(list) #term frequencies of terms in documents (documents in the same order as in the main index)\n df=collections.defaultdict(int) #document frequencies of terms in the corpus\n idf=collections.defaultdict(float)\n with Bar('Creating tf-idf index', max=len(lines)) as bar:\n for key in lines:\n page_id = key \n terms = getTerms(lines[key]) \n\n ## create the index for the **current page** and store it in termdictPage\n ## termdictPage in form ==> { ‘term1’: [currentdoc, [list of positions]], ...,‘termn’: [currentdoc, [list of positions]]}\n\n termdictPage={}\n\n for position, term in enumerate(terms): \n try:\n # if the term is already in the dict append the position to the corrisponding list\n termdictPage[term][1].append(position) \n except:\n # Add the new term as dict key and initialize the array of positions and add the position\n termdictPage[term]=[page_id, array('I',[position])] \n\n #normalize term frequencies\n norm=0\n for term, posting in termdictPage.items(): \n # posting ==> [currentdoc, [list of positions]] \n norm+=len(posting[1])**2\n norm=math.sqrt(norm)\n\n\n #calculate the tf(dividing the term frequency by the above computed norm) and df weights\n for term, posting in termdictPage.items(): \n # append the tf for current term (tf = term frequency in current doc/norm)\n tf[term].append(np.round(len(posting[1])/norm,4)) ## SEE formula (1) above\n #increment the document frequency of current term (number of documents containing the current term)\n df[term] += 1 \n\n #merge the current page index with the main index\n for termpage, postingpage in termdictPage.items():\n index[termpage].append(postingpage)\n\n # Compute idf following the formula (3) above. HINT: use np.log\n bar.next()\n for term in df:\n idf[term] = np.round(np.log(float(numDocuments/df[term])),4)\n \n return (index, tf, df, idf)",
"def featureize(F, observation_ids, all_tokens_dict, binary=False):\n (mrc_words_index,) = F\n\n n = len(mrc_words_index)\n m = len(observation_ids)\n\n # Observations\n X = np.zeros((m,n), dtype=np.float)\n\n for (i,ob_id) in enumerate(observation_ids, start=0):\n\n N = len(all_tokens_dict[ob_id])\n\n for token in all_tokens_dict[ob_id]:\n\n if token in mrc_words_index:\n\n if binary:\n X[i][mrc_words_index[token]] = 1\n else: \n X[i][mrc_words_index[token]] += 1.0\n\n if not binary:\n # Normalize by the number of tokens in each observation\n for j in range(0, N):\n X[i][j] /= float(N)\n\n return X",
"def compute_word_idf_(wx_series, wx2_idxs, idx2_aid, daids):\n if utool.VERBOSE:\n mark, end_ = utool.log_progress('[smk_index] Word IDFs: ',\n len(wx_series), flushfreq=500,\n writefreq=50, with_totaltime=True)\n mark(0)\n wx_series_values = pdh.ensure_values(wx_series)\n idx2_aid_values = pdh.ensure_values(idx2_aid)\n wx2_idxs_values = pdh.ensure_values_subset(wx2_idxs, wx_series_values)\n #with utool.Timer('method 1'): # 0.16s\n idxs_list = [pdh.ensure_values(idxs).astype(INDEX_TYPE) for idxs in wx2_idxs_values] # 11%\n aids_list = [idx2_aid_values.take(idxs) if len(idxs) > 0 else [] for idxs in idxs_list]\n nTotalDocs = len(daids)\n nDocsWithWord_list = [len(set(aids)) for aids in aids_list] # 68%\n # compute idf half of tf-idf weighting\n idf_list = [np.log(nTotalDocs / nDocsWithWord).astype(FLOAT_TYPE)\n if nDocsWithWord > 0 else 0.0\n for nDocsWithWord in nDocsWithWord_list] # 17.8 ms # 13%\n if utool.VERBOSE:\n end_()\n if WITH_PANDAS:\n wx2_idf = pdh.IntSeries(idf_list, index=wx_series, name='idf')\n else:\n wx2_idf = dict(zip(wx_series_values, idf_list))\n return wx2_idf",
"def tfidf_occurrence_matrix( O ) :\n # number of words in each document\n words_in_doc = O.sum(1)\n docs_containing_word = sum( asarray( O > 0, 'i' ), axis=0 )\n logpart = log(float(O.shape[0]) / docs_containing_word )\n\n result = (O.astype(float32) / words_in_doc[:,newaxis] ) * logpart\n \n return result",
"def lookup_idf(self) -> list:\n self.__process()\n prox_by_doc = {}\n\n for token in self._query_vector:\n for token_info in self._index.get_token_search(token):\n doc = token_info.doc\n if doc not in prox_by_doc:\n prox_by_doc[doc] = 0\n prox_by_doc[doc] += self._query_vector[token] * token_info.weight\n\n return sorted(prox_by_doc.items(), key=lambda t: t[1], reverse=True)",
"def new_inv_ind(doc_inds, documents, inv_ind_func):\n temp = dict()\n\n # msgs here is the item dict \n for item in documents:\n # print(item)\n temp[item['id']] = item\n\n new_docs = np.array([])\n for i in doc_inds:\n new_docs = np.append(new_docs, temp[i])\n\n new_inv_ind = inv_ind_func(new_docs)\n return new_inv_ind",
"def gen_ind_params(self, x, hist_cr, hist_f):\n mi = self.integers(self.hist_mem_size) # a random pair of f cr is selected form historical memory\n m_cr = hist_cr[mi]\n m_f = hist_f[mi]\n cr = self.normal(m_cr, 0.1) if m_cr != -1 else 0\n # cr is randomised from normal distribution and then repaired if needed\n cr = np.clip(cr, 0, 1)\n f = self.cauchy(m_f, 0.1)\n # f is randomised from cauchy distribution until the value is >0 and then repaired if needed\n f = np.clip(f, 0, 1)\n return self.individual_type(x=x.x, differential_weight=f, crossover_probability=cr, e=False)",
"def calc_idf(self, nd):\n # collect idf sum to calculate an average idf for epsilon value\n idf_sum = 0\n # collect words with negative idf to set them a special epsilon value.\n # idf can be negative if word is contained in more than half of documents\n negative_idfs = []\n for word, freq in nd.items():\n idf = math.log(self.corpus_size - freq + 0.5) - math.log(freq + 0.5)\n self.idf[word] = idf\n idf_sum += idf\n if idf < 0:\n negative_idfs.append(word)\n self.average_idf = idf_sum / len(self.idf)\n\n eps = self.epsilon * self.average_idf\n for word in negative_idfs:\n self.idf[word] = eps",
"def _get_document_representation(self, doc_id, index):\n vec = np.zeros(shape=(index.num_terms,), dtype=np.float64)\n for i, term in enumerate(sorted(index.get_index_terms())):\n vec[i] = self._tfidf(\n index.get_term_frequency(term, doc_id),\n index.get_document_frequency(term),\n index.get_document_count()\n )\n return vec",
"def count_idf(self):\n idf = dict.fromkeys(range(self.instances.shape[1]), 0) # initialize for all features\n num_docs = self.instances.shape[0]\n feature_counts = self.count_document_frequency()\n for feature in feature_counts.keys():\n idf[feature] = math.log((num_docs / feature_counts[feature]), 10) if feature_counts[feature] > 0 else 0\n return idf",
"def wrapped_f(*args):\n input_docs = func(*args)\n output_doc_cnt = 0\n # split input_docs into chunks of size self.batch_size\n for batchiter in iter_n(input_docs, int(self.batch_size / len(self.input_types))):\n output_docs = self.key_lookup_batch(batchiter)\n for odoc in output_docs:\n # print debug information if the original id is the in the debug list\n if \"dt_debug\" in odoc:\n if isinstance(self.debug, list) and odoc[\"dt_debug\"][\"orig_id\"] in self.debug:\n self.logger.debug(\"DataTransform Debug doc['dt_debug']: {}\".format(odoc[\"dt_debug\"]))\n output_doc_cnt += 1\n yield odoc\n self.logger.info(\"wrapped_f Num. output_docs: {}\".format(output_doc_cnt))\n self.logger.info(\"DataTransform.histogram: {}\".format(self.histogram))",
"def _get_tokens_idf(self) ->Dict[int, float]:\n token_counter: Counter = Counter()\n for tokens in map(self._set_of_tokens, self.text['input_ids']):\n token_counter.update(tokens)\n tokens_idf: Dict[int, float] = defaultdict(self._get_tokens_idf_default_value)\n tokens_idf.update({idx: math.log((self.num_sentences + 1) / (occurrence + 1)) for idx, occurrence in token_counter.items()})\n return tokens_idf",
"def cosine_scoring(query, doc_lengths, index):\n idf_dict_vector = compute_idf_vector(len(doc_lengths), index)\n doc_scores = {}\n\n for q in query:\n if q in idf_dict_vector:\n wt_q = idf_dict_vector[q] * query[q]\n else:\n wt_q = 0\n\n for tup in index[q][1:]:\n wf_q = idf_dict_vector[q] * tup[1]\n if tup[0] in doc_scores:\n doc_scores[tup[0]] += wt_q * wf_q\n else:\n doc_scores[tup[0]] = wt_q * wf_q\n\n for doc in doc_scores:\n doc_scores[doc] = doc_scores[doc] / doc_lengths[doc]\n\n return doc_scores",
"def embed(raw_seq, index_dict):\n return np.asarray([index_dict[word.lower()]\n if word.lower() in index_dict\n else index_dict[OOV_TOKEN] for word in raw_seq])",
"def create_load_tfidf_ann_index(ann_index_path: str, tfidf_vectorizer_path: str,\n umls_concept_list: List) -> Tuple[List[int], TfidfVectorizer, FloatIndex]:\n uml_concept_ids = []\n uml_concept_aliases = []\n print('Collecting aliases ... ')\n for i, concept in enumerate(umls_concept_list):\n concept_id = concept['concept_id']\n concept_aliases = concept['aliases'] + [concept['canonical_name']]\n\n uml_concept_ids.extend([concept_id] * len(concept_aliases))\n uml_concept_aliases.extend(concept_aliases)\n\n if i % 1000000 == 0 and i > 0:\n print(f'Processed {i} or {len(umls_concept_list)} concepts')\n\n uml_concept_ids = np.array(uml_concept_ids)\n uml_concept_aliases = np.array(uml_concept_aliases)\n assert len(uml_concept_ids) == len(uml_concept_aliases)\n\n tfidf_vectors_path = f'{tfidf_vectorizer_path}.npy'\n if not os.path.isfile(tfidf_vectorizer_path):\n print(f'No tfidf vectorizer on {tfidf_vectorizer_path}')\n print(f'Fitting tfidf vectorizer on {len(uml_concept_aliases)} aliases')\n # tfidf_vectorizer = HashingVectorizer(analyzer='char_wb', ngram_range=(3, 3), n_features=2**9)\n tfidf_vectorizer = TfidfVectorizer(analyzer='char_wb', ngram_range=(3, 3), min_df=10, dtype=np.float32) # max_df=150000, max_features=10000)\n start_time = datetime.datetime.now()\n uml_concept_alias_tfidfs = tfidf_vectorizer.fit_transform(uml_concept_aliases)\n print(f'Saving tfidf vectorizer to {tfidf_vectorizer_path}')\n dump(tfidf_vectorizer, tfidf_vectorizer_path)\n print(f'Saving tfidf vectors to {tfidf_vectors_path}')\n np.save(tfidf_vectors_path, uml_concept_alias_tfidfs)\n end_time = datetime.datetime.now()\n total_time = (end_time - start_time)\n print(f'Fitting and saving vectorizer, and saving vectorized aliases took {total_time.total_seconds()} seconds')\n\n start_time = datetime.datetime.now()\n print(f'Loading tfidf vectorizer from {tfidf_vectorizer_path}')\n tfidf_vectorizer = load(tfidf_vectorizer_path)\n if isinstance(tfidf_vectorizer, TfidfVectorizer):\n print(f'Tfidf vocab size: {len(tfidf_vectorizer.vocabulary_)}')\n print(f'Loading tfidf vectors from {tfidf_vectors_path}')\n uml_concept_alias_tfidfs = np.load(tfidf_vectors_path).tolist()\n end_time = datetime.datetime.now()\n total_time = (end_time - start_time)\n print(f'Loading vectorizer and vectors took {total_time.total_seconds()} seconds')\n\n # find empty (all zeros) tfidf vectors\n empty_tfidfs_boolean_flags = np.array(uml_concept_alias_tfidfs.sum(axis=1) != 0).reshape(-1,)\n deleted_aliases = uml_concept_aliases[empty_tfidfs_boolean_flags == False]\n number_of_non_empty_tfidfs = len(deleted_aliases)\n total_number_of_tfidfs = uml_concept_alias_tfidfs.shape[0]\n print(f'Deleting {number_of_non_empty_tfidfs}/{total_number_of_tfidfs} aliases because their tfidf is empty')\n\n # remove empty tfidf vectors, otherwise nmslib will crashd\n uml_concept_ids = uml_concept_ids[empty_tfidfs_boolean_flags]\n uml_concept_aliases = uml_concept_aliases[empty_tfidfs_boolean_flags]\n uml_concept_alias_tfidfs = uml_concept_alias_tfidfs[empty_tfidfs_boolean_flags]\n print(deleted_aliases)\n assert len(uml_concept_ids) == len(uml_concept_aliases)\n assert len(uml_concept_ids) == uml_concept_alias_tfidfs.shape[0]\n\n # nmslib hyperparameters (very important)\n # guide: https://github.com/nmslib/nmslib/blob/master/python_bindings/parameters.md\n # default values resulted in very low recall\n M = 100 # set to the maximum recommended value. Improves recall at the expense of longer indexing time\n efC = 2000 # `C` for Construction. 
Set to the maximum recommended value\n # Improves recall at the expense of longer indexing time\n efS = 1000 # `S` for Search. This controls performance at query time. Maximum recommended value is 2000.\n # It makes the query slow without significant gain in recall.\n\n num_threads = 60 # set based on the machine\n\n index_params = {'M': M, 'indexThreadQty': num_threads, 'efConstruction': efC, 'post' : 0}\n\n if not os.path.isfile(ann_index_path):\n print(f'No ann index on {ann_index_path}')\n print(f'Fitting ann index on {len(uml_concept_aliases)} aliases (takes 2 hours)')\n\n start_time = datetime.datetime.now()\n ann_index = nmslib.init(method='hnsw', space='cosinesimil_sparse', data_type=nmslib.DataType.SPARSE_VECTOR)\n ann_index.addDataPointBatch(uml_concept_alias_tfidfs)\n ann_index.createIndex(index_params, print_progress=True)\n ann_index.saveIndex(ann_index_path)\n end_time = datetime.datetime.now()\n elapsed_time = end_time - start_time\n print(f'Fitting ann index took {elapsed_time.total_seconds()} seconds')\n\n print(f'Loading ann index from {ann_index_path}')\n ann_index = nmslib.init(method='hnsw', space='cosinesimil_sparse', data_type=nmslib.DataType.SPARSE_VECTOR)\n ann_index.addDataPointBatch(uml_concept_alias_tfidfs)\n ann_index.loadIndex(ann_index_path)\n query_time_params = {'efSearch': efS}\n ann_index.setQueryTimeParams(query_time_params)\n\n return uml_concept_ids, tfidf_vectorizer, ann_index",
"def index_object(idxs=None):",
"def term_idf(self, term):\n idf = math.log(2 + self.count_term_distinct_documents(ANY))\\\n - math.log(1 + self.count_term_distinct_documents(term))\n return idf",
"def calc_idf(docs):\r\n terms = set()\r\n for doc in docs:\r\n for term in doc:\r\n terms.add(term)\r\n idf = {}\r\n for term in terms:\r\n term_count = 0\r\n doc_count = 0\r\n for doc in docs:\r\n doc_count += 1\r\n if term in doc:\r\n term_count += 1\r\n idf[term] = doc_count/term_count\r\n return idf",
"def process_index(index, intensity, interaction_symbol):\n return tuple(index.split(interaction_symbol))",
"def tf_idf_score():\n\n global final_doc_set\n global final_dictionary\n final_score = []\n\n for doc_id in final_doc_set:\n score = 0\n for query_term in final_dictionary.keys():\n if final_dictionary[query_term][1].get(doc_id):\n tf = final_dictionary[query_term][1][doc_id][0]\n df = final_dictionary[query_term][0]\n\n score += ((1 + log10(tf)) * log10(TOTAL_DOCS / df))\n\n final_score.append([doc_id, score])\n\n return final_score",
"def calculate_tf_idf(self,doc_token_number,document_count):\n for term_ in self.inverted_index.keys():\n postingsList=self.inverted_index[term_]\n len_of_posting_list=postingsList.length\n idf=document_count/len_of_posting_list\n if postingsList.start_node is None:\n print(\"List has no element\")\n return\n else:\n n = postingsList.start_node\n # Start traversal from head, and go on till you reach None\n while n is not None:\n freq=n.term_frequency\n tf=freq/doc_token_number[n.value]\n tf_idf_value=tf*idf\n n.tf_idf=tf_idf_value\n n = n.next",
"def Xind(L, f):\n nu = 2*pi*f\n return 0+1j*nu*L",
"def faiss_index(vectors, ids=None):\n index = faiss.IndexFlatL2(vectors.shape[1])\n if ids:\n index = faiss.IndexIDMap(index)\n index.add_with_ids(vectors, np.array([i for i in ids]))\n else:\n index.add(vectors)\n\n return index",
"def process_index_with_weights(index, intensity, interaction_symbol):\n return tuple(index.split(interaction_symbol) + [intensity])",
"def create_feature_vector(ix, term_dict, bow):\n\n\ttfv = list()\n\t# get corpus length (n. of docs)\n\tnum_docs = ix.num_docs\n\tfor idx, tf in bow:\n\t\t# get term from dict index\n\t\tterm = ix[idx]\n\t\t# filter out terms not contained in self.term_dict\n\t\tif term not in term_dict:\n\t\t\tcontinue\n\t\t# filter out terms w/ length gt 20\n\t\tif len(term) > 20:\n\t\t\tcontinue\n\t\t# filter out non-alphabetical terms\n\t\tif not term.isalpha():\n\t\t\tcontinue\n\t\t# get document frequency \n\t\tdf = ix.dfs[idx]\n\t\t# compute ratio between df and num_docs\n\t\tratio = df / num_docs\n\t\tif ratio > 0.1: # skip term - requires tuning: check if it's okay to keep it as is\n\t\t\tcontinue\n\t\t# append term w/ tf to tfv\n\t\ttfv.append((term, tf))\n\treturn tfv",
"def convert_doc_count_to_idf(self, df_of_dc_to_make_into_idf):\n num_transcripts = df_of_dc_to_make_into_idf.loc[self.__str_cheeky_document_counter]\n # in our case, because of the way we are constructing the set of terms\n # there should never be a term that has a document frequency of zero.\n # however, in general, if querying a new phrase using existing data,\n # in theory a term could have a document frequency of zero, so the general\n # practice is to add 1 to the document frequency, so that in the next\n # set, division by zero does not happen.\n df_of_dc_to_make_into_idf = df_of_dc_to_make_into_idf + 1\n # then we find the IDF (inverse document frequency)\n df_of_dc_to_make_into_idf = num_transcripts / df_of_dc_to_make_into_idf\n # then we find the log of that\n df_of_dc_to_make_into_idf = log(df_of_dc_to_make_into_idf)\n return df_of_dc_to_make_into_idf",
"def Exp_OB(xref,theta):\n\n F = xref + theta\n nF = np.shape(F)\n\n for r in range(nF[1]):\n F[:,r] = F[:,r]/np.linalg.norm(F[:,r])\n\n return F",
"def index_data_annots(annots_df, daids, words, with_internals=True,\n aggregate=False, alpha=3, thresh=0):\n if utool.VERBOSE:\n print('[smk_index] index_data_annots')\n flann_params = {}\n _words = pdh.ensure_values(words)\n wordflann = nntool.flann_cache(_words, flann_params=flann_params,\n appname='smk')\n _daids = pdh.ensure_values(daids)\n _vecs_list = pdh.ensure_2d_values(annots_df['vecs'][_daids])\n _idx2_dvec, _idx2_daid, _idx2_dfx = nntool.invertable_stack(_vecs_list, _daids)\n\n # Pandasify\n if WITH_PANDAS:\n idx_series = pdh.IntIndex(np.arange(len(_idx2_daid)), name='idx')\n idx2_dfx = pdh.IntSeries(_idx2_dfx, index=idx_series, name='fx')\n idx2_daid = pdh.IntSeries(_idx2_daid, index=idx_series, name='aid')\n idx2_dvec = pd.DataFrame(_idx2_dvec, index=idx_series, columns=VEC_COLUMNS)\n else:\n idx2_dfx = _idx2_dfx\n idx2_daid = _idx2_daid\n idx2_dvec = _idx2_dvec\n pass\n\n invindex = InvertedIndex(words, wordflann, idx2_dvec, idx2_daid, idx2_dfx, daids)\n if with_internals:\n compute_data_internals_(invindex, aggregate, alpha, thresh) # 99%\n return invindex"
] | [
"0.59032357",
"0.54225093",
"0.52509314",
"0.5245321",
"0.5230781",
"0.5219626",
"0.5194253",
"0.51824915",
"0.5174531",
"0.51368845",
"0.5055017",
"0.50405216",
"0.50282276",
"0.502258",
"0.49983007",
"0.49819922",
"0.49674502",
"0.4957773",
"0.49574274",
"0.49560648",
"0.49383125",
"0.49345905",
"0.49275905",
"0.4926812",
"0.4923749",
"0.49112153",
"0.49053207",
"0.49032736",
"0.4903033",
"0.48878196"
] | 0.6301212 | 0 |
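The wrapper in the row above treats the per-reader "score" vector as a constant reader index; the following self-contained sketch of that calling convention is an editorial illustration (example_indices, per_index_fom and the index values are made-up stand-ins, not data from the original study code):

import numpy as np

example_indices = [10, 11, 12, 13]
per_index_fom = {-1: 0.80, 0: 0.75, 1: 0.78}  # -1 is the sentinel index for the standalone model

def index_fom_fn(indices, idx):
    # A real figure-of-merit would score reader `idx` on the selected case indices;
    # here it just looks up a fixed value for illustration.
    return per_index_fom[idx]

def fom_fn(indices, reader_idx_vector):
    idx_values = set(reader_idx_vector)
    assert len(idx_values) == 1  # the "scores" are a single reader index broadcast over cases
    return index_fom_fn(indices, idx_values.pop())

disease = np.array(example_indices)
model_score = -1 * np.ones_like(disease)  # model rows carry the sentinel index -1
print(fom_fn(disease, model_score))  # 0.8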
Performs the ORH procedure to compare a standalone model against readers. This function uses the Obuchowski-Rockette-Hillis analysis to compare the quality of a model's predictions with that of a panel of readers that all interpreted the same cases. I.e., the reader data occurs in a dense matrix of shape [num_cases, num_readers], and the model has been applied to these same cases. This tool can be used with an arbitrary 'figure of merit' (FOM) defined on the labels and the scores; scores can be binary, ordinal or continuous. It tests the null hypothesis that the average difference in the FOM between the readers and the model is 0. | def model_vs_readers_orh(disease,
model_score,
reader_scores,
fom_fn,
coverage=0.95,
margin=0):
if margin < 0:
raise ValueError('margin parameter should be nonnegative.')
num_cases, num_readers = reader_scores.shape
if len(disease) != num_cases or len(model_score) != num_cases:
raise ValueError(
'disease, model_score and reader_scores must have the same size '
'in the first dimension.')
model_fom = fom_fn(disease, model_score)
reader_foms = [fom_fn(disease, rad_scores) for rad_scores in reader_scores.T]
average_reader_fom = np.mean(reader_foms)
observed_effect_size = model_fom - average_reader_fom
covariances = _jackknife_covariance_model_vs_readers(disease, model_score,
reader_scores, fom_fn)
off_diagonals = []
for offset in range(1, num_readers):
off_diagonals.extend(np.diag(covariances, k=offset))
cov2 = np.mean(off_diagonals)
# msr = mean squared reader difference
msr = np.var(reader_foms - model_fom, ddof=1)
se = np.sqrt((msr + max(num_readers * cov2, 0)) / num_readers)
dof = (num_readers - 1) * ((msr + max(num_readers * cov2, 0)) / msr)**2
return _test_result(
effect=observed_effect_size,
margin=margin,
se=se,
dof=dof,
coverage=coverage,
effect_size_constituents=EffectSizeConstituents(
model_fom=model_fom, average_reader_fom=average_reader_fom)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def model_vs_readers_orh_index(example_indices,\n reader_indices,\n index_fom_fn,\n coverage=0.95,\n margin=0):\n\n def fom_fn(indices, reader_idx_vector):\n \"\"\"Wraps index_fom_fn to acccept (disease, score) vectors.\"\"\"\n idx_values = set(reader_idx_vector)\n assert len(idx_values) == 1\n idx = idx_values.pop()\n assert idx in ([-1] + list(reader_indices)), idx\n unrecognized = set(indices) - set(example_indices)\n assert unrecognized == set(), unrecognized\n\n return index_fom_fn(indices, idx)\n\n assert -1 not in reader_indices\n disease = np.array(example_indices)\n model_score = -1 * np.ones_like(disease)\n reader_scores = np.column_stack(\n [idx * np.ones_like(disease) for idx in reader_indices])\n\n return model_vs_readers_orh(\n disease,\n model_score,\n reader_scores,\n fom_fn,\n coverage=coverage,\n margin=margin)",
"def evaluation_od_train(x, y, data_name, model_name=\"iforest\", chosen_subspace=None):\n global chosen_model\n\n dim = x.shape[1]\n ano_idx = np.where(y == 1)[0]\n n_ano = len(ano_idx)\n\n # get all the possible feature subset or just use given subset list\n f_subsets = utils.get_subset_candidate(dim, chosen_subspace)\n\n # score anomalies in each subspace, generate the score matrix\n n_subsets = len(f_subsets)\n score_matrix = np.zeros([n_ano, n_subsets])\n for i in tqdm(range(n_subsets)):\n subset = f_subsets[i]\n x_subset = x[:, subset]\n\n\n if model_name == \"iforest\":\n clf = IForest()\n clf.fit(x_subset)\n od_score = clf.decision_scores_\n elif model_name == \"copod\":\n clf = COPOD()\n clf.fit(x_subset)\n od_score = clf.decision_scores_\n elif model_name == \"hbos\":\n clf = HBOS()\n clf.fit(x_subset)\n od_score = clf.decision_scores_\n else:\n raise ValueError(\"unsupported od model\")\n\n od_score = utils.min_max_norm(od_score)\n score_matrix[:, i] = od_score[ano_idx]\n\n if not os.path.exists(eva_root + \"data_od_evaluation/\"):\n os.makedirs(eva_root + \"data_od_evaluation/\")\n\n # score matrix to df\n anomaly_score_df = pd.DataFrame(data=score_matrix, columns=[str(s) for s in f_subsets])\n col_name = anomaly_score_df.columns.tolist()\n col_name.insert(0, 'ano_idx')\n anomaly_score_df[\"ano_idx\"] = ano_idx\n anomaly_score_df = anomaly_score_df.reindex(columns=col_name)\n path1 = eva_root + \"data_od_evaluation/\" + data_name + \"_score_\" + model_name + \".csv\"\n anomaly_score_df.to_csv(path1, index=False)\n\n # get the ground truth (one subspace for each anomaly that the anomaly can obtain the highest anomaly score)\n g_truth_df = pd.DataFrame(columns=[\"ano_idx\", \"exp_subspace\"])\n\n exp_subspaces = []\n for ii, ano_score in enumerate(score_matrix):\n max_score_idx = int(np.argmax(ano_score))\n exp_subset = str(f_subsets[max_score_idx])\n exp_subspaces.append(exp_subset)\n g_truth_df[\"ano_idx\"] = ano_idx\n g_truth_df[\"exp_subspace\"] = exp_subspaces\n\n g_truth_df.astype({\"exp_subspace\": \"object\"})\n path2 = eva_root + \"data_od_evaluation/\" + data_name + \"_gt_\" + model_name + \".csv\"\n g_truth_df.to_csv(path2, index=False)\n return anomaly_score_df, g_truth_df",
"def dual_modality_orh(disease,\n reader_scores,\n fom_fn,\n coverage=0.95,\n margin=0,\n sample_weight=None,\n verbose=True):\n if margin < 0:\n raise ValueError('margin parameter should be nonnegative.')\n\n num_cases, num_readers, num_modalities = reader_scores.shape\n if num_modalities != 2:\n raise ValueError('Only two modalities are supported.')\n\n if sample_weight is not None:\n if len(sample_weight) != num_cases:\n raise ValueError('Length of weights do not match cases.')\n\n if len(disease) != num_cases:\n raise ValueError(\n 'disease, model_score and reader_scores must have the same size '\n 'in the first dimension.')\n\n if 'sample_weight' not in inspect.signature(fom_fn).parameters.keys():\n raise ValueError(f'sample_weight is given but no such argument is supported'\n ' by the given figure-of-merit function {fom_fn.__name__}')\n reader_modality_foms = np.zeros((num_readers, 2), dtype=np.float32)\n for reader_idx in range(num_readers):\n for modality_idx in range(2):\n if sample_weight is not None:\n fom = fom_fn(disease, reader_scores[:, reader_idx, modality_idx],\n sample_weight=sample_weight)\n else:\n fom = fom_fn(disease, reader_scores[:, reader_idx, modality_idx] )\n reader_modality_foms[reader_idx, modality_idx] = fom\n\n reader_foms = np.mean(reader_modality_foms, axis=1)\n modality_foms = np.mean(reader_modality_foms, axis=0)\n average_fom = np.mean(modality_foms)\n\n assert len(reader_foms) == num_readers\n assert len(modality_foms) == 2\n\n # mstr = mean squared reader/modality difference; equation 10.43\n mstr = 0.0\n for reader_idx in range(num_readers):\n for modality_idx in range(2):\n summand = reader_modality_foms[reader_idx, modality_idx]\n summand -= modality_foms[modality_idx]\n summand -= reader_foms[reader_idx]\n summand += average_fom\n mstr += summand**2\n mstr /= num_readers - 1\n\n # Estimate covariance terms according to Equation 10.31\n covmat, indices = _jackknife_covariance_dual_modality(disease, reader_scores,\n fom_fn, sample_weight)\n cov2_samples = []\n cov3_samples = []\n for row_idx in range(2 * num_readers):\n modality, reader = indices[row_idx]\n for col_idx in range(row_idx + 1):\n modality_prime, reader_prime = indices[col_idx]\n if reader != reader_prime:\n if modality == modality_prime:\n cov2_samples.append(covmat[row_idx, col_idx])\n else:\n cov3_samples.append(covmat[row_idx, col_idx])\n cov2 = np.mean(cov2_samples)\n cov3 = np.mean(cov3_samples)\n\n if verbose:\n print('mstr', mstr)\n print('cov2 * 10^5', cov2 * 1e5)\n print('cov3 * 10^5', cov3 * 1e5)\n\n observed_effect_size = modality_foms[1] - modality_foms[0]\n\n # Equation 10.45\n dof = (mstr + max(num_readers * (cov2 - cov3), 0))**2\n dof /= (mstr**2) / (num_readers - 1)\n\n # Equation 10.48\n se = np.sqrt(2 * (mstr + num_readers * max(cov2 - cov3, 0)) / num_readers)\n\n return _test_result(\n effect=observed_effect_size,\n margin=margin,\n se=se,\n dof=dof,\n coverage=coverage,\n effect_size_constituents=EffectSizeConstituents(modality_foms=modality_foms))",
"def test_estimateFullRichness(self):\r\n # Verified with iNEXT.\r\n\r\n # f2 > 0\r\n obs = self.estimator3.estimateFullRichness()\r\n assert_almost_equal(obs, 5.5)\r\n\r\n # f2 == 0\r\n obs = self.estimator4.estimateFullRichness()\r\n assert_almost_equal(obs, 4)",
"def test_wind_mel_model(preds_paths, data_val):\n # Load model predicitions - allowing for possibility of ensemble\n model_preds = np.stack([np.load(pred_path) for pred_path in preds_paths])\n model_preds = np.mean(model_preds, axis=0)\n\n # Get ids and true labels\n labels = []\n ids = []\n for example in data_val:\n labels.append(example[1])\n ids.append(example[2])\n\n # Calculate accuracy and label-predication pairs\n num_examples = 0\n num_correct = 0\n current_id = None\n current_label = None\n c_matrix = np.zeros((50, 50))\n for i in range(len(ids)):\n label = labels[i]\n id = ids[i]\n\n # Check to see if new example has entered\n if id != current_id:\n\n # Evaluate previous id fully - will not enter on first iteration\n if current_id:\n current_prediction_probs /= num_ids\n prediction = np.argmax(current_prediction_probs)\n\n # update lab_pred counts\n c_matrix[int(current_label), int(prediction)] += 1\n\n # Increment correct prediction counter if prediction correct\n if prediction == current_label:\n num_correct += 1\n\n # reset and increment variables\n num_examples += 1\n current_id = id\n current_label = label\n num_ids = 1\n current_prediction_probs = model_preds[i]\n else:\n num_ids += 1\n current_prediction_probs += model_preds[i]\n\n accuracy = num_correct / num_examples\n\n print(f\"{num_correct} / {num_examples} = {accuracy:.4f}\")\n\n return accuracy, c_matrix",
"def test(model, dataloader):\n model.eval()\n device = model.device\n time_start = time.time()\n batch_time = 0.0\n accuracy = 0.0\n all_prob, all_labels = [], []\n \n with torch.no_grad():\n for (batch_seqs, batch_seq_masks, batch_seq_segments, batch_labels) in dataloader:\n batch_start = time.time()\n seqs = batch_seqs.to(device) \n masks = batch_seq_masks.to(device)\n segments = batch_seq_segments.to(device)\n labels = batch_labels.to(device)\n\n _, _, probabilities = model(seqs, masks, segments, labels)\n accuracy += correct_predictions(probabilities, labels)\n batch_time += time.time() - batch_start\n all_prob.extend(probabilities[:, 1].cpu().numpy())\n all_labels.extend(batch_labels)\n batch_time /= len(dataloader)\n total_time = time.time() - time_start\n accuracy /= (len(dataloader.dataset))\n return batch_time, total_time, accuracy, roc_auc_score(all_labels, all_prob)",
"def evaluate(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, show_progress: bool = True,\n device: torch.device = torch.device('cuda:0')):\n with torch.no_grad():\n model.to(device=device)\n sum_cross_entropy = torch.nn.BCEWithLogitsLoss(reduction='sum').to(device=device)\n scoring_loss = 0.\n scoring_predictions = []\n scoring_labels = []\n for scoring_data in tqdm(dataloader, total=len(dataloader), desc=\"Evaluating model\",\n disable=not show_progress, position=1):\n \n # Get samples as lists\n labels, inputs, sequence_lengths, counts_per_sequence, sample_ids = scoring_data\n \n # Apply attention-based sequence reduction and create minibatch\n labels, inputs, sequence_lengths, n_sequences = model.reduce_and_stack_minibatch(\n labels, inputs, sequence_lengths, counts_per_sequence)\n \n # Compute predictions from reduced sequences\n logit_outputs = model(inputs, n_sequences)\n prediction = torch.sigmoid(logit_outputs)\n \n # Compute mean of losses on-the-fly\n scoring_loss += sum_cross_entropy(logit_outputs, labels[..., -1]) / len(dataloader.dataset)\n \n # Store predictions and labels\n scoring_predictions.append(prediction)\n scoring_labels.append(labels[..., -1])\n \n # Compute BACC, F1, and AUC score\n scoring_predictions = torch.cat(scoring_predictions, dim=0).float()\n scoring_predictions_threshold = (scoring_predictions > 0.5).float()\n scoring_labels = torch.cat(scoring_labels).float()\n \n scoring_labels = scoring_labels.cpu().numpy()\n scoring_predictions = scoring_predictions.cpu().numpy()\n scoring_predictions_threshold = scoring_predictions_threshold.cpu().numpy()\n \n roc_auc = metrics.roc_auc_score(scoring_labels, scoring_predictions, average=None)\n bacc = metrics.balanced_accuracy_score(y_true=scoring_labels, y_pred=scoring_predictions_threshold)\n f1 = metrics.f1_score(y_true=scoring_labels, y_pred=scoring_predictions_threshold, average='binary',\n pos_label=1)\n return roc_auc, bacc, f1, scoring_loss",
"def classic_model_testing():\n dataset_path = \"/home/kateryna/Documents\"\n X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])\n contam = 0.08\n models = [XGBOD(), OCSVM(contamination=contam), IForest(contamination=contam, n_estimators=150), XGBOD(learning_rate=0.01, n_estimators=150),\n COPOD(contamination=contam)]\n for model in models:\n model_name = model.__str__().split('(')[0]\n clf = model\n clf.fit(X_train, y_train)\n\n y_train_pred = clf.labels_\n y_train_scores = clf.decision_scores_\n\n # get the prediction on the test data\n # 0 stands for inliers and 1 for outliers.\n y_test_pred = clf.predict(X_test)\n y_test_scores = clf.decision_function(X_test)\n # y_probabilities = clf.predict_proba(X_test)\n print(\"\\nOn Training Data:\")\n evaluate_print(model_name, y_train, y_train_scores)\n print(\"\\nOn Test Data:\")\n evaluate_print(model_name, y_test, y_test_scores)\n print('roc auc', roc_auc_score(y_test, y_test_scores))\n\n conf_mtx_test = confusion_matrix(y_test, y_test_pred, labels=[0, 1])\n print(conf_mtx_test)\n conf_mtx_train = confusion_matrix(y_train, y_train_pred, labels=[0, 1])\n print(conf_mtx_train)\n print('~~~')",
"def compare(self, model, u_model, obs, u_obs, bprop, label='', plot=False):\n self.debug.start_function('compare')\n pyprint.check_same_length(model, obs, 'model and obs arrays')\n pyprint.check_same_length(u_model, u_obs, 'u_model and u_obs arrays')\n\n weight = self.mcmc_version.weights[bprop]\n inv_sigma2 = 1 / (u_model ** 2 + u_obs ** 2)\n lh = -0.5 * weight * ((model - obs) ** 2 * inv_sigma2\n + np.log(2 * np.pi / inv_sigma2))\n self.debug.print_(f'lhood breakdown: {label} {lh}')\n\n if plot:\n self.plot_compare(model=model, u_model=u_model, obs=obs,\n u_obs=u_obs, bprop=label)\n self.debug.end_function()\n return lh.sum()",
"def score_intro_model():\n k = 100\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with open(trained_model_file) as p:\n model = pickle.load(p)\n mc = ModelChooser([model])\n dp = DataPrep(training=False)\n dp.prepare(n_components=k, use_cached_nmf='/home/ubuntu/ca_bills_project/data/extra/nmf_100_05-23-17-08-23.pkl',\n use_cached_tfidf=\"/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl\", cache_tfidf=True, test=True)\n X_test, y_test = dp.subset(features)\n\n\n mc.score(X_test, y_test)",
"def test_models(directorio=''):\r\n \r\n print('The trained models will be tested now')\r\n start = time.time()\r\n \r\n busqueda = \"ls \" + directorio + \"/*.h5 > model_names.txt\"\r\n\r\n os.system(busqueda)\r\n\r\n X = np.load(directorio + '/Xtest.npy')\r\n diccio = np.load(directorio + '/feature_standarisation.npy').item()\r\n y = pd.read_csv(directorio + '/dbtest.csv')['target'].values\r\n\r\n X = (X - diccio['mean'])/diccio['std']\r\n x = np.reshape(X,(X.shape[0],X.shape[2]))\r\n \r\n with open('model_names.txt','r') as f:\r\n for line in f:\r\n modelo = models.load_model(line[:len(line)-1])\r\n nombre = line.split('/')[1]\r\n outpred = modelo.predict(x)\r\n prediction = outpred >= 0.5\r\n \r\n cost = -(np.dot(y,np.log10(outpred)) + \\\r\n np.dot((1-y),np.log10(1-outpred)))/y.shape[0]\r\n precision,recall,fscore,support = PRFS(y, prediction)\r\n \r\n with open(directorio + '/test_results.txt','a') as tr:\r\n tr.write(nombre + '\\n')\r\n tr.write('cost function: '+str(cost[0])+'\\n')\r\n tr.write('samples: '+str(support)+'\\n')\r\n tr.write('precision: '+str(np.round(precision*100,2))+'\\n')\r\n tr.write('recall: '+str(np.round(recall*100,2))+'\\n')\r\n tr.write('f1-score: '+str(np.round(fscore*100,2))+'\\n')\r\n tr.write('\\n')\r\n tr.close()\r\n \r\n print('The test of all trained models lasted ', round(time.time()-start,2),' s')\r\n os.system('rm model_names.txt')\r\n \r\n return",
"def testB(scores=\"\"):\n \"\"\"\n USE\n from armor.tests import roughwork as rw\n reload(rw)\n rw.testB('/home/k/ARMOR/documents/2013-final-report/1383883165/testA.pydump')\n \"\"\"\n #from armor.objects3 import kongrey # the data \n #kongrey.fix()\n from armor.objects3 import kongreywrf2 # the models\n kongreywrf2.fix() #fixing the w.name's for w in kongreywrf\n \n testBfolder = outputFolder + 'testB' + timeString + '/'\n if not os.path.exists(testBfolder):\n os.makedirs(testBfolder)\n if scores == \"\" :\n testAfile = [v for v in sorted(os.listdir(outputFolder), reverse=True) if 'testA' in v][0]\n print 'loading', outputFolder + testAfile\n scores = pickle.load(open(outputFolder + testAfile))\n elif isinstance(scores, str):\n print 'loading', scores\n scores = pickle.load(open(scores))\n testAfile = scores\n else:\n testAfile = \"\"\n print 'sleeping 1 seconds'\n time.sleep(1)\n Tlist = [v.dataTime for v in kongrey]\n top8matches = {}\n for T in Tlist:\n print '\\n...............................\\nTime:', T\n pairList = [v for v in scores.keys() if T in v[0]]\n print pairList[:5]\n if len(pairList) == 0:\n continue\n pairList.sort(reverse=True, key=lambda v:scores[v])\n top8matches[T] = [v[1] for v in pairList][:8]\n v0 = pairList[0][0] # temporary variable\n print 'Top 8 matches:', '\\n'.join([v+ '\\t' + str(scores[(v0,v)]) for v in top8matches[T]])\n pickle.dump(top8matches, open(testBfolder+ 'top8matches.pydump', 'w'))\n\n # construct the 3x3 panel\n imList = kongrey(T) + [kongreywrf2(M)[0] for M in top8matches[T]]\n #kongrey.load(T)\n #for im in imList[1:]:\n # im.load()\n #debug\n #print imList\n img9 = construct3by3(imList)\n img9.imagePath = testBfolder + 'best8matches' + T + '.png'\n print 'saving images to', img9.imagePath\n img9.saveImage(dpi=600)\n try:\n open(testBfolder + 'notes.txt').write('source:\\n'+ testAfile + '\\n\\n' +str(scores))\n except:\n print 'error in writing ' + testBfolder + 'notes.txt' \n pass\n return top8matches",
"def simulate_model_vs_readers(disease,\n model_auc,\n reader_auc,\n sigma_r,\n sigma_c,\n num_readers,\n rng=np.random):\n mu = auc_to_mu(model_auc)\n delta_mu = auc_to_mu(reader_auc) - mu\n return simulate_single_modality(\n disease,\n mu,\n delta_mu,\n sigma_r=sigma_r,\n sigma_c=sigma_c,\n num_readers=num_readers,\n rng=rng)",
"def test_1():\n clf1 = mord.OrdinalRidge(alpha=0.)\n clf1.fit(X, y)\n\n clf2 = mord.LogisticAT(alpha=0.)\n clf2.fit(X, y)\n\n # the score is - absolute error, 0 is perfect\n # assert clf1.score(X, y) < clf2.score(X, y)\n\n clf3 = mord.LogisticSE(alpha=0.)\n clf3.fit(X, y)\n pred3 = clf3.predict(X)\n pred2 = clf2.predict(X)\n\n # check that it predicts better than the surrogate\n # for other loss\n assert np.abs(pred2 - y).mean() <= np.abs(pred3 - y).mean()\n # # the score is - absolute error, 0 is perfect\n # assert_almost_equal(clf.score(X, y), 0., places=2)\n #\n # clf = mord.LogisticIT(alpha=0.)\n # clf.fit(X, y)\n # # the score is classification error, 1 is perfect\n # assert_almost_equal(clf.score(X, y), 1., places=2)\n\n # test on sparse matrices\n X_sparse = sparse.csr_matrix(X)\n clf4 = mord.LogisticAT(alpha=0.)\n clf4.fit(X_sparse, y)\n pred4 = clf4.predict(X_sparse)\n assert metrics.mean_absolute_error(y, pred4) < 1.",
"def test(model, dataloader, params, args, val):\n\n # evaluation mode\n model.eval()\n\n # initialise buffers\n dice_lv_buffer = []\n dice_myo_buffer = []\n dice_rv_buffer = []\n\n mcd_lv_buffer = []\n hd_lv_buffer = []\n mcd_myo_buffer = []\n hd_myo_buffer = []\n mcd_rv_buffer = []\n hd_rv_buffer = []\n\n mean_mag_grad_detJ_buffer = []\n negative_detJ_buffer = []\n\n\n with tqdm(total=len(dataloader)) as t:\n # iterate over validation subjects\n for idx, (image_ed_batch, image_es_batch, label_ed_batch, label_es_batch) in enumerate(dataloader):\n # (data all in shape of (c, N, H, W))\n\n # extend to (N, c, H, W)\n image_ed_batch = image_ed_batch.permute(1, 0, 2, 3).to(device=args.device)\n image_es_batch = image_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n label_es_batch = label_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n\n with torch.no_grad():\n # compute optical flow and warped ED images towards ES\n dvf = model(image_ed_batch, image_es_batch)\n\n # transform label mask of ES frame\n warped_label_es_batch = resample_transform(label_es_batch.float(), dvf, interp='nearest')\n\n\n \"\"\" Move data to device \"\"\"\n if args.cuda:\n # move data to cpu to calculate metrics\n # (the axis permutation is to comply with metric calculation code which takes input shape H, W, N)\n warped_label_es_batch = warped_label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.cpu().numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n else:\n # CPU version of the code\n warped_label_es_batch = warped_label_es_batch.squeeze(1).numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n \"\"\"\"\"\"\n\n \"\"\" Calculate the metrics (only works with SAX images) \"\"\"\n # (optional) extract 3 slices (apical, mid-ventricle and basal)\n if not args.all_slices:\n num_slices = label_ed_batch.shape[-1]\n apical_idx = int(round((num_slices - 1) * 0.75)) # 75% from basal\n mid_ven_idx = int(round((num_slices - 1) * 0.5)) # 50% from basal\n basal_idx = int(round((num_slices - 1) * 0.25)) # 25% from basal\n slices_idx = [apical_idx, mid_ven_idx, basal_idx]\n\n warped_label_es_batch = warped_label_es_batch[:, :, slices_idx]\n label_ed_batch = label_ed_batch[:, :, slices_idx]\n dvf = dvf[slices_idx, :, :, :] # needed for detJac\n\n # dice\n dice_lv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=1)\n dice_myo = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=2)\n dice_rv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=3)\n\n dice_lv_buffer += [dice_lv]\n dice_myo_buffer += [dice_myo]\n dice_rv_buffer += [dice_rv]\n\n # contour distances\n mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=1, dx=params.pixel_size)\n mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=2, dx=params.pixel_size)\n mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=3, dx=params.pixel_size)\n\n # determinant of Jacobian\n mean_grad_detJ, mean_negative_detJ = detJac_stack(dvf)\n\n\n # update buffers\n mcd_lv_buffer += [mcd_lv]\n hd_lv_buffer += [hd_lv]\n mcd_myo_buffer += [mcd_myo]\n hd_myo_buffer += [hd_myo]\n mcd_rv_buffer += [mcd_rv]\n hd_rv_buffer += [hd_rv]\n\n mean_mag_grad_detJ_buffer += [mean_grad_detJ]\n negative_detJ_buffer += 
[mean_negative_detJ]\n\n t.update()\n\n # construct metrics dict\n metrics = {'dice_lv_mean': np.mean(dice_lv_buffer), 'dice_lv_std': np.std(dice_lv_buffer),\n 'dice_myo_mean': np.mean(dice_myo_buffer), 'dice_myo_std': np.std(dice_myo_buffer),\n 'dice_rv_mean': np.mean(dice_rv_buffer), 'dice_rv_std': np.std(dice_rv_buffer),\n\n 'mcd_lv_mean': np.mean(mcd_lv_buffer), 'mcd_lv_std': np.std(mcd_lv_buffer),\n 'mcd_myo_mean': np.mean(mcd_myo_buffer), 'mcd_myo_std': np.std(mcd_myo_buffer),\n 'mcd_rv_mean': np.mean(mcd_rv_buffer), 'mcd_rv_std': np.std(mcd_rv_buffer),\n\n 'hd_lv_mean': np.mean(hd_lv_buffer), 'hd_lv_std': np.std(hd_lv_buffer),\n 'hd_myo_mean': np.mean(hd_myo_buffer), 'hd_myo_std': np.std(hd_myo_buffer),\n 'hd_rv_mean': np.mean(hd_rv_buffer), 'hd_rv_std': np.std(hd_rv_buffer),\n\n 'mean_mag_grad_detJ_mean': np.mean(mean_mag_grad_detJ_buffer),\n 'mean_mag_grad_detJ_std': np.std(mean_mag_grad_detJ_buffer),\n\n 'negative_detJ_mean': np.mean(negative_detJ_buffer),\n 'negative_detJ_std': np.std(negative_detJ_buffer)\n }\n\n\n if not val:\n # testing only: save all metrics evaluated for all test subjects in pandas dataframe\n test_result_dir = os.path.join(args.model_dir, \"test_results\")\n if not os.path.exists(test_result_dir):\n os.makedirs(test_result_dir)\n\n # save metrics results mean & std\n xutils.save_dict_to_json(metrics,\n f\"{test_result_dir}/test_results_3slices_{not args.all_slices}.json\")\n\n # save accuracy metrics of every subject\n subj_id_buffer = dataloader.dataset.dir_list\n df_buffer = []\n column_method = ['DL'] * len(subj_id_buffer)\n for struct in ['LV', 'MYO', 'RV']:\n if struct == 'LV':\n ls_dice = dice_lv_buffer\n ls_mcd = mcd_lv_buffer\n ls_hd = hd_lv_buffer\n elif struct == 'MYO':\n ls_dice = dice_myo_buffer\n ls_mcd = mcd_myo_buffer\n ls_hd = hd_myo_buffer\n elif struct == 'RV':\n ls_dice = dice_rv_buffer\n ls_mcd = mcd_rv_buffer\n ls_hd = hd_rv_buffer\n\n ls_struct = [struct] * len(subj_id_buffer)\n data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'Structure': ls_struct,\n 'Dice': ls_dice,\n 'MCD': ls_mcd,\n 'HD': ls_hd}\n df_buffer += [pd.DataFrame(data=data)]\n # concatenate df and save\n metrics_df = pd.concat(df_buffer, axis=0)\n metrics_df.to_pickle(f\"{test_result_dir}/test_accuracy_results_3slices_{not args.all_slices}.pkl\")\n\n # save detJac metrics for every subject\n jac_data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'GradDetJac': mean_mag_grad_detJ_buffer,\n 'NegDetJac': negative_detJ_buffer}\n jac_df = pd.DataFrame(data=jac_data)\n jac_df.to_pickle(f\"{test_result_dir}/test_Jacobian_results_3slices{not args.all_slices}.pkl\")\n\n return metrics",
"def main():\n checkpoint_steps = 1e7\n def load_rmleague(league_file):\n if os.path.isfile(league_file):\n with open(league_file, 'rb') as f:\n return pickle.load(f)\n else:\n rmleague = League(main_agents=1, main_exploiters=0, league_exploiters=0, checkpoint_steps=checkpoint_steps)\n return rmleague\n league_file = os.path.join(os.getcwd(), 'data', 'league')\n rmleague = load_rmleague(league_file)\n rmleague.set_ckpt_steps(checkpoint_steps)\n # Start main-agent's learner processes \n agent = rmleague.get_player_agent('main_agent', 0)\n comm.send(agent, dest=learners[0])\n # Start/Add main-exploiter's learner processes\n if len(learners) >= 2:\n if len(rmleague._learning_agents['main_exploiter']) == 0:\n rmleague.add_main_exploiter()\n agent = rmleague.get_player_agent('main_exploiter', 0)\n comm.send(agent, dest=learners[1])\n # Start/Add league-exploiter's learner processes\n if len(learners) >= 3:\n if len(rmleague._learning_agents['league_exploiter']) == 0:\n rmleague.add_league_exploiter()\n agent = rmleague.get_player_agent('league_exploiter', 0)\n comm.send(agent, dest=learners[2])\n # Send opponent to Actor\n opponent_coordinator = dict()\n player_types = {0: 'main_agent', 1: 'main_exploiter', 2: 'league_exploiter'}\n for i, actor in enumerate(actors):\n player = rmleague.get_player(player_types[i], 0)\n opponent, match_bool = player.get_match()\n opponent_coordinator[i] = opponent\n for a in actor:\n comm.send(opponent.get_agent(), dest=a)\n # Setup summary writer\n dt_string = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n log_dir = os.path.join(os.getcwd(), \"data\", \"logs\", dt_string)\n summary_writer = tf.summary.create_file_writer(log_dir)\n # Run-time variables\n main_p_t = time.time()\n learner_updated = False\n # Start coordinator loop\n while True:\n # Receive trajectory outcome from Actor and perhaps checkpoint player\n for ia, actor in enumerate(actors):\n player = rmleague.get_player(player_types[ia], 0)\n if player.ready_to_checkpoint():\n rmleague.add_player(player.checkpoint())\n for ja, a in enumerate(actor):\n if comm.Iprobe(source=a, tag=5):\n #if actors_rcv_traj_p[ia][ja].Get_status():\n #traj_outcome = actors_rcv_traj_p[ia][ja].wait()\n traj_outcome = comm.recv(source=a, tag=5)\n opponent = opponent_coordinator[ia]\n rmleague.update(player, opponent, traj_outcome['outcome'])\n comm.send(opponent.get_agent(), dest=a, tag=4)\n # Receive new agent and ppo summaries from Learner\n for il, learner in enumerate(learners):\n # Update agent of player\n if comm.Iprobe(source=learner, tag=3):\n #if learner_rcv_p[il].Get_status():\n learner_updated = True\n results = comm.recv(source=learner, tag=3)\n player = rmleague.get_player(player_types[il], 0)\n player.set_agent(results['agent'])\n player.incre_updates()\n # Report and log each Player's PPO details\n with summary_writer.as_default():\n tf.summary.scalar(player.get_name()+':approxkl', results['approxkl'], step=player.get_updates())\n tf.summary.scalar(player.get_name()+':loss', results['loss'], step=player.get_updates())\n tf.summary.scalar(player.get_name()+':entropy', results['entropy'], step=player.get_updates())\n tf.summary.scalar(player.get_name()+':return', results['return'], step=player.get_updates())\n tf.summary.scalar(player.get_name()+':rewards', results['rewards'], step=player.get_updates())\n tf.summary.scalar(player.get_name()+':total_steps', player.get_agent().get_steps(), step=player.get_updates())\n print(f\"PPO info (learner = {str(learner)}, steps = {player.get_agent().get_steps()}, time = 
{rmleague.training_time}):\")\n print(player.get_name()+\":approxkl =\", results['approxkl'])\n print(player.get_name()+\":loss =\", results['loss'])\n print(player.get_name()+\":entropy =\", results['entropy'])\n print(player.get_name()+\":return =\", results['return'])\n print(player.get_name()+\":rewards =\", results['rewards'])\n print(player.get_name()+\":number of fresh trajectories =\", str(results['n_fresh_trajs']))\n print(player.get_name()+\":average response time =\", str(results['average_response_time']))\n print()\n sys.stdout.flush()\n summary_writer.flush()\n # Increment training timer and save updated rmLeague\n if learner_updated:\n rmleague.training_time += time.time() - main_p_t\n main_p_t = time.time()\n # Save progress\n rmleague.nupdates += 1\n Save(rmleague, os.path.join(os.getcwd(), 'data'), league_file)\n learner_updated = False",
"def print_model_quality_report(pred_path: str, ground_path: str):\n predictions = np.load(pred_path).argmax(axis=1)\n groundtruth = pd.read_csv(ground_path).open_channels.values\n groups = pd.read_csv(ground_path).group.values\n\n print(\"Macro F1 score, F1 scores and confusion matrix per group:\")\n for group in range(6):\n pred = predictions[groups == group]\n true = groundtruth[groups == group]\n print(f\"Group {group} macro F1 score, F1 scores and confusion matrix:\")\n print(f1_score(true, pred, average='macro'))\n print(f1_score(true, pred, average=None))\n print(confusion_matrix(true, pred, normalize='true').round(3))\n print()\n\n print(\"Batch 5 macro F1 score, F1 scores and confusion matrix:\")\n pred = predictions[2_000_000:2_500_000]\n true = groundtruth[2_000_000:2_500_000]\n print(f1_score(true, pred, average='macro'))\n print(f1_score(true, pred, average=None))\n print(confusion_matrix(true, pred, normalize='true').round(3))\n print()\n\n print(\"Batch 9 macro F1 score, F1 scores and confusion matrix:\")\n pred = predictions[4_500_000:5_000_000]\n true = groundtruth[4_500_000:5_000_000]\n print(f1_score(true, pred, average='macro'))\n print(f1_score(true, pred, average=None))\n print(confusion_matrix(true, pred, normalize='true').round(3))\n print()\n\n print(\"Overall OOF macro F1 score, F1 scores and confusion matrix:\")\n print(f1_score(groundtruth[:5_000_000], predictions[:5_000_000], average='macro'))\n print(f1_score(groundtruth[:5_000_000], predictions[:5_000_000], average=None))\n print(confusion_matrix(groundtruth[:5_000_000], predictions[:5_000_000], normalize='true').round(3))\n print()",
"def report_model(model, R_deg, mask_good, batch_size, steps, elts_true, display=True):\n\n # Mask for perturbed (\"bad\") elements\n mask_bad = ~mask_good\n \n # Get scores on the whole data set\n pred = model.predict(ds, steps=steps)\n elts, R, u_pred, z, scores_all = pred\n\n # Consolidate results to batch_size rows\n num_rows = elts.shape[0]\n score_cols = scores_all.shape[1]\n row_idx = np.arange(num_rows, dtype=np.int32) % batch_size\n elts = elts[0:batch_size]\n R = R[0:batch_size]\n u_pred = u_pred[0:batch_size]\n\n # Model to predict position from elements\n ts = model.direction.ts\n model_pos = make_model_ast_pos(ts=ts, batch_size=batch_size)\n\n # Difference between predicted and true trajectory in Kepler 2 body model\n traj_err = traj_diff(elts, elts_true, model_pos)\n\n # Consolidate the scores; create 2 extra columns for sigma and t_score\n scores = np.zeros((batch_size, score_cols+2))\n for batch_idx in range(batch_size):\n mask = (row_idx == batch_idx)\n scores[batch_idx, 0:score_cols] = scores_all[mask].sum(axis=0)\n\n # Unpock scores\n raw_score = scores[:,0]\n mu = scores[:,1]\n sigma2 = scores[:,2]\n objective = scores[:,3]\n\n # Compute derived scores after aggregation\n sigma = np.sqrt(sigma2)\n eff_obs = raw_score - mu\n t_score = eff_obs / sigma\n \n # Pack sigma and t_score at the end of scores\n scores[:, 4] = sigma\n scores[:, 5] = t_score\n \n # Error in orbital elements\n elt_err = np.abs(elts - elts_true)\n # Convert angles from radians to degrees\n elt_err[:, 2:6] = np.rad2deg(elt_err[:, 2:6])\n # Mean element error on good and bad masks\n elt_err_g = elt_err[mask_good]\n elt_err_b = elt_err[mask_bad]\n mean_err_g = np.mean(elt_err_g[0:6], axis=0)\n mean_err_b = np.mean(elt_err_b[0:6], axis=0)\n\n if display:\n # Report trajectory error\n report_model_attribute(traj_err, mask_good, 'Trajectory Error (AU) vs. True Elements')\n\n # Report errors in orbital elements\n print('\\nError in orbital elements:')\n print(f'(Angles shown in degrees)')\n print(' a e inc Omega omega f')\n print(f'Good: {mean_err_g[0]:8.6f}, {mean_err_g[1]:8.6f}, {mean_err_g[2]:8.6f}, '\n f'{mean_err_g[3]:8.6f}, {mean_err_g[4]:8.6f}, {mean_err_g[5]:8.6f}, ')\n print(f'Bad : {mean_err_b[0]:8.6f}, {mean_err_b[1]:8.6f}, {mean_err_b[2]:8.6f}, '\n f'{mean_err_b[3]:8.6f}, {mean_err_b[4]:8.6f}, {mean_err_b[5]:8.6f}, ')\n \n # Report effective observations, mu, sigma, and t_score \n report_model_attribute(raw_score, mask_good, 'Raw Score')\n report_model_attribute(mu, mask_good, 'Mu')\n report_model_attribute(eff_obs, mask_good, 'Effective Observations')\n report_model_attribute(sigma, mask_good, 'Sigma')\n report_model_attribute(t_score, mask_good, 't_score')\n report_model_attribute(objective, mask_good, 'Objective Function')\n report_model_attribute(R, mask_good, 'Resolution R')\n\n return scores, traj_err, elt_err",
"def conditionDecodings(data, rois, ncvs=100, effects=False, motorOutput=False,confusion=False, decoder='similarity', nproc=5):\n \n ncond = data.shape[1] # two motor outputs\n nSubjs = data.shape[2]\n\n nsamples = nSubjs * ncond\n stats = np.zeros((len(rois),nsamples))\n rmatches = np.zeros((len(rois),))\n rmismatches = np.zeros((len(rois),))\n\n # Label array for supervised learning\n labels = np.tile(range(ncond),nSubjs)\n subjarray = np.repeat(range(nSubjs),ncond)\n\n # Run SVM classifications on network-level activation patterns across subjects\n confusion_mats = []\n roicount = 0\n for roi in rois:\n roi_ind = np.where(glasser2==roi+1)[0]\n nfeatures = len(roi_ind)\n roi_ind.shape = (len(roi_ind),1) \n\n svm_mat = np.zeros((nsamples,roi_ind.shape[0]))\n samplecount = 0\n for scount in range(nSubjs):\n roidata = np.squeeze(data[roi_ind,:,scount])\n svm_mat[samplecount:(samplecount+ncond),:] = roidata.T\n\n samplecount += ncond\n\n # Spatially demean matrix across features\n# samplemean = np.mean(svm_mat,axis=1)\n# samplemean.shape = (len(samplemean),1)\n# svm_mat = svm_mat - samplemean\n \n scores, rmatch, rmismatch, confusion_mat = randomSplitLOOBaselineCV(ncvs, svm_mat, labels, subjarray, \n motorOutput=motorOutput, decoder=decoder, nproc=nproc)\n stats[roicount,:] = scores\n rmatches[roicount] = np.mean(rmatch)\n rmismatches[roicount] = np.mean(rmismatch)\n confusion_mats.append(confusion_mat)\n roicount += 1\n \n if effects and confusion:\n return stats, rmatch, rmismatch, confusion_mats\n if effects and not confusion:\n return stats, rmatch, rmismatch\n if confusion and not effects:\n return stats, confusion_mats\n else:\n return stats",
"def test_compare_blackrockio_with_matlabloader_v21(self):\n\n dirname = self.get_local_path('blackrock/blackrock_2_1/l101210-001')\n # First run with parameters for ns5, then run with correct parameters for ns2\n parameters = [('blackrock/blackrock_2_1/l101210-001_nev-02_ns5.mat',\n {'nsx_to_load': 5, 'nev_override': '-'.join([dirname, '02'])}, 96),\n ('blackrock/blackrock_2_1/l101210-001.mat', {'nsx_to_load': 2}, 6)]\n for param in parameters:\n # Load data from Matlab generated files\n ml = scipy.io.loadmat(self.get_local_path(param[0]))\n lfp_ml = ml['lfp'] # (channel x time) LFP matrix\n ts_ml = ml['ts'] # spike time stamps\n elec_ml = ml['el'] # spike electrodes\n unit_ml = ml['un'] # spike unit IDs\n wf_ml = ml['wf'] # waveforms\n mts_ml = ml['mts'] # marker time stamps\n mid_ml = ml['mid'] # marker IDs\n\n # Load data from original data files using the Neo BlackrockIO\n reader = BlackrockRawIO(dirname, **param[1])\n reader.parse_header()\n\n # Check if analog data are equal\n stream_index = 0\n self.assertGreater(reader.signal_channels_count(stream_index), 0)\n\n for c in range(0, param[2]):\n raw_sigs = reader.get_analogsignal_chunk(channel_indexes=[c])\n raw_sigs = raw_sigs.flatten()\n assert_equal(raw_sigs[:], lfp_ml[c, :])\n\n # Check if spikes in channels are equal\n nb_unit = reader.spike_channels_count()\n for spike_channel_index in range(nb_unit):\n unit_name = reader.header['spike_channels'][spike_channel_index]['name']\n # name is chXX#YY where XX is channel_id and YY is unit_id\n channel_id, unit_id = unit_name.split('#')\n channel_id = int(channel_id.replace('ch', ''))\n unit_id = int(unit_id)\n\n matlab_spikes = ts_ml[(elec_ml == channel_id) & (unit_ml == unit_id)]\n\n io_spikes = reader.get_spike_timestamps(spike_channel_index=spike_channel_index)\n assert_equal(io_spikes, matlab_spikes)\n\n # Check all waveforms\n io_waveforms = reader.get_spike_raw_waveforms(\n spike_channel_index=spike_channel_index)\n io_waveforms = io_waveforms[:, 0, :] # remove dim 1\n matlab_wf = wf_ml[np.nonzero(\n np.logical_and(elec_ml == channel_id, unit_ml == unit_id)), :][0]\n assert_equal(io_waveforms, matlab_wf)\n\n # Check if digital input port events are equal\n nb_ev_chan = reader.event_channels_count()\n # ~ print(reader.header['event_channels'])\n for ev_chan in range(nb_ev_chan):\n name = reader.header['event_channels']['name'][ev_chan]\n # ~ print(name)\n if name == 'digital_input_port':\n all_timestamps, _, labels = reader.get_event_timestamps(\n event_channel_index=ev_chan)\n\n for label in np.unique(labels):\n python_digievents = all_timestamps[labels == label]\n matlab_digievents = mts_ml[mid_ml == int(label)]\n assert_equal(python_digievents, matlab_digievents)",
"def test_tpr_fwer(self, syn_genomic_data, syn_labels, syn_labels_0based, syn_labels_cat, syn_fm, syn_idx, rep, syn_true_pvalues):\n\n window_lengths = [35]\n\n best_params_montaez = {'epochs': 500, 'l1_reg': 0.001, 'l2_reg': 0.0001,'lr' :1e-05, 'dropout_rate':0.3, 'hidden_neurons':64, 'n_snps': n_total_snps}\n\n # n_permutations = 2\n\n def combi_compute_pvalues(d, x, fm, l,filter_window_size,pf,ps,k):\n #clf, syn_genomic_data[str(i)][:], fm_2d[str(i)][:], syn_labels[str(i)], 35, 2, 2, 30\n idx, pvalues, _ = combi_method(d, x,fm, l,filter_window_size,pf,ps,k)\n\t\t\t#combi_method(classifier,data, fm, labels, filter_window_size, pnorm_filter, psvm, top_k)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[idx] = pvalues\n del d, l\n return pvalues_filled\n\n def challenger_compute_pvalues(d, x, l_0b, l, idx):\n is_only_zeros = False\n with tensorflow.Session().as_default():\n\n model = create_montaez_dense_model(best_params_montaez)\n\n model.fit(x=x[idx.train], y=l_0b[idx.train],\n validation_data=(x[idx.test], l_0b[idx.test]),\n epochs=best_params_montaez['epochs'],\n callbacks=[\n ReduceLROnPlateau(monitor='val_loss',\n mode='min'),\n ])\n\n model = iutils.keras.graph.model_wo_softmax(model)\n analyzer = innvestigate.analyzer.LRPAlpha2Beta1(model)\n weights = analyzer.analyze(x).sum(0)\n\n if np.max(abs(weights)) < 0.005:\n fig, axes = plt.subplots(1)\n is_only_zeros = True\n axes.plot(np.absolute(weights).sum(axis=1))\n fig.savefig(os.path.join(IMG_DIR, 'test.png'))\n\n pvalues_list = np.zeros((len(window_lengths), weights.shape[0]))\n for i, filter_size in enumerate(window_lengths):\n top_indices_sorted, _ = postprocess_weights(\n weights, top_k, filter_size, p_svm, p_pnorm_filter)\n pvalues = chi_square(d[:, top_indices_sorted], l)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[top_indices_sorted] = pvalues\n pvalues_list[i] = pvalues_filled\n del d, x, l\n\n return pvalues_list, is_only_zeros\n\n fm_2d = syn_fm(\"2d\")\n fm_3d = syn_fm(\"3d\")\n clf = LinearSVC(penalty='l2', loss='hinge', C=1.0000e-05, dual=True, tol=1e-3, verbose=0)\n\n pvalues_per_run_combi = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(\n combi_compute_pvalues)(clf, syn_genomic_data[str(i)][:], fm_2d[str(i)][:], syn_labels[str(i)], 35, 2, 2, 30) for i in tqdm(range(rep))))\n\n pvalues_per_run_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(\n chi_square)(syn_genomic_data[str(i)][:], syn_labels[str(i)]) for i in tqdm(range(rep))))\n\n # len(thresholds) * len(window_sizes) * 10020\n a = Parallel(n_jobs=-1, require='sharedmem')(delayed(\n challenger_compute_pvalues)(syn_genomic_data[str(i)][:], fm_3d[str(i)][:], syn_labels_cat[str(i)], syn_labels[str(i)], syn_idx[str(i)]) for i in tqdm(range(rep)))\n\n # INNvestigate bugfix\n zeros_index = np.array(list(np.array(a)[:, 1]))\n pvalues_per_run_dense = np.array(list(np.array(a)[:, 0]))\n\n pvalues_per_run_combi = pvalues_per_run_combi[np.logical_not(zeros_index)]\n pvalues_per_run_dense = pvalues_per_run_dense[np.logical_not(zeros_index)]\n pvalues_per_run_rpvt = pvalues_per_run_rpvt[np.logical_not(zeros_index)]\n true_pvalues = syn_true_pvalues[np.logical_not(zeros_index)]\n\n # COMBI\n res_combi = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(pvalues_per_run_combi, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n tpr_combi, _, fwer_combi, precision_combi = res_combi.T\n\n\n # T_star - WARNING TAKES FOREVER\n tpr_permuted = 0\n fwer_permuted = 0\n precision_permuted = 0\n\n 
\"\"\"\n for i in range(rep):\n with tensorflow.Session().as_default():\n\n model = create_montaez_dense_model_2(best_params_montaez_2)\n t_star = permuted_deepcombi_method(model, h5py_data[str(i)][:], fm_3d[str(i)][:], labels[str(i)], labels_cat[str(i)], n_permutations, alpha_sig_toy, filter_window_size, top_k, mode='all' )\n ground_truth = np.zeros((1,n_total_snps),dtype=bool)\n ground_truth[:,5000:5020] = True\n tpr, _, fwer, precision = compute_metrics(pvalues_per_run_rpvt[i], ground_truth, t_star) \n tpr_permuted += tpr\n fwer_permuted += fwer\n precision_permuted += precision\n tpr_permuted/=rep\n fwer_permuted/=rep\n precision_permuted/=rep\n \"\"\"\n\n # RPVT\n\n res_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(\n pvalues_per_run_rpvt, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\n tpr_rpvt, _, fwer_rpvt, precision_rpvt = res_rpvt.T\n\n # Plot\n fig, axes = plt.subplots(2)\n fig.set_size_inches(18.5, 10.5)\n ax1, ax2 = axes\n\n ax1.set_ylim(0, 0.45)\n ax1.set_xlim(0, 0.1)\n\n ax1.set_ylabel('TPR')\n ax1.set_xlabel('FWER')\n ax1.plot(fwer_combi, tpr_combi, '-o',\n label='Combi')\n ax1.plot(fwer_rpvt, tpr_rpvt, '-o',\n label='RPVT')\n #ax1.plot(fwer_permuted, tpr_permuted, '-x',\n # label='COMBI & permuted threshold - ttbr={}'.format(ttbr))\n\n ax2.set_ylabel('Precision')\n ax2.set_xlabel('TPR')\n ax2.plot(tpr_combi, precision_combi, '-o',\n label='Combi')\n ax2.plot(tpr_rpvt, precision_rpvt, '-o',\n label='RPVT')\n #ax2.plot(tpr_permuted, precision_permuted, '-x',\n # label='COMBI & permuted threshold - ttbr={}'.format(ttbr))\n\n # Save results\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-tpr-{}'.format(ttbr)), tpr_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-fwer-{}'.format(ttbr)), fwer_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'combi-precision-{}'.format(ttbr)), precision_combi)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-tpr-pt{}'.format(ttbr)), tpr_permuted)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-fwer-pt{}'.format(ttbr)), fwer_permuted)\n np.save(os.path.join(NUMPY_ARRAYS, 'permuted-avg-precision-pt{}'.format(ttbr)), precision_permuted)\n\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-tpr-{}'.format(ttbr)), tpr_rpvt)\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-fwer-{}'.format(ttbr)), fwer_rpvt)\n np.save(os.path.join(NUMPY_ARRAYS, 'rpvt-precision-{}'.format(ttbr)), precision_rpvt)\n\n # CHALLENGER\n for i, window in enumerate(window_lengths):\n pvalues_challenger = pvalues_per_run_dense[:, i]\n\n res_dense = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(\n pvalues_challenger, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\n tpr_dense, _, fwer_dense, precision_dense = res_dense.T\n np.save(os.path.join(NUMPY_ARRAYS, 'tpr-{}-{}'.format(window, ttbr)), tpr_dense)\n np.save(os.path.join(NUMPY_ARRAYS, 'fwer-{}-{}'.format(window, ttbr)), fwer_dense)\n np.save(os.path.join(NUMPY_ARRAYS, 'precision-{}-{}'.format(window, ttbr)), precision_dense)\n assert fwer_combi.max() <= 1 and fwer_combi.min() >= 0\n ax1.plot(fwer_dense, tpr_dense, '-x', label='DeepCOMBI')\n ax2.plot(tpr_dense, precision_dense, '-x', label='DeepCOMBI')\n\n ax1.legend()\n ax2.legend()\n fig.savefig(\n os.path.join(IMG_DIR, 'tpr_fwer_montaez_combi_newsettings.png'.format(zeros_index.sum())),\n dpi=300)",
"def painting_matching_ml(imgs, db_imgs, method_list, text_masks, author_text, gt_text, metrics, weights, splits=30, max_rank=10):\n descriptor_extractors = [get_descriptor_extractor(method_name) for method_name in method_list]\n tmp_img_format = []\n tmp_mask_format = []\n tmp_text_format = []\n for i in range(len(imgs)):\n for j in range(len(imgs[i])):\n tmp_img_format.append(imgs[i][j])\n tmp_mask_format.append(text_masks[i][j])\n tmp_text_format.append(author_text[i][j])\n\n #db_imgs = [img[0] for img in db_imgs]\n db_img_splits = [i*len(db_imgs)//splits for i in range(splits-1)]\n \n scores = []\n query_descriptors = extract_descriptors(tmp_img_format, descriptor_extractors, method_list, tmp_text_format, tmp_mask_format) \n #np.array([extract_descriptors(img, matching_methods, mask) for img, mask in zip(tmp_img_format, tmp_mask_format)])\n print(\"Starting db extraction + matching\")\n for split in tqdm(range(splits-2)):\n db_descriptors = extract_descriptors(db_imgs[db_img_splits[split]:db_img_splits[split+1]], descriptor_extractors, method_list, gt_text[db_img_splits[split]:db_img_splits[split+1]], None) #np.array([mrhm(db_img) for db_img in db_imgs[db_img_splits[split]:db_img_splits[split+1]]])\n scores.append(compare_descriptors(query_descriptors, db_descriptors, metrics, method_list, weights))\n # compare_descriptors(query_descriptors, db_descriptors, descriptor_comp_methods, descriptor_names, weights)\n db_descriptors = extract_descriptors(db_imgs[db_img_splits[-1]:], descriptor_extractors, method_list, gt_text[db_img_splits[-1]:], None)\n scores.append(compare_descriptors(query_descriptors, db_descriptors, metrics, method_list, weights))\n \n # concatenate all the results\n scores = np.concatenate(scores, 1)\n \n top_k_matches = np.argpartition(scores, list(range(max_rank)))[:, :max_rank]\n return top_k_matches",
"def evaluate(probs, y_test, output_folder, file_prefix='test', model_names=None):\n colours = ['b', 'g', 'm', 'c', 'y', 'r', 'k']\n\n if not os.path.isdir(output_folder):\n os.makedirs(output_folder)\n test_log = open(output_folder + '/' + file_prefix + '.log', 'w+')\n\n fprs, tprs, aucs = [], [], []\n for prob, model_name in zip(probs, model_names):\n test_log.write(model_name + \"\\n\\n\")\n pred = prob.argmax(axis=1)\n test_log.write(str(classification_report(y_test, pred)) + '\\n')\n test_log.write('\\n' + ' Predicted' + '\\n')\n test_log.write(str(confusion_matrix(y_test, pred)) + '\\n')\n\n fpr, tpr, thr = roc_curve(y_test, prob[:, 1])\n ## find best threshold : http://www.medicalbiostatistics.com/roccurve.pdf\n dist = np.sqrt((1. - tpr) ** 2 + (fpr) ** 2)\n best_thr = thr[np.argmin(dist)]\n best_thr_pred = (prob[:,1] > best_thr) * 1\n\n test_log.write('\\n' + \"Accuracy : \" + str((accuracy_score(y_test, pred))) + '\\n')\n test_log.write(\"F1 score : \" + str(f1_score(y_test, pred)) + '\\n')\n test_log.write(\"F1 score (thrs : {:.3f}) : \".format(best_thr) + str(f1_score(y_test, best_thr_pred)) + '\\n')\n test_log.write(\"Recall : \" + str(recall_score(y_test, pred)) + '\\n')\n test_log.write(\"Precision : \" + str(precision_score(y_test, pred)) + '\\n\\n')\n\n roc_auc = auc(fpr, tpr)\n fprs.append(fpr)\n tprs.append(tpr)\n aucs.append(roc_auc)\n\n if len(probs) > 1:\n model_names.extend(['mean', 'geom_mean'])\n test_log.write(\"Ensemble (mean)\\n\\n\")\n prob = (np.array(probs).sum(axis=0) / 2)\n pred = prob.argmax(axis=1)\n test_log.write(str(classification_report(y_test, pred)) + '\\n')\n test_log.write('\\n' + ' Predicted' + '\\n')\n test_log.write(str(confusion_matrix(y_test, pred)) + '\\n')\n\n test_log.write('\\n' + \"Accuracy : \" + str((accuracy_score(y_test, pred))) + '\\n')\n test_log.write(\"F1 score : \" + str(f1_score(y_test, pred)) + '\\n')\n test_log.write(\"Recall : \" + str(recall_score(y_test, pred)) + '\\n')\n test_log.write(\"Precision : \" + str(precision_score(y_test, pred)) + '\\n\\n')\n\n fpr, tpr, _ = roc_curve(y_test, prob[:, 1])\n roc_auc = auc(fpr, tpr)\n fprs.append(fpr)\n tprs.append(tpr)\n aucs.append(roc_auc)\n\n test_log.write(\"Ensemble (geom. mean)\\n\\n\")\n prob = (np.array(probs).prod(axis=0) / np.array(probs).prod(axis=0).sum(axis=1)[:, np.newaxis])\n pred = prob.argmax(axis=1)\n test_log.write(str(classification_report(y_test, pred)) + '\\n')\n test_log.write('\\n' + ' Predicted' + '\\n')\n test_log.write(str(confusion_matrix(y_test, pred)) + '\\n')\n\n test_log.write('\\n' + \"Accuracy : \" + str((accuracy_score(y_test, pred))) + '\\n')\n test_log.write(\"F1 score : \" + str(f1_score(y_test, pred)) + '\\n')\n test_log.write(\"Recall : \" + str(recall_score(y_test, pred)) + '\\n')\n test_log.write(\"Precision : \" + str(precision_score(y_test, pred)) + '\\n\\n')\n\n fpr, tpr, _ = roc_curve(y_test, prob[:, 1])\n roc_auc = auc(fpr, tpr)\n fprs.append(fpr)\n tprs.append(tpr)\n aucs.append(roc_auc)\n\n #plt.figure(figsize=(15, 15))\n for fpr, tpr, roc_auc, col, name in zip(fprs, tprs, aucs, colours, model_names):\n plt.plot(fpr, tpr, col, label='[%s] AUC = %0.5f' % (name, roc_auc))\n\n plt.legend(loc='lower right')\n plt.plot([0, 1], [0, 1], 'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.savefig(output_folder + '/' + file_prefix + '_auc.png')\n plt.close()\n\n test_log.close()",
"def test_call_open_reference_with_match_usearch61(self):\r\n\r\n app = Usearch61ReferenceOtuPicker(\r\n params={'save_intermediate_files': False,\r\n 'output_dir':\r\n self.output_dir,\r\n 'remove_usearch_logs': True,\r\n 'suppress_new_clusters':\r\n False\r\n })\r\n\r\n obs_clusters, failures = app(self.tmp_seq_filepath_97perc_id_rc,\r\n refseqs_fp=self.tmp_seqs_rc_single_seq)\r\n\r\n # Randomly selected match is used for equivalent matches, so need to\r\n # test for results without order affecting output\r\n expected_clusters = {'denovo0': ['usearch_ecoli_seq',\r\n 'usearch_ecoli_seq_1bp_change'],\r\n 'usearch_ecoli_seq_2bp_change_rc':\r\n ['usearch_ecoli_seq_2bp_change_rc']}\r\n\r\n for result in obs_clusters:\r\n for cluster in obs_clusters[result]:\r\n self.assertTrue(cluster in expected_clusters[result])\r\n\r\n expected_failures = []\r\n self.assertEqual(failures, expected_failures)",
"def test_model(self, model, dataloader, test_loss_function):\n test_loss = 0.0\n test_correct = 0\n for images, labels in dataloader:\n images, labels = images.to(device), labels.to(device)\n\n output = model(images)\n loss = test_loss_function(output, labels)\n test_loss += loss.item() * images.size(0)\n scores, predictions = torch.max(output.data, 1)\n test_correct += (predictions == labels).sum().item()\n\n return test_loss, test_correct",
"def test_reviewer_matching(resident_names, hospital_names, capacities, seed):\n\n _, _, match = _make_match(resident_names, hospital_names, capacities, seed)\n\n match.solve()\n match.reviewers[0].matching.append(Player(name=\"foo\", pref_names=[]))\n\n with pytest.raises(Exception):\n match._check_reviewer_matching()",
"def testOMW():\n deleteMatches()\n deletePlayers()\n registerPlayer(\"Pikachu\")\n registerPlayer(\"Charmander\")\n registerPlayer(\"Bulbasaur\")\n registerPlayer(\"Squirtle\")\n standings = playerStandings()\n [id1, id2, id3, id4] = [row[0] for row in standings]\n reportMatch(id2, id1)\n reportMatch(id4, id3)\n reportMatch(id2, id4, True)\n reportMatch(id1, id3)\n standings = playerStandings()\n if not (standings[2][0]==id4 and standings[2][5]==0 and\n standings[3][0]==id2 and standings[3][5]==2):\n raise ValueError(\n \"OMWs are not tallied and accounted for correctly.\"\n )\n\n print \"5. OMWs are tallied and accounted for correctly.\"",
"def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))",
"def main(args):\n \n ## Load & Preprocess data \n if args.data_name == 'amsterdam': \n file_name = '../data/amsterdam/test_longitudinal_data.csv'\n ori_data = data_preprocess(file_name, args.max_seq_len)\n \n # Divide the data into training and testing\n divided_data, _ = data_division(ori_data, seed = args.seed, divide_rates = [args.train_rate, 1-args.train_rate])\n \n train_data = np.asarray(divided_data[0])\n test_data = np.asarray(divided_data[1])\n\n print('Finish data loading: ' + str(args.data_name)) \n \n ## Run hider algorithm\n if args.hider_model == 'timegan':\n generated_data = timegan.timegan(train_data)\n elif args.hider_model == 'add_noise':\n generated_data = add_noise.add_noise(train_data, args.noise_size) \n print('Finish hider algorithm training') \n \n ## Define enlarge data and its labels\n enlarge_data = np.concatenate((train_data, test_data), axis = 0)\n enlarge_data_label = np.concatenate((np.ones([train_data.shape[0],]), np.zeros([test_data.shape[0],])), axis = 0)\n \n # Mix the order\n idx = np.random.permutation(enlarge_data.shape[0])\n enlarge_data = enlarge_data[idx]\n enlarge_data_label = enlarge_data_label[idx]\n \n ## Run seeker algorithm\n reidentified_data = knn_seeker(generated_data, enlarge_data)\n \n print('Finish seeker algorithm training') \n \n ## Evaluate the performance\n # 1. Feature prediction\n feat_idx = np.random.permutation(train_data.shape[2])[:args.feature_prediction_no]\n ori_feat_pred_perf = feature_prediction(train_data, test_data, feat_idx)\n new_feat_pred_perf = feature_prediction(generated_data, test_data, feat_idx)\n \n feat_pred = [ori_feat_pred_perf, new_feat_pred_perf]\n \n print('Feature prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_feat_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_feat_pred_perf, 4)))\n \n # 2. One step ahead prediction\n ori_step_ahead_pred_perf = one_step_ahead_prediction(train_data, test_data)\n new_step_ahead_pred_perf = one_step_ahead_prediction(generated_data, test_data)\n \n step_ahead_pred = [ori_step_ahead_pred_perf, new_step_ahead_pred_perf]\n \n print('One step ahead prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_step_ahead_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_step_ahead_pred_perf, 4)))\n \n # 3. Reidentification score\n reidentification_score = reidentify_score(enlarge_data_label, reidentified_data)\n \n print('Reidentification score: ' + str(np.round(reidentification_score, 4)))\n \n shutil.rmtree('tmp')\n \n return feat_pred, step_ahead_pred, reidentification_score",
"def read_data_split_and_search():\n\n\n\n dataReader = Movielens10MReader()\n dataset = dataReader.load_data()\n\n URM_train, URM_test = split_train_in_two_percentage_global_sample(dataset.get_URM_all(), train_percentage = 0.80)\n URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage = 0.80)\n\n output_folder_path = \"result_experiments/\"\n\n\n # If directory does not exist, create\n if not os.path.exists(output_folder_path):\n os.makedirs(output_folder_path)\n\n\n\n\n\n\n\n collaborative_algorithm_list = [\n Random,\n TopPop,\n P3alphaRecommender,\n RP3betaRecommender,\n ItemKNNCFRecommender,\n UserKNNCFRecommender,\n MatrixFactorization_BPR_Cython,\n MatrixFactorization_FunkSVD_Cython,\n PureSVDRecommender,\n SLIM_BPR_Cython,\n SLIMElasticNetRecommender\n ]\n\n\n\n\n from Base.Evaluation.Evaluator import EvaluatorHoldout\n\n evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[5])\n evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[5, 10])\n\n\n runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,\n URM_train = URM_train,\n metric_to_optimize = \"MAP\",\n n_cases = 10,\n evaluator_validation_earlystopping = evaluator_validation,\n evaluator_validation = evaluator_validation,\n evaluator_test = evaluator_test,\n output_folder_path = output_folder_path,\n similarity_type_list = [\"cosine\"],\n parallelizeKNN = False)\n\n\n\n\n\n pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)\n pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)\n\n #\n #\n # for recommender_class in collaborative_algorithm_list:\n #\n # try:\n #\n # runParameterSearch_Collaborative_partial(recommender_class)\n #\n # except Exception as e:\n #\n # print(\"On recommender {} Exception {}\".format(recommender_class, str(e)))\n # traceback.print_exc()\n #"
] | [
"0.6117741",
"0.54087925",
"0.54066885",
"0.5242778",
"0.5207955",
"0.5201614",
"0.519627",
"0.5183446",
"0.5078315",
"0.50693494",
"0.5066249",
"0.503917",
"0.50329924",
"0.502089",
"0.5012984",
"0.50017047",
"0.49997708",
"0.49859002",
"0.49853367",
"0.49727893",
"0.49616915",
"0.49471214",
"0.4946759",
"0.49364844",
"0.49341378",
"0.49253732",
"0.49154043",
"0.490719",
"0.49058038",
"0.49002922"
] | 0.6756479 | 0 |
An alias for `dual_modality_orh`, kept for backward compatibility. | def two_treatment_orh(*args, **kwargs):
return dual_modality_orh(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_modality(self):\n return self._modality",
"def change_modality(self, new_modality):\n raise NotImplementedError",
"def modal(image, selem, out=None, mask=None, shift_x=False, shift_y=False):\n\n return _apply(_crank8.modal, _crank16.modal, image, selem, out=out,\n mask=mask, shift_x=shift_x, shift_y=shift_y)",
"def simulate_dual_modality(disease,\n modality_0_auc,\n modality_1_auc,\n structure,\n num_readers,\n b=1,\n rng=np.random):\n mu = auc_to_mu(modality_0_auc)\n delta_mu = auc_to_mu(modality_1_auc) - mu\n return simulate_dual_modality_from_mu(\n disease,\n mu,\n delta_mu,\n structure=structure,\n num_readers=num_readers,\n b=b,\n rng=rng)",
"def support_inverse(rho):\n return LA.pinv(rho)",
"def has_modality(self):\n return self._modality is not None",
"def Rotation_HOR_EQJ(time, observer):\n hor_eqd = Rotation_HOR_EQD(time, observer)\n eqd_eqj = Rotation_EQD_EQJ(time)\n return CombineRotation(hor_eqd, eqd_eqj)",
"def _get_modality(self, a_feats, a_toks1, a_toks2):\n mod1 = self._get_arg_modality(a_toks1)\n mod2 = self._get_arg_modality(a_toks2)\n joint_mod = [i * j for i in mod1 for j in mod2]\n # add modality features\n a_feats[\"Mod1-\" + ''.join(str(i) for i in mod1)] = 1.\n a_feats[\"Mod2-\" + ''.join(str(i) for i in mod2)] = 1.\n a_feats[\"JointMod-\" + ''.join(str(i) for i in joint_mod)] = 1.",
"def Rotation_HOR_EQD(time, observer):\n rot = Rotation_EQD_HOR(time, observer)\n return InverseRotation(rot)",
"def __or__(self, other):\n return self.fam.c_binop('or', self, other)",
"def Rotation_EQJ_HOR(time, observer):\n rot = Rotation_HOR_EQJ(time, observer)\n return InverseRotation(rot)",
"def __ixor__(self, y):\n if is_tensor(y) or isinstance(y, int):\n if self.rank == 0:\n self.share ^= y\n elif isinstance(y, BinarySharedTensor):\n self.share ^= y.share\n else:\n raise TypeError(\"Cannot XOR %s with %s.\" % (type(y), type(self)))\n return self",
"def Xor(*args, **kwargs):\n return _gdi_.Region_Xor(*args, **kwargs)",
"def __ior__(self, y):\n xor_result = self ^ y\n return self.__iand__(y).__ixor__(xor_result)",
"def getModality(self, resource):\n\n resource = self.parseUrl(resource, 'modalities')\n\n res = self.getRequest(resource)\n return vsdModels.Modality(**res)",
"def __or__(self, y):\n return self.__and__(y) ^ self ^ y",
"def gen_dual_func(self):\n if 0 in self.sig:\n # We are degenerate, use the right complement\n return self.right_complement_func\n else:\n Iinv = self.pseudoScalar.inv().value\n gmt_func = self.gmt_func\n @numba.njit\n def dual_func(Xval):\n return gmt_func(Xval, Iinv)\n return dual_func",
"def xor(self, *args):\n return Xor(self, *args)",
"def JoliModule( L, h, parity = 'odd', uterm = None):\n return JoliModule_class( L, h, parity = parity, uterm = uterm)",
"def dual_modality_orh(disease,\n reader_scores,\n fom_fn,\n coverage=0.95,\n margin=0,\n sample_weight=None,\n verbose=True):\n if margin < 0:\n raise ValueError('margin parameter should be nonnegative.')\n\n num_cases, num_readers, num_modalities = reader_scores.shape\n if num_modalities != 2:\n raise ValueError('Only two modalities are supported.')\n\n if sample_weight is not None:\n if len(sample_weight) != num_cases:\n raise ValueError('Length of weights do not match cases.')\n\n if len(disease) != num_cases:\n raise ValueError(\n 'disease, model_score and reader_scores must have the same size '\n 'in the first dimension.')\n\n if 'sample_weight' not in inspect.signature(fom_fn).parameters.keys():\n raise ValueError(f'sample_weight is given but no such argument is supported'\n ' by the given figure-of-merit function {fom_fn.__name__}')\n reader_modality_foms = np.zeros((num_readers, 2), dtype=np.float32)\n for reader_idx in range(num_readers):\n for modality_idx in range(2):\n if sample_weight is not None:\n fom = fom_fn(disease, reader_scores[:, reader_idx, modality_idx],\n sample_weight=sample_weight)\n else:\n fom = fom_fn(disease, reader_scores[:, reader_idx, modality_idx] )\n reader_modality_foms[reader_idx, modality_idx] = fom\n\n reader_foms = np.mean(reader_modality_foms, axis=1)\n modality_foms = np.mean(reader_modality_foms, axis=0)\n average_fom = np.mean(modality_foms)\n\n assert len(reader_foms) == num_readers\n assert len(modality_foms) == 2\n\n # mstr = mean squared reader/modality difference; equation 10.43\n mstr = 0.0\n for reader_idx in range(num_readers):\n for modality_idx in range(2):\n summand = reader_modality_foms[reader_idx, modality_idx]\n summand -= modality_foms[modality_idx]\n summand -= reader_foms[reader_idx]\n summand += average_fom\n mstr += summand**2\n mstr /= num_readers - 1\n\n # Estimate covariance terms according to Equation 10.31\n covmat, indices = _jackknife_covariance_dual_modality(disease, reader_scores,\n fom_fn, sample_weight)\n cov2_samples = []\n cov3_samples = []\n for row_idx in range(2 * num_readers):\n modality, reader = indices[row_idx]\n for col_idx in range(row_idx + 1):\n modality_prime, reader_prime = indices[col_idx]\n if reader != reader_prime:\n if modality == modality_prime:\n cov2_samples.append(covmat[row_idx, col_idx])\n else:\n cov3_samples.append(covmat[row_idx, col_idx])\n cov2 = np.mean(cov2_samples)\n cov3 = np.mean(cov3_samples)\n\n if verbose:\n print('mstr', mstr)\n print('cov2 * 10^5', cov2 * 1e5)\n print('cov3 * 10^5', cov3 * 1e5)\n\n observed_effect_size = modality_foms[1] - modality_foms[0]\n\n # Equation 10.45\n dof = (mstr + max(num_readers * (cov2 - cov3), 0))**2\n dof /= (mstr**2) / (num_readers - 1)\n\n # Equation 10.48\n se = np.sqrt(2 * (mstr + num_readers * max(cov2 - cov3, 0)) / num_readers)\n\n return _test_result(\n effect=observed_effect_size,\n margin=margin,\n se=se,\n dof=dof,\n coverage=coverage,\n effect_size_constituents=EffectSizeConstituents(modality_foms=modality_foms))",
"def func(rho, h0, c_ops, l_ops):\n rhs = -1j * commutator(h0, rho)\n\n for i in range(len(c_ops)):\n c_op = c_ops[i]\n l_op = l_ops[i]\n rhs -= commutator(c_op, l_op.dot(rho) - rho.dot(dag(l_op)))\n return rhs",
"def or_(*args, **kwargs):\n ...",
"def DOR(self):\n a, c, d, b = self.to_ccw()\n ad, bc = a * d, b * c\n return _div(ad, bc)",
"def EOR(self, value):\n result = self.reg.A ^ value\n self.reg.N = result >> 7\n self.reg.Z = result == 0\n self.reg.A = result",
"def dual(self):\n letter = self.letter()\n # the self-dual cases\n if letter != 'BC' and letter[0] in ['B','C']:\n if letter == 'BB': letter = 'CC'\n elif letter == 'CC': letter = 'BB'\n elif letter[0] == 'B': letter = 'C' + letter[1:]\n elif letter[0] == 'C': letter = 'B' + letter[1:]\n rank = self._rank\n if self.is_affine():\n rank -= 1\n twist = self._twist\n return QuiverMutationType(letter,rank,twist)\n # the cases F and G have non-trivial duality in some cases\n elif letter in ['F','G']:\n if self.is_finite(): return self\n elif self.is_affine():\n rank = self._rank - 1\n twist = - self._twist\n elif self.is_elliptic():\n twist = self._twist\n rank = self._rank - 2\n if letter == 'F':\n if self._twist == [2,2]:\n twist == [1,1]\n if self._twist == [1,1]:\n twist == [2,2]\n if letter == 'G':\n if self._twist == [3,3]:\n twist = [1,1]\n elif self._twist == [1,1]:\n twist = [3,3]\n else: rank = self._rank\n return QuiverMutationType(letter,rank,twist)\n else:\n return self",
"def homothick():\n return se2hmt(binary([[1,1,1],\n [0,0,0],\n [0,0,0]]),\n binary([[0,0,0],\n [0,1,0],\n [1,1,1]]))",
"def _or(self, _or):\n\n self.__or = _or",
"def _or(self, _or):\n\n self.__or = _or",
"def _or(self, _or):\n\n self.__or = _or",
"def _or(self, _or):\n\n self.__or = _or"
] | [
"0.5828373",
"0.5521631",
"0.5268918",
"0.5122006",
"0.50041944",
"0.49817193",
"0.49683288",
"0.49280772",
"0.48526505",
"0.48431662",
"0.48264137",
"0.48069292",
"0.4785869",
"0.47820333",
"0.47731036",
"0.47571263",
"0.4755001",
"0.47361046",
"0.47241923",
"0.47087532",
"0.46980837",
"0.46826568",
"0.4650154",
"0.46390954",
"0.46284604",
"0.45968413",
"0.45739806",
"0.45739806",
"0.45739806",
"0.45739806"
] | 0.75222385 | 0 |
Convert Python to JSON by replacing single quotes with double quotes and stripping trailing commas. Note that this checker might be oversensitive if there are additional JSON errors within the nested dictionary. Used before SafeDictHook to check for redundant keys. We use a placeholder for block text because JSON cannot process it, and the point of this function is to use SafeDictHook to prevent redundant keys. | def jsonify(text):
# remove comments because they might corrupt the JSON
re_block_comments = re.compile(r'([\"]{3}.*?[\"]{3})',flags=re.M+re.DOTALL)
text = re_block_comments.sub('"REMOVED_BLOCK_COMMENT"',text)
# note that this fails if you use hashes inside of dictionary values
re_comments = re.compile(r'(#.*?)\n',flags=re.M+re.DOTALL)
text = re_comments.sub('',text)
# strip trailing commas because they violate JSON rules
text = re.sub(r",[ \t\r\n]*([}\]])",r"\1",text.replace("'","\""))
    # fix the case on booleans and map None to null
text = re.sub("True","true",text)
text = re.sub("False","false",text)
text = re.sub("None","null",text)
# remove whitespace
re_whitespace = re.compile(r'\n\s*\n',flags=re.M)
text = re_whitespace.sub('\n',text)
return text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_json_quotes(json_data):\n lines = []\n for line in json_data.splitlines():\n if ':' in line:\n key, value = line.split(':', maxsplit=1)\n value = value.strip()\n lines.append(key + ': ' + value.strip('\",') + (',' if value.endswith(',') else ''))\n else:\n lines.append(' ' * (len(line) - len(line.lstrip(' '))) + line.strip('\" '))\n return '\\n'.join(lines)",
"def clean_qstat_json(stream):\n string_entry_re = re.compile(r'^\\s*\"(?P<key>.+)\":\"(?P<value>.+)\"(?P<comma>,?)$')\n\n lines = []\n\n for line in stream.splitlines():\n match = string_entry_re.match(line)\n if match is not None:\n fixed_value = json.dumps(match.group('value'))\n line = f'\"{match.group(\"key\")}\":{fixed_value}{match.group(\"comma\")}'\n\n lines.append(line)\n\n return json.loads(''.join(lines))",
"def safe_json(value):\n return json.dumps(value).replace('</', '<\\\\/') # Fix injection of closing markup in strings",
"def stripper(data):\n\n new_data = {}\n for k, v in data.items():\n if isinstance(v, dict):\n v = stripper(v)\n if not v in (u'', None, {}):\n new_data[k] = v\n return new_data",
"def postprocess(self, json_string):\n is_compressing, is_hash, compressed, spaces = False, False, [], 0\n for row in json_string.split(\"\\n\"):\n if is_compressing:\n if (row[:spaces + 5] == \" \" * (spaces + 4) +\n (\"\\\"\" if is_hash else \"{\")):\n compressed.append(row.rstrip())\n elif (len(row) > spaces and row[:spaces] == \" \" * spaces and\n re.match(\"[\\]\\}],?\", row[spaces:].rstrip())):\n compressed.append(row.rstrip())\n is_compressing = False\n else:\n compressed[-1] += \" \" + row.strip()\n else:\n compressed.append(row.rstrip())\n if any(a in row for a in [\"edges\", \"nodes\"]):\n # Fix to handle issues that arise with empty lists\n if \"[]\" in row:\n continue\n spaces = sum(1 for _ in takewhile(str.isspace, row))\n is_compressing, is_hash = True, \"{\" in row\n return \"\\n\".join(compressed)",
"def json(value):\n uncleaned = jsonlib.dumps(value)\n clean = bleach.clean(uncleaned)\n\n try:\n jsonlib.loads(clean)\n except:\n # should never happen, but this is a last-line-of-defense check\n # to make sure this blob wont get eval'ed by the JS engine as \n # anything other than a JSON object\n raise ValueError('JSON contains a quote or escape sequence that was unable to be stripped')\n\n return mark_safe(clean)",
"def process_json(json_str):\n\tjson_str = json_str.replace(\"\\n\", \" \")\n\tjson_str = json_str.replace(\"\\t\", \" \")\n\t\n\twhile json_str.find(\" \") > -1:\n\t\tjson_str = json_str.replace(\" \", \" \")\n\t\n\treturn json_str",
"def json_escape(context, value):\n\n return json.dumps(value).strip('\"')",
"def python_filter(txt):\n\n indent_level = 0\n tmp = txt[:]\n i = 0\n while i < len(tmp):\n tok = tmp[i:i+2]\n if tok == \"{:\":\n indent_level += 1\n elif tok == \":}\":\n indent_level -= 1\n tabstr = \"\\n\" + \" \" * indent_level\n if tok == \"{:\" or tok == \":}\":\n tmp = tmp.replace(tok, tabstr, 1)\n i += 1\n # Strip superfluous blank lines.\n txt = \"\\n\".join([line for line in tmp.split(\"\\n\")\n if line.strip() != \"\"])\n return txt",
"def beautify_json(self) -> None:\n for letter in self.data:\n for category in self.data[letter]:\n self.data[letter][category] = str(self.data[letter][category.lower()])\n self.save()\n with open(dict_path, encoding='utf-8') as x:\n data = x.read()\n with open(dict_path, 'w', encoding='utf-8') as y:\n data2 = data.replace('\"[', '[').replace(']\"', ']').replace(\"'\", '\"')\n y.write(data2)",
"def removeJsonComment(jsonStr):\n pureJsonStr = jsonStr\n\n # whole line with #\n # # International\n pureJsonStr = re.sub(\"^\\s*#.*$\\n+\", \"\", pureJsonStr, flags=re.M)\n\n # whole line with //\n # // \"mode\": \"SINGLE\",\n pureJsonStr = re.sub(\"^\\s*//.*$\\n+\", \"\", pureJsonStr, flags=re.M)\n\n # line tail with #\n pureJsonStr = re.sub(\"\\s+#.*$\", \"\", pureJsonStr, flags=re.M)\n\n # line tail with //\n pureJsonStr = re.sub(\"\\s+//.*$\", \"\", pureJsonStr, flags=re.M)\n\n return pureJsonStr",
"def json(data, strip_str_floats=False):\n x = simplejson.dumps(data, separators=(',', ':'))\n if strip_str_floats:\n return mark_safe(FLOAT_AS_STRING_RE.sub(r\"\\1\", x))\n else:\n return mark_safe(x)",
"def json_str(item):\n\n if isinstance(item, dict):\n #return {json_str(key): json_str(value) for key, value in item.iteritems()}\n return dict((json_str(key), json_str(value)) for key, value in item.iteritems())\n elif isinstance(item, list):\n return [json_str(element) for element in item]\n elif isinstance(item, unicode):\n return item.encode('utf-8')\n else:\n return item",
"def removeQuotes(data):\n\tfor each in data:\n\t\tfor v in each.values():\n\t\t\tif not isinstance(v, list):\n\t\t\t\t# Not implemented because not true case\n\t\t\t\tpass\n\t\ttagValueData = dict(zip(each['k'], each['v']))\n\t\tfor tag, val in tagValueData.items():\n\t\t\tif str(tag).find('\"') != -1:\n\t\t\t\t_tag = str(tag).replace('\"', '')\n\t\t\t\teach['k'][each['k'].index(tag)] = _tag\n\t\t\tif str(val).find('\"') != -1:\n\t\t\t\t_val = str(val).replace('\"', '')\n\t\t\t\teach['v'][each['v'].index(val)] = _val\n\t\tyield each",
"def dict_to_minified_json_string(json_dict):\r\n return json.dumps(json_dict, separators=(',', ':')) + '\\n'",
"def parse_json(raw):\n return escape.recursive_unicode(escape.json_decode(raw)) if raw != None else None",
"def json_dumps(data, indent=2, indent_increment=None, toplevel=True, one_line_max_width=200, object_fields_sorting_key=None):\n\n def simple(d):\n r = True\n if isinstance(d, dict):\n r = not any(isinstance(v, (list, tuple, set, dict)) for v in d.values()) and len(d) < 17\n elif isinstance(d, (tuple, list)):\n r = not any(isinstance(v, (list, tuple, set, dict)) for v in d)\n return r\n\n def end(symbol, indent):\n if indent > indent_increment:\n r = \"{:{}s}{}\".format(\"\", indent - indent_increment, symbol)\n else:\n r = symbol\n return r\n\n def make_one_line(data):\n if isinstance(data, set):\n s = json.dumps(sorted(data, key=object_fields_sorting_key), ensure_ascii=False)\n elif isinstance(data, dict):\n s = \"{\"\n for no, k in enumerate(sorted(data, key=object_fields_sorting_key), start=1):\n comma = \", \" if no < len(data) else \"\"\n s += \"{}: {}{}\".format(json.dumps(k, ensure_ascii=False), json_dumps(data[k], indent=0, indent_increment=None, toplevel=False, object_fields_sorting_key=object_fields_sorting_key), comma)\n s += \"}\"\n else:\n s = json.dumps(data, sort_keys=True, ensure_ascii=False)\n return s\n\n def make_object(data):\n if toplevel:\n r = [\"{{{:<{}s}\\\"_\\\":\\\"-*- js-indent-level: {} -*-\\\",\".format(\"\", indent_increment - 1, indent_increment)]\n else:\n r = [\"{\"]\n for no, k in enumerate(sorted(data, key=object_fields_sorting_key), start=1):\n comma = \",\" if no < len(data) else \"\"\n r.append(\"{:{}s}{}: {}{}\".format(\"\", indent, json.dumps(k, ensure_ascii=False), json_dumps(data[k], indent + indent_increment, indent_increment, toplevel=False, object_fields_sorting_key=object_fields_sorting_key), comma))\n r.append(end(\"}\", indent))\n return r\n\n # --------------------------------------------------\n\n if indent_increment is None:\n indent_increment = indent\n if indent == 0 or simple(data):\n s = make_one_line(data)\n else:\n r = []\n if isinstance(data, dict):\n r.extend(make_object(data))\n elif isinstance(data, (tuple, list)):\n r.append(\"[\")\n for no, v in enumerate(data, start=1):\n comma = \",\" if no < len(data) else \"\"\n r.append(\"{:{}s}{}{}\".format(\"\", indent, json_dumps(v, indent + indent_increment, indent_increment, toplevel=False, object_fields_sorting_key=object_fields_sorting_key), comma))\n r.append(end(\"]\", indent))\n else:\n raise ValueError(\"Cannot serialize: {!r}\".format(data))\n s = \"\\n\".join(r)\n if \"\\n\" in s and len(s) < one_line_max_width:\n s = make_one_line(data)\n return s",
"def json_filter(val, indent=0):\n if val is None or isinstance(val, Undefined):\n return \"null\"\n return json.dumps(val, indent=indent, sort_keys=True)",
"def format_json(json_data):\n return json_data[json_data.index('{'):json_data.rfind('}')+1]",
"def quote_json(_json: dict) -> str:\n stringified = json.dumps(_json)\n return quote(stringified)",
"def prepare_value(self, value):\n return json.dumps(value, indent=4)",
"def ASTtoJSON(block):\n def prepare(block):\n \"\"\" Strips circular 'parent' references and trims empty\n block elements.\"\"\"\n if block.parent:\n block.parent = None\n if not block.__dict__['is_open'] is None:\n block.__dict__['open'] = block.is_open\n del(block.is_open)\n # trim empty elements...\n for attr in dir(block):\n if not callable(attr) and not attr.startswith(\"__\") and \\\n attr != \"makeNode\" and attr != \"pretty\":\n if block.__dict__[attr] in [\"\", [], None, {}]:\n del(block.__dict__[attr])\n if 'children' in block.__dict__ and len(block.children) > 0:\n for i, child in enumerate(block.children):\n block.children[i] = prepare(child)\n if 'inline_content' in block.__dict__ and \\\n len(block.inline_content) > 0:\n for i, child in enumerate(block.inline_content):\n block.inline_content[i] = prepare(child)\n if 'label' in block.__dict__ and len(block.label) > 0:\n for i, child in enumerate(block.label):\n block.label[i] = prepare(child)\n if 'c' in block.__dict__ and type(block.c) is list and \\\n len(block.c) > 0:\n for i, child in enumerate(block.c):\n block.c[i] = prepare(child)\n return block\n # sort_keys=True) # indent=4)\n return json.dumps(prepare(block), default=lambda o: o.__dict__)",
"def _sanitize(data_dict):\n return data_dict",
"def format_dicti(self, args):\n\n new_list = []\n new_list.append(args[0])\n\n try:\n my_dict = eval(args[1][ args[1].find('{') : args[1].find('}') + 1] )\n except Exception:\n my_dict = None\n\n if type(my_dict) is dict:\n\n new_str = args[1][ args[1].find('(') + 1 : args[1].find(')') ]\n new_list.append( ((new_str.split(\", \"))[0]).strip('\"'))\n new_list.append(my_dict)\n\n return new_list",
"def dict_to_beautified_json(d):\n return jsbeautifier.beautify(json.dumps(d))",
"def bbjson(s):\n return BunchDict(json.loads(unquote(s)))",
"def json_compact(obj) -> str:\n return json.dumps(obj, separators=(\",\", \":\"))",
"def sanitize(object: dict, regex: re.Pattern = (\n re.compile(r\"(?<!(?P<bound><)\\W)\\b(?P<word>\\w+)\\b(?(bound)(?!>)|)\")\n), placeholder: str = \"<obscured>\") -> dict:\n\n result = dict()\n\n # import json; print(json.dumps(object, indent=4))\n\n for key, value in object.items():\n\n if isinstance(value, dict):\n value = sanitize(value)\n\n if key in (\n \"enterprise_name\", \"email\", \"name\", \"name_normalized\",\n \"real_name\", \"real_name_normalized\", \"display_name\",\n \"display_name_normalized\", \"title\", \"phone\", \"skype\",\n \"first_name\", \"last_name\"\n ):\n value = placeholder if value else None\n\n if key.startswith((\"image\", \"status\")) or key == \"blocks\":\n continue # message blocks are too complex to sanitize, drop them\n\n if key in (\"topic\", \"purpose\"):\n if isinstance(value, dict):\n value.update(value=placeholder)\n elif isinstance(value, str):\n value = placeholder\n if key == \"previous_names\":\n value = [placeholder] * len(value)\n\n if key == \"text\":\n value = re.sub(regex, lambda match: (\n \"X\" * len(match[\"word\"])), value)\n\n if key in (\"files\", \"attachments\"):\n key, value = f\"{key}_count\", len(value)\n\n result[key] = value\n\n return result",
"def _Sanitize(data):\n if isinstance(data, collections.OrderedDict):\n return collections.OrderedDict([(str(k), _Sanitize(v))\n for k, v in data.items()])\n if isinstance(data, dict):\n return {str(k): _Sanitize(v) for k, v in data.items()}\n elif isinstance(data, list):\n return [_Sanitize(x) for x in data]\n elif data is None:\n return []\n elif isinstance(data, bool):\n return 1 if data else 0\n else:\n return data",
"def testPrettyPrintJSON(self):\n test_dict = {'test': [{'dict1': {'key1': 'val1'}, 'dict2': None}]}\n expected_string = ('{\\n \"test\": [\\n {\\n \"dict1\": {\\n'\n ' \"key1\": \"val1\"\\n }, \\n'\n ' \"dict2\": null\\n }\\n ]\\n}\\n')\n self.assertEqual(expected_string, utils.PrettyPrintJSON(test_dict))"
] | [
"0.6101548",
"0.60665554",
"0.6039736",
"0.5973466",
"0.5965454",
"0.59043264",
"0.56827843",
"0.56486374",
"0.5458422",
"0.5438886",
"0.5437395",
"0.539824",
"0.5383692",
"0.5371119",
"0.5345633",
"0.53277117",
"0.5322137",
"0.53118145",
"0.5297029",
"0.5265412",
"0.52639145",
"0.52392024",
"0.52348536",
"0.52152663",
"0.51928717",
"0.5173182",
"0.5158914",
"0.51538354",
"0.51489526",
"0.51433355"
] | 0.68492764 | 0 |
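A minimal runnable sketch of the cleanup idea behind the jsonify row above, assuming only the Python standard library; strip_trailing_commas, no_duplicate_keys and the python_like sample string are illustrative stand-ins for the row's jsonify/SafeDictHook pair, which are not defined here.

import json
import re

def strip_trailing_commas(text):
    # swap single quotes for double quotes and drop trailing commas, which JSON forbids
    return re.sub(r",[ \t\r\n]*([}\]])", r"\1", text.replace("'", '"'))

def no_duplicate_keys(pairs):
    # object_pairs_hook that rejects redundant keys, similar in spirit to SafeDictHook
    keys = [k for k, _ in pairs]
    if len(keys) != len(set(keys)):
        raise ValueError("redundant keys detected: %s" % keys)
    return dict(pairs)

python_like = "{'alpha': 1, 'beta': {'gamma': True,},}"
cleaned = strip_trailing_commas(python_like).replace("True", "true")
print(json.loads(cleaned, object_pairs_hook=no_duplicate_keys))

Passing the hook to json.loads is what turns duplicate-key detection into a hard error instead of silent last-value-wins behaviour.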
Expand all op nodes to the given basis. | def run(self, dag):
# Walk through the DAG and expand each non-basis node
for node in dag.gate_nodes():
if node.name in self.basis: # If already a base, ignore.
continue
# TODO: allow choosing other possible decompositions
decomposition_rules = node.op.decompositions()
if not decomposition_rules:
raise QiskitError("Cannot unroll the circuit to the given basis, %s. "
"The current node being expanded, %s, "
"is defined in terms of an invalid basis." %
(str(self.basis), node.op.name))
decomposition_dag = self.run(decomposition_rules[0]) # recursively unroll gates
dag.substitute_node_with_dag(node=node, input_dag=decomposition_dag)
return dag | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_expand(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n if op.input(\"Shape\"):\n sizes = g.get_node(op.input(\"Shape\")[0])\n else:\n sizes = op.attr(\"shape\")\n\n if isinstance(sizes, _expr.Expr):\n sizes = try_infer_value(sizes, parameters=g.get_params())[0]\n\n if isinstance(sizes, np.ndarray):\n sizes = sizes.tolist()\n\n out = _op.broadcast_to(x, sizes)\n g.add_node(op.output(\"Out\")[0], out)",
"def convert_expand_as(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n target_shape = op.attr(\"target_shape\")\n out = _op.broadcast_to(x, target_shape)\n g.add_node(op.output(\"Out\")[0], out)",
"def _expand_node(expand_n, base_cost, randomizer):\n\n for next_n, props in nb[expand_n].items():\n randomizer -= 1\n total_cost = props['weight'] + base_cost\n e_cost = (total_cost, props['weight'], randomizer)\n\n # Check for tree membership as this signifies a loop back to the tree\n if next_n not in scanned or e_cost < scanned[next_n] and not tree.has_node(next_n):\n heappush(queue, (e_cost[0], e_cost[1], e_cost[2], next_n))\n scanned[next_n] = e_cost\n p[next_n] = expand_n",
"def expand(self):\n self._express = self._express.expand()\n self._del_derived()\n return self",
"def expand(self):\n nodes = []\n\n for action in self.board.actions():\n # copy the current board\n board = copy.deepcopy(self.board)\n board.apply_action(action)\n\n nodes.append(Node(board, action, self.cost + 1, self))\n \n return nodes",
"def __opExpand1(self,that,op, out=None):\n A = self\n B = that if isinstance(that,Factor) else Factor([],that)\n vall = A.v | B.v\n axA = list(map(lambda x:A.v.index(x) if x in A.v else -1 ,vall))\n axB = list(map(lambda x:B.v.index(x) if x in B.v else -1 ,vall))\n if ( (not (out is None)) and (out.v == vall) ):\n f = out\n else:\n f = Factor(vall) # TODO: should also change \"out\" if specified!\n it = np.nditer([A.t, B.t, f.t], \n op_axes = [ axA, axB, None ], \n op_flags=[['readonly'], ['readonly'], ['writeonly']])\n for (i,j,k) in it:\n op(i,j,out=k)\n return f",
"def expand(self, policy):\n if self.children != {}: return\n actionWeights = policy(self.state)\n for action in actionWeights:\n succ = self.state.succ(self.state.player, action)\n self.children[action] = TreeNode(succ, actionWeights[action], self)",
"def test_expansion(self, n_wires, expected_names, expected_wires):\n\n shapes = expected_shapes(1, n_wires)\n weights = [np.random.random(shape) for shape in shapes]\n\n op = qml.CVNeuralNetLayers(*weights, wires=range(n_wires))\n tape = op.expand()\n\n i = 0\n for gate in tape.operations:\n if gate.name != \"Interferometer\":\n assert gate.name == expected_names[i]\n assert gate.wires.labels == tuple(expected_wires[i])\n i = i + 1\n else:\n for gate_inter in gate.expand().operations:\n assert gate_inter.name == expected_names[i]\n assert gate_inter.wires.labels == tuple(expected_wires[i])\n i = i + 1",
"def expand(self, graph):\n self.r.from_graph(graph)\n triples = self.r.reason()\n graph.add(*triples)",
"def expand(self, graph):\n owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(graph)",
"def create_helper_expand_node(input_name, output_name, expand_shape):\n expand_node = onnx.helper.make_node(\n \"Expand\",\n inputs=[input_name, expand_shape],\n outputs=[output_name],\n name=output_name,\n )\n return [expand_node]",
"def __opExpand2(self,that,op, out=None):\n A = self\n B = that if isinstance(that,Factor) else Factor([],that)\n vall = A.v | B.v\n dA = list(map(lambda x:x.states if x in A.v else 1 ,vall))\n dB = list(map(lambda x:x.states if x in B.v else 1 ,vall))\n if ( (out is not None) and (out.v == vall) ):\n f = out # if out can be written to directly, do so\n else: \n f = Factor(vall) # otherwise, make storage for output function\n op( A.t.reshape(dA,order='A') , B.t.reshape(dB,order='A'), out=f.t ) # TODO: order=A necessary?\n if (out is not None and f is not out):\n out.__build(f.v,f.t) # if out requested but not used, write f's table into out\n return out\n return f",
"def recruit(self, nidxs_remain, oldlinks=None):\n\n steps = [{'nidxs': list(nidxs_remain), 'links': set()}]\n new_links = set()\n\n if oldlinks is None:\n oldlinks = set(self.link2featidx.keys())\n\n # all nodes associated to oldlinks\n nidxs = set([n for link in oldlinks if link in self.link2lidx for n in self.lidx2nidx[self.link2lidx[link]]])\n\n # neighbor nodes to eval/train in this expansion\n new_nidxs = nidxs & nidxs_remain\n if new_nidxs:\n # remaining node for next expansion\n nidxs_remain -= new_nidxs\n\n # find new links to expand feature set\n steps[0]['nidxs'] = list(new_nidxs)\n new_links = set([self.links[l] for n in nidxs for l in self.nidx2lidx[n]]) - oldlinks\n if new_links:\n steps[0]['links'] = new_links\n\n if nidxs_remain and new_links:\n return steps + self.recruit(nidxs_remain=nidxs_remain, oldlinks=oldlinks | new_links)\n\n return steps",
"def flatten(self):\n # get flattened circuit and corresponding expr_map\n cq_flat, expr_map = cirq.flatten(self)\n self.assign(cq_flat)\n if self.expr_map is not None:\n self._expr_map = quple.resolve_expression_map_conflicts(self.expr_map, expr_map)\n else:\n self._expr_map = expr_map",
"def expansion(self, actions):\n for action in actions: \n self.children[action[0]] = TreeNode()",
"def extend(self, mu):\n with self.logger.block('Extending the basis...'):\n U_h_mu = self.reductor.fom.solve(mu)\n self.fom_evaluations += 1\n self.new_reductor = deepcopy(self.reductor)\n try:\n self.new_reductor.extend_basis(U_h_mu)\n except ExtensionError:\n self.new_reductor = self.reductor\n self.new_rom = self.new_reductor.reduce()",
"def expand(node):\n if node.isTerminal():\n return node\n\n # Get the next unexplored state\n nextState = node.exploreChildNode()\n\n # If all states are already explored, recurse\n if nextState is not None:\n return nextState\n else:\n return expand(node.UCB1())",
"def expand(self, problem):\n return [self.child_node(problem, action)\n for action in problem.actions(self.state)]",
"def expand(self, problem):\n return [self.child_node(problem, action)\n for action in problem.actions(self.state)]",
"def SBMLTransforms_expandInitialAssignments(*args):\n return _libsbml.SBMLTransforms_expandInitialAssignments(*args)",
"def __expandNodes(self, node):\n for childNode in node.children():\n if childNode.expanded:\n idx = self.__bookmarksModel.nodeIndex(childNode)\n idx = self.__proxyModel.mapFromSource(idx)\n self.bookmarksTree.setExpanded(idx, True)\n self.__expandNodes(childNode)",
"def make_dag(self, expand=set()):\n G = nx.DiGraph()\n\n ## Inputs-to-Functions\n for f in self.functions:\n # Expand composed models\n if isinstance(f, FunctionModel) and (f.name in expand):\n G_ref = f.model.make_dag(expand=expand - {f})\n G_sub = nx.DiGraph()\n # Add nodes\n G_sub.add_node(f.name + \".var\")\n G_sub.add_node(f.name + \".out\")\n for g in f.model.functions:\n G_sub.add_node(f.name + \".\" + g.name)\n # Add node metadata\n nx.set_node_attributes(G_sub, f.name, \"parent\")\n\n # Add edges\n for u, v, d in G_ref.edges(data=True):\n # Add renamed edge\n if u == \"(var)\":\n G_sub.add_edge(f.name + \".var\", f.name + \".\" + v, **d)\n elif v == \"(out)\":\n G_sub.add_edge(f.name + \".\" + u, f.name + \".out\", **d)\n else:\n G_sub.add_edge(f.name + \".\" + u, f.name + \".\" + v, **d)\n\n # Compose the graphs\n G = nx.compose(G, G_sub)\n\n i_var = set(self.var).intersection(set(f.var))\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(\"(var)\", f.name + \".var\", label=s_var)\n else:\n G.add_edge(\"(var)\", f.name, label=s_var)\n\n ## Function-to-Function\n for i0 in range(len(self.functions)):\n for i1 in range(i0 + 1, len(self.functions)):\n f0 = self.functions[i0]\n f1 = self.functions[i1]\n i_var = set(f0.out).intersection(set(f1.var))\n\n ## If connected\n if len(i_var) > 0:\n s_var = \"{}\".format(i_var)\n ## Handle composed models\n if isinstance(f0, FunctionModel) and (f0.name in expand):\n name0 = f0.name + \".out\"\n else:\n name0 = f0.name\n if isinstance(f1, FunctionModel) and (f1.name in expand):\n name1 = f1.name + \".out\"\n else:\n name1 = f1.name\n\n G.add_edge(name0, name1, label=s_var)\n\n ## Functions-to-Outputs\n for f in self.functions:\n i_out = set(self.out).intersection(set(f.out))\n\n if len(i_out) > 0:\n s_out = \"{}\".format(i_out)\n ## Target composed model's out\n if isinstance(f, FunctionModel) and (f.name in expand):\n G.add_edge(f.name + \".out\", \"(out)\", label=s_out)\n ## An ordinary function\n else:\n G.add_edge(f.name, \"(out)\", label=s_out)\n\n # Add node metadata\n nx.set_node_attributes(G, {f.name: {\"parent\": self.name}})\n\n # Final metadata\n nx.set_node_attributes(G, {\"(var)\": {\"parent\": self.name}})\n nx.set_node_attributes(G, {\"(out)\": {\"parent\": self.name}})\n\n return G",
"def fL():\n for n in b.allNodes():\n n.autoplace()",
"def flatten(node: ir.Node) -> ir.Node:\n\n def visitor(node: ir.Node, args=None) -> ir.Node:\n if isinstance(node, ir.BinaryOp):\n\n # Flatten singleton BinaryOp\n if len(node.operand) == 1:\n return flatten(node.operand[0])\n\n # Flatten BinaryOp with reduction operators\n new_operator: List[str] = []\n new_operand: List[ir.Expr] = []\n for child_operator, child_operand in zip((None, *node.operator),\n node.operand):\n if child_operator is not None:\n new_operator.append(child_operator)\n # The first operator can always be flattened if two operations has the\n # same type.\n if child_operator in (None, '||', '&&', *'|&+*') and \\\n type(child_operand) is type(node):\n new_operator.extend(child_operand.operator)\n new_operand.extend(child_operand.operand)\n else:\n new_operand.append(child_operand)\n # At least 1 operand is flattened.\n if len(new_operand) > len(node.operand):\n return flatten(type(node)(operator=new_operator, operand=new_operand))\n\n # Flatten compound Operand\n if isinstance(node, ir.Operand):\n for attr in node.ATTRS:\n val = getattr(node, attr)\n if val is not None:\n if isinstance(val, ir.Node):\n return flatten(val)\n break\n else:\n raise util.InternalError('undefined Operand')\n\n # Flatten identity unary operators\n if isinstance(node, ir.Unary):\n minus_count = node.operator.count('-')\n if minus_count % 2 == 0:\n plus_count = node.operator.count('+')\n if plus_count + minus_count == len(node.operator):\n return flatten(node.operand)\n not_count = node.operator.count('!')\n if not_count % 2 == 0 and not_count == len(node.operator):\n return flatten(node.operand)\n\n # Flatten reduction functions\n if isinstance(node, ir.Call):\n operator = getattr(node, 'name')\n if operator in ir.REDUCTION_FUNCS:\n operands: List[ir.Expr] = []\n for operand in getattr(node, 'arg'):\n if (isinstance(operand, ir.Call) and\n getattr(operand, 'name') == operator):\n operands.extend(getattr(operand, 'arg'))\n else:\n operands.append(operand)\n if len(operands) > len(getattr(node, 'arg')):\n return flatten(ir.Call(name=operator, arg=operands))\n\n return node\n\n if not isinstance(node, ir.Node):\n return node\n\n return node.visit(visitor)",
"def expand(self):\n\t\tfor move in self.moves:\n\t\t\tm = self.Game.create_move(self.State, move.row, move.column, move.shift, False)\n\t\t\tchildstate = self.Game.apply_move(copy.deepcopy(self.State), m)\n\t\t\tchild = GameNode(self.Game, m, childstate, self.Game.get_moves(childstate), self)\n\t\t\tself.addChild(child)",
"def zzX_expand(*polys):\n f = polys[0]\n\n for g in polys[1:]:\n f = zzX_mul(f, g)\n\n return f",
"def reduce_basis(blst):\n if blst == []: # blst represents scalar\n blst_coef = [S.One]\n blst_expand = [[]]\n return blst_coef, blst_expand\n blst_expand = [blst]\n blst_coef = [S.One]\n blst_flg = [False]\n # reduce untill all blst revise flgs are True\n while not reduce(operator.and_, blst_flg):\n for i in range(len(blst_flg)):\n if not blst_flg[i]: # keep revising if revise flg is False\n tmp = MV.reduce_basis_loop(blst_expand[i])\n if isinstance(tmp, bool):\n blst_flg[i] = tmp # revision of blst_expand[i] complete\n elif len(tmp) == 3: # blst_expand[i] contracted\n blst_coef[i] = tmp[0] * blst_coef[i]\n blst_expand[i] = tmp[1]\n blst_flg[i] = tmp[2]\n else: # blst_expand[i] revised\n blst_coef[i] = -blst_coef[i]\n # if revision force one more pass in case revision\n # causes repeated index previous to revised pair of\n # indexes\n blst_flg[i] = False\n blst_expand[i] = tmp[3]\n blst_coef.append(-blst_coef[i] * tmp[0])\n blst_expand.append(tmp[1])\n blst_flg.append(tmp[2])\n new_blst_coef = []\n new_blst_expand = []\n for (coef, expand) in zip(blst_coef, blst_expand):\n if expand in new_blst_expand:\n i = new_blst_expand.index(expand)\n new_blst_coef[i] += coef\n else:\n new_blst_expand.append(expand)\n new_blst_coef.append(coef)\n return new_blst_coef, new_blst_expand",
"def expand(self, individual: Dict[str, Union[str, Dict[str, List[int]], Callable]]):\n genes = individual[\"genome\"]\n\n pattern = [\"<expr_0>\"] # starts the pattern as the root symbol\n\n current_index = {i: 0 for i in self.grammar.keys()} # initializes the indexes for each gene respective\n # to a non terminal in the grammar\n\n i = 0\n while i < len(pattern): # while we have not reached the end of the expansion\n key = pattern[i]\n\n if key in self.grammar.keys():\n current_option = genes[key][current_index[key]] # option set by the gene\n\n out = self.grammar[key][current_option] \n out = out.split(\" \")\n \n pattern = pattern[:i] + out + pattern[i + 1:] # inserts the expantion into the current pattern\n\n current_index[key] += 1 # sets the index to look for the next gene\n continue\n i += 1\n\n individual[\"fenotype\"] = eval(\"lambda X1, X2: \" + \" \".join(pattern)) # generates the function as a lambda function\n # the idea is to speed up the evaluation process\n # while still having the flexibility of the\n # eval function in python",
"def ExpandAll(self):\r\n\r\n if self._anchor:\r\n self.ExpandAllChildren(self._anchor)\r\n\r\n self._sendEvent = True\r\n self._dirty = True",
"def run(self, dag):\n if self._target_basis is None and self._target is None:\n return dag\n\n qarg_indices = {qubit: index for index, qubit in enumerate(dag.qubits)}\n\n # Names of instructions assumed to supported by any backend.\n if self._target is None:\n basic_instrs = [\"measure\", \"reset\", \"barrier\", \"snapshot\", \"delay\"]\n target_basis = set(self._target_basis)\n source_basis = set(self._extract_basis(dag))\n qargs_local_source_basis = {}\n else:\n basic_instrs = [\"barrier\", \"snapshot\"]\n target_basis = self._target.keys() - set(self._non_global_operations)\n source_basis, qargs_local_source_basis = self._extract_basis_target(dag, qarg_indices)\n\n target_basis = set(target_basis).union(basic_instrs)\n\n logger.info(\n \"Begin BasisTranslator from source basis %s to target basis %s.\",\n source_basis,\n target_basis,\n )\n\n # Search for a path from source to target basis.\n search_start_time = time.time()\n basis_transforms = _basis_search(self._equiv_lib, source_basis, target_basis)\n\n qarg_local_basis_transforms = {}\n for qarg, local_source_basis in qargs_local_source_basis.items():\n expanded_target = set(target_basis)\n # For any multiqubit operation that contains a subset of qubits that\n # has a non-local operation, include that non-local operation in the\n # search. This matches with the check we did above to include those\n # subset non-local operations in the check here.\n if len(qarg) > 1:\n for non_local_qarg, local_basis in self._qargs_with_non_global_operation.items():\n if qarg.issuperset(non_local_qarg):\n expanded_target |= local_basis\n else:\n expanded_target |= self._qargs_with_non_global_operation[tuple(qarg)]\n\n logger.info(\n \"Performing BasisTranslator search from source basis %s to target \"\n \"basis %s on qarg %s.\",\n local_source_basis,\n expanded_target,\n qarg,\n )\n local_basis_transforms = _basis_search(\n self._equiv_lib, local_source_basis, expanded_target\n )\n\n if local_basis_transforms is None:\n raise TranspilerError(\n \"Unable to translate the operations in the circuit: \"\n f\"{[x[0] for x in local_source_basis]} to the backend's (or manually \"\n f\"specified) target basis: {list(expanded_target)}. This likely means the \"\n \"target basis is not universal or there are additional equivalence rules \"\n \"needed in the EquivalenceLibrary being used. For more details on this \"\n \"error see: \"\n \"https://qiskit.org/documentation/stubs/qiskit.transpiler.passes.\"\n \"BasisTranslator.html#translation_errors\"\n )\n\n qarg_local_basis_transforms[qarg] = local_basis_transforms\n\n search_end_time = time.time()\n logger.info(\n \"Basis translation path search completed in %.3fs.\", search_end_time - search_start_time\n )\n\n if basis_transforms is None:\n raise TranspilerError(\n \"Unable to translate the operations in the circuit: \"\n f\"{[x[0] for x in source_basis]} to the backend's (or manually specified) target \"\n f\"basis: {list(target_basis)}. This likely means the target basis is not universal \"\n \"or there are additional equivalence rules needed in the EquivalenceLibrary being \"\n \"used. 
For more details on this error see: \"\n \"https://qiskit.org/documentation/stubs/qiskit.transpiler.passes.BasisTranslator.\"\n \"html#translation_errors\"\n )\n\n # Compose found path into a set of instruction substitution rules.\n\n compose_start_time = time.time()\n instr_map = _compose_transforms(basis_transforms, source_basis, dag)\n extra_instr_map = {\n qarg: _compose_transforms(transforms, qargs_local_source_basis[qarg], dag)\n for qarg, transforms in qarg_local_basis_transforms.items()\n }\n\n compose_end_time = time.time()\n logger.info(\n \"Basis translation paths composed in %.3fs.\", compose_end_time - compose_start_time\n )\n\n # Replace source instructions with target translations.\n\n replace_start_time = time.time()\n\n def apply_translation(dag, wire_map):\n dag_updated = False\n for node in dag.op_nodes():\n node_qargs = tuple(wire_map[bit] for bit in node.qargs)\n qubit_set = frozenset(node_qargs)\n if node.name in target_basis:\n if isinstance(node.op, ControlFlowOp):\n flow_blocks = []\n for block in node.op.blocks:\n dag_block = circuit_to_dag(block)\n dag_updated = apply_translation(\n dag_block,\n {\n inner: wire_map[outer]\n for inner, outer in zip(block.qubits, node.qargs)\n },\n )\n if dag_updated:\n flow_circ_block = dag_to_circuit(dag_block)\n else:\n flow_circ_block = block\n flow_blocks.append(flow_circ_block)\n node.op = node.op.replace_blocks(flow_blocks)\n continue\n if (\n node_qargs in self._qargs_with_non_global_operation\n and node.name in self._qargs_with_non_global_operation[node_qargs]\n ):\n continue\n\n if dag.has_calibration_for(node):\n continue\n if qubit_set in extra_instr_map:\n self._replace_node(dag, node, extra_instr_map[qubit_set])\n elif (node.op.name, node.op.num_qubits) in instr_map:\n self._replace_node(dag, node, instr_map)\n else:\n raise TranspilerError(f\"BasisTranslator did not map {node.name}.\")\n dag_updated = True\n return dag_updated\n\n apply_translation(dag, qarg_indices)\n replace_end_time = time.time()\n logger.info(\n \"Basis translation instructions replaced in %.3fs.\",\n replace_end_time - replace_start_time,\n )\n\n return dag"
] | [
"0.56806636",
"0.56788045",
"0.5609273",
"0.55485564",
"0.5548129",
"0.54040587",
"0.52648944",
"0.518253",
"0.51816136",
"0.5165354",
"0.51469094",
"0.5050234",
"0.5043392",
"0.5037604",
"0.49747393",
"0.49306676",
"0.49135447",
"0.4849872",
"0.4849872",
"0.48495495",
"0.48488018",
"0.48470798",
"0.48309085",
"0.4828204",
"0.48113358",
"0.48022115",
"0.47651997",
"0.47584695",
"0.47521135",
"0.4748734"
] | 0.60018677 | 0 |
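A toy sketch of the recursive expansion pattern used by the unrolling pass above, assuming nothing about the real DAG/Qiskit API; Gate, unroll and the gate names are hypothetical stand-ins for node.op.decompositions() and substitute_node_with_dag.

class Gate:
    def __init__(self, name, decomposition=None):
        self.name = name
        self.decomposition = decomposition or []

def unroll(gates, basis):
    out = []
    for gate in gates:
        if gate.name in basis:
            out.append(gate)  # already a basis gate, keep it
        elif gate.decomposition:
            out.extend(unroll(gate.decomposition, basis))  # recursively expand the rule
        else:
            raise ValueError("cannot unroll %s into basis %s" % (gate.name, sorted(basis)))
    return out

h = Gate("h", [Gate("u2")])
swap = Gate("swap", [Gate("cx"), Gate("cx"), Gate("cx")])
print([g.name for g in unroll([h, swap], basis={"u2", "cx"})])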
Compile the AST into a bytecode | def ast_to_bytecode(ast):
bc = compile_ast(ast, ast.scope)
return bc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compile(cls, module_ast, filename):\n compiler = cls(filename)\n compiler.visit(module_ast)\n\n module_ops = [(SetLineno, 1)]\n extend_ops = module_ops.extend\n\n # Generate the startup code for the module\n for start in STARTUP:\n start_code = compile(start, filename, mode='exec')\n # Skip the SetLineo and ReturnValue codes\n extend_ops(Code.from_code(start_code).code[1:-2])\n\n # Add in the code ops for the module\n extend_ops(compiler.code_ops)\n\n # Generate the cleanup code for the module\n for end in CLEANUP:\n end_code = compile(end, filename, mode='exec')\n # Skip the SetLineo and ReturnValue codes\n extend_ops(Code.from_code(end_code).code[1:-2])\n \n # Add in the final return value ops\n extend_ops([\n (LOAD_CONST, None),\n (RETURN_VALUE, None),\n ])\n\n # Generate and return the module code object.\n mod_code = Code(\n module_ops, [], [], False, False, False, '', filename, 0, '',\n )\n return mod_code.to_code()",
"def compile_simple(py_ast, filename):\n code = compile(py_ast, filename, mode='eval')\n code = update_firstlineno(code, py_ast.lineno)\n bp_code = Code.from_code(code)\n replace_global_loads(bp_code.code)\n optimize_locals(bp_code.code)\n bp_code.newlocals = False\n return bp_code.to_code()",
"def compile(cls, module_ast, filename):\n\n # Protect against unicode filenames, which are incompatible\n # with code objects created via types.CodeType\n if isinstance(filename, unicode):\n filename = filename.encode(sys.getfilesystemencoding())\n\n # Generate the startup code for the module\n module_ops = [(SetLineno, 1)]\n for start in STARTUP:\n start_code = compile(start, filename, mode='exec')\n bp_code = Code.from_code(start_code)\n # Skip the SetLineo and ReturnValue codes\n module_ops.extend(bp_code.code[1:-2])\n\n # Add in the code ops for the module\n compiler = cls(filename)\n compiler.visit(module_ast)\n module_ops.extend(compiler.code_ops)\n\n # Generate the cleanup code for the module\n for end in CLEANUP:\n end_code = compile(end, filename, mode='exec')\n bp_code = Code.from_code(end_code)\n # Skip the SetLineo and ReturnValue codes\n module_ops.extend(bp_code.code[1:-2])\n\n # Add in the final return value ops\n module_ops.extend([\n (LOAD_CONST, None),\n (RETURN_VALUE, None),\n ])\n\n # Generate and return the module code object.\n mod_code = Code(\n module_ops, [], [], False, False, False, '', filename, 0, '',\n )\n return mod_code.to_code()",
"def code(self):\r\n if (os.path.exists(self._bytecode_path) and\r\n os.path.getmtime(self.full_path) <= os.path.getmtime(self._bytecode_path)):\r\n with open(self._bytecode_path, 'rb') as bytecode:\r\n return marshal.load(bytecode)\r\n else:\r\n with open(self.full_path, 'rb') as source:\r\n code = compile(source.read(), self.full_path, 'exec')\r\n with open(self._bytecode_path, 'wb') as bytecode:\r\n marshal.dump(code, bytecode)\r\n return code",
"def _compile(self, source: str, filename: str) -> CodeType:\n return compile(source, filename, \"exec\") # type: ignore",
"def compile(self, workdir):\n with open(workdir) as f:\n ast = self.parser.generate_ast(f.read())\n f.close()\n\n return None",
"def generate_bytecode(assembly: list, insert_vyper_signature: bool) -> bytes:\n return compile_ir.assembly_to_evm(assembly, insert_vyper_signature=insert_vyper_signature)[0]",
"def compile(cls, node, filename):\n compiler = cls(filename)\n compiler.visit(node)\n return compiler.stack.pop()",
"def _compile(self, filename, source):\n \n if source and source[-1] != '\\n':\n source = source + '\\n'\n code = __builtin__.compile(source, filename.cStr(), 'exec')\n\n # try to cache the compiled code\n pycFilename = Filename(filename)\n pycFilename.setExtension(pycExtension)\n try:\n f = open(pycFilename, 'wb')\n except IOError:\n pass\n else:\n f.write('\\0\\0\\0\\0')\n f.write(struct.pack('<I', self.timestamp))\n f.write(marshal.dumps(code))\n f.flush()\n f.seek(0, 0)\n f.write(imp.get_magic())\n f.close()\n\n return code",
"def __compile_ir(self):\n self.builder.ret_void()\n llvm_ir = str(self.module)\n mod = self.binding.parse_assembly(llvm_ir)\n mod.verify()\n\n self.engine.add_module(mod)\n self.engine.finalize_object()\n self.engine.run_static_constructors()\n return mod",
"def source_to_code(self, data, path):\n\t\treturn _call_with_frames_removed(compile, data, path, 'exec', dont_inherit=True)",
"def get_code(self, fullname):\n source_path = self.get_filename(fullname)\n source_bytes = self.get_data(source_path)\n return compile(source_bytes, source_path, 'exec',\n dont_inherit=True)",
"def code_ast(source):\n source = textwrap.dedent(source)\n root = ast.parse(source)\n return ast.dump(root)",
"def visit_Python(self, node):\n # This compiles the given Python ast into a Python code object\n # then disassembles it into a byteplay code object. This allows\n # us to interleave the instructions with those generated for\n # the rest of the module and then compile a single unified \n # code object.\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bpc = Code.from_code(py_code)\n # Skip the SetLineo and ReturnValue codes\n self.code_ops.extend(bpc.code[1:-2])",
"def _compile(self, tocompile, parameters):\n compiler = self.dialect.statement_compiler(self.dialect, tocompile, parameters)\n compiler.compile()\n return compiler",
"def compile(cls, node, filename):\n compiler = cls(filename)\n compiler.visit(node)\n code_ops = compiler.code_ops\n code = Code(\n code_ops, [], ['identifiers', 'toolkit'], False, False, True, \n node.name, filename, node.lineno, node.doc,\n )\n return code",
"def compileModule(self, code):\n r = ast.Module(None, self.compileSuite(code))\n #print r\n return r",
"def compile_IR(ir):\n triple = re.search(\n r'target\\s+triple\\s*=\\s*\"(?P<triple>[-\\d\\w\\W_]+)\"\\s*$',\n ir, re.M).group('triple')\n\n # Create execution engine\n target = llvm.Target.from_triple(triple)\n target_machine = target.create_target_machine()\n backing_mod = llvm.parse_assembly(\"\")\n engine = llvm.create_mcjit_compiler(backing_mod, target_machine)\n\n # Create LLVM module and compile\n mod = llvm.parse_assembly(ir)\n mod.verify()\n engine.add_module(mod)\n engine.finalize_object()\n engine.run_static_constructors()\n\n return engine",
"def compile_asm(self, src, dst):\n cmd = [self.get_command(), \"-S\", src, \"-o\", dst] + self.__compiler_flags + self.__compiler_flags_extra + self.__definitions + self.__include_directories\n (so, se) = run_command(cmd)\n if 0 < len(se) and is_verbose():\n print(se)",
"def compile(expression):",
"def compileInstruction(self, ins):\n pass",
"def compile_ir(engine, llvm_ir):\n # Create a LLVM module object from the IR\n mod = llvm.parse_assembly(llvm_ir)\n mod.verify()\n # Now add the module and make sure it is ready for execution\n engine.add_module(mod)\n engine.finalize_object()\n return mod",
"def asts_to_code(asts, flags=0,tab_char=\"\\t\"):\n\tif flags& RAW_JUMPS:\n\t\tmax_offset_len = len(str(asts[-1][2]))\n\t\treturn \"\\n\".join(str(offset).ljust(max_offset_len,\" \") + tab_char * (indent + 1) + str(ast) for indent, ast, offset in asts)\n\telse:\n\t\treturn \"\\n\".join(tab_char * indent + str(ast) for indent, ast in asts)",
"def compile(self, code, options=''):\n try:\n data = self.client.cli.compile_contract(body=dict(\n code=code,\n options=options\n ))\n return data.bytecode\n except OpenAPIClientException as e:\n raise ContractError(e)",
"def dump_compiler(input_bytes):\n return dump_from_release(input_bytes, \"compiler\")",
"def compile(self, s):\n f = io.BytesIO()\n for t in s.split():\n t_up = t.upper()\n if t_up in self.opcode_to_int:\n f.write(int2byte(self.opcode_to_int[t]))\n elif (\"OP_%s\" % t_up) in self.opcode_to_int:\n f.write(int2byte(self.opcode_to_int[\"OP_%s\" % t]))\n elif t_up.startswith(\"0X\"):\n d = binascii.unhexlify(t[2:])\n f.write(d)\n else:\n v = self.compile_expression(t)\n self.write_push_data([v], f)\n return f.getvalue()",
"def compile_to_object_code(self, module):\n target_machine = self.target.create_target_machine(codemodel='small')\n\n # Convert LLVM IR into in-memory representation\n llvmmod = llvm.parse_assembly(str(module))\n return target_machine.emit_object(llvmmod)",
"def compile(self):\n\n for v in list(self.graph.values()):\n if isinstance(v, Graph):\n v.compile()\n self._compiled = compile_graph(self.graph)\n return self._compiled",
"def compile(expression: str) -> Compiled:\r\n e = Compiled(expression)\r\n e.tokenize()\r\n return e",
"def compile(path: str) -> bytes:\n if not path.endswith('.py'):\n raise InvalidPathException(path)\n\n return Compiler().compile(path)"
] | [
"0.69451076",
"0.6880073",
"0.6878707",
"0.68005747",
"0.67566556",
"0.67168176",
"0.6529854",
"0.64560026",
"0.6446436",
"0.6423977",
"0.6411034",
"0.63917476",
"0.6340888",
"0.6338778",
"0.6334012",
"0.63297427",
"0.6328317",
"0.63082427",
"0.6300762",
"0.6297357",
"0.62907624",
"0.62629384",
"0.6259067",
"0.62431204",
"0.61546725",
"0.6143328",
"0.61307657",
"0.6088242",
"0.60684353",
"0.6063108"
] | 0.8348027 | 0 |
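The compile_ast helper in the row above is project-specific; a sketch of the same idea using only the standard library, where ast.parse builds the AST and the built-in compile turns it into a bytecode object.

import ast
import dis

tree = ast.parse("x * 2 + 1", mode="eval")
code = compile(tree, filename="<example>", mode="eval")
dis.dis(code)                   # show the generated bytecode
print(eval(code, {"x": 20}))    # 41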
Reset the current epoch's progress every new epoch | def on_epoch_begin(self, epoch, logs={}):
self.current_progress = 0
self.loss = 0
self.accuracy = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_epoch(self):\n self.ix = 0",
"def set_epoch(self, epoch):\r\n pass",
"def on_train_begin(self):\n self.epoch_tqdm = self.tqdm(total=self.trainer.total_epochs,\n unit='epoch',\n leave=True,\n position=0,\n ascii=self.ascii)\n self.epoch_bar = self.epoch_tqdm.__enter__()\n self.last_step = 0",
"def changeEpochs(self,epochs):\n self.epochs = epochs",
"def set_epoch(self, epoch):\n self.epoch = epoch",
"def on_epoch_start(self):\n self.current_epoch += 1\n self.current_lr = self.fn(self.current_epoch)\n self.model.set_learning_rate(self.current_lr)\n self.epochs += [self.current_epoch]\n self.learning_rate += [self.current_lr]",
"def cur_epoch(self, epoch: int):\n # allow setter for training resumption\n self._cur_epoch = epoch",
"def run_epoch( self ):\n # --- Init Epoch ----\n total_epoch_loss = 0.0\n epoch_batches = self.dataset.dataloader( self.config.neuron.epoch_length )\n progress_bar = qqdm(enumerate(epoch_batches), total=len(epoch_batches), desc=format_str('blue', f'Epoch Progress'))\n for iteration, (inputs) in progress_bar:\n\n # ---- Forward / Backward ----\n prev_mechanism_weights = self.mechanism_weights.tolist()\n output = self.train ( batch = { 'inputs': inputs } )\n next_mechanism_weights = self.mechanism_weights.tolist()\n total_epoch_loss += output.local_target_loss.item()\n\n # ---- Logs ----\n self.epoch_logs (\n progress_bar,\n iteration = iteration,\n output = output,\n prev_mechanism_weights = prev_mechanism_weights,\n next_mechanism_weights = next_mechanism_weights\n )\n self.global_step += 1\n\n self.epoch_loss = total_epoch_loss / self.config.neuron.epoch_length\n self.epoch += 1",
"def on_epoch_end(self):\n self.epoch_bar.update()",
"def update_epoch(self):\n raise NotImplementedError",
"def reset_train(self):\n\n self.model.apply(self._reset_weights)\n self.epoch_loss.reset()\n self.epoch = 0\n del self.batch_process\n self.batch_process = None",
"def epoch_start(self, epoch):\n self.epoch = epoch",
"def next_epoch(self, state):\n return self.reset(state)",
"def _epoch_before_hook(self):\n self._train_steps_this_epoch = 0",
"def new_epoch(self):\n self._curr_batch = 0\n if self.shuffle_order:\n self.shuffle()",
"def on_train_end(self, logs=None):\n self.epoch_iter = 0",
"def reset_epoch_cache(self):\n self.epoch_cache = {\"train\":PerformanceBatch(), \n \"val\":PerformanceBatch(), \n \"test\":PerformanceBatch()}",
"def reset(self, batch_size=None, is_new_epoch=False):\n if is_new_epoch:\n self.epoch += 1\n\n self.batch_sampler.reset(batch_size, epoch=self.epoch)",
"def _set_current_step(self, epoch: int):\n self._cur_step = epoch * self._steps_per_epoch",
"def reset(self):\n self.epochs = 0\n # Shuffle the training data\n perm = np.arange(self.num_train)\n np.random.shuffle(perm)\n assert self.num_train == self.train_images.shape[\n 0], 'Error incorrect shuffling mask'\n self.train_images = self.train_images[perm]\n self.train_labels = self.train_labels[perm]\n self.curr_train_index = 0",
"def begin_epoch(self, epoch, model):\n super().begin_epoch(epoch, model)\n if hasattr(self.criterion, \"set_epoch\"):\n self.criterion.set_epoch(epoch)",
"def train_epoch(self):\r\n for loader in self.loaders:\r\n if self.epoch % loader.epoch_interval == 0:\r\n self.cycle_dataset(loader)\r\n\r\n self._stats_new_epoch()\r\n self._write_tensorboard()\r\n print('{}th epoch train / eval done!'.format(self.epoch))",
"def set_train_epoch(self, epoch: int):\n self._train_epoch = epoch",
"def set_train_epoch(self, epoch: int):\n if hasattr(self, 'cls_head'):\n self.cls_head.set_train_epoch(epoch)",
"def on_epoch_start(self):\n self.current_epoch += 1\n self.current_w = self.fn(self.current_epoch)\n self.model.set_kl_weight(self.current_w)\n self.epochs += [self.current_epoch]\n self.kl_weights += [self.current_w]",
"def reset(self):\n self.tracker.reset()\n self.total_max_q = 0.0\n self.episode_step = 0\n self.episode += 1",
"def reset(self):\n self.tracker.reset()\n self.episode += 1\n self.episode_step = 0",
"def __new_epoch(self):\n self.epoch += 1\n indices = np.arange(self.data.shape[0])\n np.random.shuffle(indices)\n self.q = list(indices)",
"def before_training_epoch(self, epoch, **kw):\n self.current_row = {MetricName(\"epoch\"): epoch}",
"def on_epoch_begin(self, epoch, logs=None):"
] | [
"0.7894786",
"0.68834776",
"0.68710005",
"0.68561596",
"0.67527735",
"0.67445993",
"0.67407787",
"0.67324847",
"0.6667862",
"0.6618142",
"0.6615541",
"0.659098",
"0.65909296",
"0.6580884",
"0.65741605",
"0.6461013",
"0.6439226",
"0.641213",
"0.64069456",
"0.6395008",
"0.6384092",
"0.63657165",
"0.63607705",
"0.63516325",
"0.6328321",
"0.63162965",
"0.63081527",
"0.629692",
"0.6281084",
"0.62711036"
] | 0.7316069 | 1 |
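A framework-free sketch of the callback pattern in the row above; ProgressCallback and the driving loop are illustrative only and assume a Keras-style on_epoch_begin/on_batch_end protocol rather than any specific library.

class ProgressCallback:
    def __init__(self):
        self.current_progress = 0
        self.loss = 0.0
        self.accuracy = 0.0

    def on_epoch_begin(self, epoch, logs=None):
        # reset the running counters at the start of every epoch
        self.current_progress = 0
        self.loss = 0.0
        self.accuracy = 0.0

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        self.current_progress += 1
        self.loss += logs.get("loss", 0.0)

cb = ProgressCallback()
for epoch in range(2):
    cb.on_epoch_begin(epoch)
    for batch in range(3):
        cb.on_batch_end(batch, {"loss": 0.5})
    print(epoch, cb.current_progress, cb.loss)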
Given a data point, append a new node to the linked list. | def append(self, data):
new_node = Node(data)
current_node = self.head
while current_node.next!=None:
current_node = current_node.next
        current_node.next = new_node  # when we are at the last node, set its pointer to point at the new Node
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def append(self, data):\n if not self.head:\n self.head = DListNode(data=data)\n return\n curr = self.head\n while curr.next:\n curr = curr.next\n curr.next = DListNode(data=data, prev=curr)",
"def append(self, data):\n if self.head is None:\n self.head = ListNode(data, None)\n else:\n itr = self.head\n while itr:\n if itr.next is None:\n itr.next = ListNode(data, None)\n return\n itr = itr.next",
"def append_node(self, new_data):\n\n #create a new node and put new data.\n new_node = Node(new_data)\n\n if self.head is None:\n self.head = new_node\n return\n\n end = self.head\n while end.next:\n end = end.next\n\n end.next = new_node",
"def append(self, data):\n new_node = Node(data)\n\n if not self.head:\n self.head = new_node\n return\n\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n\n last_node.next = new_node",
"def append(self, data):\n if self.head is None: # checking a corner case of linked list being empty\n self.head = ListNode(data)\n else: # a normal traversal and append to the end of the tail node\n temp_node = self.head\n new_node = ListNode(data)\n while temp_node.next is not None:\n temp_node = temp_node.next\n temp_node.next = new_node",
"def append(self, data):\n if not self.head :\n self.head = SinglyLLNode(data = data, next = None)\n return\n current = self.head\n while current.next:\n current = current.next\n current.next = SinglyLLNode(data = data ,next = None)\n return current.next",
"def append(self, data):\n new_node = Node(data)\n\n if self.head is None:\n self.head = new_node\n return\n\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n last_node.next = new_node",
"def append(self, data):\n new_node = Node(data)\n\n if self.head is None:\n self.head = new_node\n return\n\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n last_node.next = new_node",
"def append(self, data):\n node = Node(data)\n if not self.head:\n self.head = node\n return\n temp = self.head\n while temp.next:\n temp = temp.next\n temp.next = node",
"def append(self, data):\n new_node = SingleNode(data)\n\n if self.head is None:\n self.head = new_node\n return\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n last_node.next = new_node",
"def append(self, data):\n if not self.head :\n new_element = DoublyLLNode(data = data)\n self.head = new_element\n return \n current = self.head\n while current.next is not None:\n current = current.next\n new_node = DoublyLLNode(data = data)\n current.next = new_node\n new_node.previous = current",
"def appendPoint(self, point):\n self.points.append(point)",
"def insert_append(self, data):\n node = Node(data)\n if self.is_empty():\n self.head = node\n else:\n cur = self.head\n while cur.next != None:\n cur = cur.next\n cur.next = node\n node.prev = cur",
"def append(self, point):\n self.points.append(point)\n return self",
"def add_after(self, data, new_node):\n if not self.head:\n raise Exception(\"List is empty\")\n for node in self:\n if node.data == data:\n new_node.next = node.next\n node.next = new_node\n return\n raise Exception(\"Node with data '{}' not found\".format(data))",
"def append_point(self, point):\n self._points.append(point)",
"def add_node(self, data):\n new_node = Node(data)\n if self.cur_node is not None:\n new_node.next, self.cur_node.next = self.cur_node.next, new_node\n self.cur_node = new_node\n self.length += 1\n self.cur_pos += 1\n if self.start_node is None:\n self.start_node = self.cur_node\n # print(\"Node({}) added to {}\".format(new_node.data, self.cur_pos-1))",
"def append(self, data):\n\n node = Node(data)\n\n if self.head == None:\n\n self.head = node\n\n else:\n\n traverse = self.head\n\n while traverse.next != None:\n traverse = traverse.next\n\n traverse.next = node",
"def add(self, point):\n self.points.append(point)",
"def append(self, data):\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n self.size += 1",
"def addPoint(self, point):\n self.points.append(point)",
"def append(self, data):\n\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n self.size += 1",
"def append(self, point):\n point = Point(point)\n self._elements.append(point)",
"def append(self, point):\n point = Point(point)\n self._elements.append(point)",
"def append(self, point):\n point = Point(point)\n if offset_attr not in point:\n point[offset_attr] = self.next_offset()\n self._elements.append(point)",
"def push(self, new_data):\n new_node = Node(new_data)\n new_node.next = self.head\n self.head = new_node",
"def insert_end(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n while temp.next is not None:\n temp = temp.next\n temp.next = ListNode(data)",
"def addNode(self, new_data):\r\n curr = self.head\r\n\r\n # Add new Node\r\n if curr is None:\r\n n = Node(new_data) \r\n self.head = n\r\n return\r\n \r\n # Sort Nodes \r\n if curr.data > new_data:\r\n n = Node(new_data) \r\n n.next = curr\r\n self.head = n\r\n return\r\n\r\n while curr.next is not None:\r\n if curr.next.data > new_data:\r\n break\r\n curr = curr.next\r\n n = Node(new_data) \r\n n.next = curr.next\r\n curr.next = n\r\n return",
"def push(self, new_data):\n new_node = Node(new_data)\n new_node.next = self.head\n self.head = new_node",
"def add_before(self, data, new_node):\n if not self.head:\n raise Exception(\"List is empty\")\n if self.head.data == data:\n return self.add_first(new_node)\n prev_node = self.head\n for node in self:\n if node.data == data:\n prev_node.next = new_node\n new_node.next = node\n return\n prev_node = node\n raise Exception(\"Node with data '{}' not found\".format(data))"
] | [
"0.76220006",
"0.747702",
"0.7363767",
"0.7332331",
"0.72958094",
"0.7197168",
"0.7193461",
"0.7193461",
"0.7162837",
"0.714711",
"0.71415627",
"0.70632493",
"0.70528865",
"0.7026272",
"0.6981679",
"0.6967536",
"0.69511336",
"0.6921676",
"0.68736476",
"0.6815803",
"0.68100655",
"0.67945164",
"0.6777118",
"0.6777118",
"0.674973",
"0.67278016",
"0.67138445",
"0.67066526",
"0.665371",
"0.6635432"
] | 0.75516033 | 1 |
Return the index of the given data point in the linked list | def get_index(self, data):
current_node = self.head
current_index = 0
while current_node.next != None:
current_node = current_node.next
if current_node.data == data:
return current_index
current_index += 1
print("data doesn't exist")
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def index(self, data):\n\n traverse = self.head\n index = 0\n while traverse.next != None:\n\n if traverse.data == data:\n return index\n traverse = traverse.next\n index += 1\n\n if traverse.data == data:\n return index",
"def find(self, data):\n index = 0\n current = self.head\n while current:\n if current.data == data:\n return index\n index += 1\n current = current.next\n\n return -1",
"def index(self, item):\n \"\"\"\n :type item: Node\n :rtype int\n \"\"\"\n curr = self.head\n idx = 0\n while curr:\n if item == curr.getData():\n break\n idx += 1\n curr = curr.getNext()\n return idx",
"def index(self, item):\n \n pos = 0\n current = self.head\n \n while current is not None:\n if current.get_data() == item:\n return pos\n else:\n current = current.get_next()\n pos += 1\n \n raise ValueError('{} is not in list'.format(item))",
"def index(self, elem):\n pointer = self.head\n i = 0\n while (pointer):\n if pointer.data == elem:\n return i\n pointer = pointer.next\n i += 1\n raise ValueError(\"{} is not in list\".format(elem))",
"def __find_node_index(self, index):\n cur_index = 0\n cur_node = self.head\n prev_node = None\n while cur_node is not None:\n if index >= len(cur_node.data_list) + cur_index:\n cur_index += len(cur_node.data_list)\n prev_node = cur_node\n cur_node = cur_node.next_node\n else:\n index -= cur_index\n break\n return index, prev_node, cur_node",
"def index(self, item):\n \n pos = 0\n current = self.head\n \n while current is not None:\n if current.get_data() == item:\n return pos\n # Early stop\n elif current.get_data() > item:\n raise ValueError('{} is not in list'.format(item))\n else:\n current = current.get_next()\n pos += 1\n \n raise ValueError('{} is not in list'.format(item))",
"def index(self, item: T) -> int:\n current = self.head\n index = 0\n while current is not None and current.item != item:\n current = current.link\n index += 1\n if current is None:\n raise ValueError(\"Item is not in list\")\n else:\n return index",
"def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1",
"def look_up(self, val):\n index = 0\n if self.head is None:\n print(\"List is empty\")\n start = self.head\n while start is not None:\n if start.data == val:\n return index\n start = start.next\n index += 1\n return \"No such element\"",
"def element_at(ll, position):\n curr = ll.head\n i = 1\n while curr != None:\n if i == position:\n break\n i += 1\n curr = curr.link\n\n if curr == None:\n return \"Index out of range\"\n else:\n return curr.data",
"def index(self, data):\n\n traverse = self.head\n count = 0\n while traverse.next != None:\n\n if traverse.data == data:\n return count\n traverse = traverse.next\n count += 1\n\n if traverse.data == data:\n return count",
"def get(self, index: int) -> int:\n curr = self.head\n count = 0\n if self.head is None:\n return -1\n if index == 0:\n return self.head.data\n while curr:\n if count == index:\n return curr.data\n count += 1\n curr = curr.next\n return -1",
"def index(self, item: Any) -> int:\n index_so_far = 0\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return index_so_far\n index_so_far += 1\n curr = curr.next\n raise ValueError",
"def get_list_index(self):\r\n return self.n",
"def index(self, item):\n\t\ti = 0\t\t\n\t\tif not self.len:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\treturn i\n\t\tactual = self.prim\n\t\twhile actual and actual.dato != item:\n\t\t\tactual = actual.prox\n\t\t\ti += 1\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\treturn i",
"def item_at_index(self, index):\n if index < 0 or index >= self.size:\n return -1\n\n if self.head is None:\n return -1\n\n curr = self.head\n for i in range(index):\n curr = curr.next\n return curr.val",
"def get_idx(self, key):\n found = [i for i, e in enumerate(self.list) if e.key == key]\n if found:\n return found[0]\n\n else:\n return -1",
"def _insertion_index(points, point):\n distance = sys.float_info.max\n index = None\n begin = points[-1]\n for i, p in enumerate(points):\n temp = _distance_to_line(begin, p, point)\n if temp < distance:\n distance = temp\n index = i\n begin = p\n return index",
"def get(self, index: int) -> int:\n if index < 0 or index >= self.size: return -1\n \n # choose search from head or tail\n if index + 1 < self.size - index:\n ptr = self.head\n for _ in range(index + 1):\n ptr = ptr.next\n else: # from tail\n ptr = self.tail\n for _ in range(self.size - index):\n ptr = ptr.prev\n return ptr.val",
"def get_data_index(self, data, data_point):\n\n if self.method == 1:\n idx = np.where((data[0,:]==data_point[0]) & \\\n (data[1,:]==data_point[1]) & \\\n (data[2,:]==data_point[2]) & \\\n (data[3,:]==data_point[3]) & \\\n (data[4,:]==data_point[4]))\n else:\n idx = np.where((data[0,:]==data_point[0]) & \\\n (data[1,:]==data_point[1]) & \\\n (data[2,:]==data_point[2]) & \\\n (data[3,:]==data_point[3]))\n\n idx = idx[0][0]\n\n return idx",
"def get(self, index: int) -> int:\n cnt = 0\n cur = self.head \n while cur != None:\n if(cnt == index):\n return cur.val\n cur = cur.next \n cnt += 1\n return -1",
"def get_p_idx(self, node_idx):\n idx = (node_idx + 1) / 2 - 1\n return idx",
"def get(self, index):\n cur = self.head\n while cur and index>0:\n cur = cur.next\n index -= 1\n if cur:\n return cur.val\n else:\n return -1",
"def get(self, index: int) -> int:\n if index < 0 or self.size <= index:\n return -1\n curr = self.head\n for _ in range(index + 1):\n curr = curr.next\n return curr.value",
"def get(self, index):\n if index < 0:\n return -1\n # print('index:',index)\n p = self.head\n while index and p:\n p = p.next\n index -= 1\n # print('after,index:',index)\n if index:\n return -1\n if p and p.next:\n return p.next.val\n return -1\n # self.printList()",
"def read_index(self, index):\n current = self.head\n if index == 0:\n return current.data\n elif index >= self.size() :\n return None\n else:\n position = 0\n while position < index:\n current = current.next_node\n position += 1\n return current.data",
"def find(self, number):\n cur_node = self.head\n while cur_node is not None:\n if number == cur_node.data.number():\n return cur_node.data\n cur_node = cur_node.next\n return -1",
"def get(self, index):\n if index < 0 or index >= self.length:\n return -1\n curr = self.head\n for i in range(1, index + 1):\n curr = curr.next\n return curr.val",
"def index(liste, value):\n\n for ii in range(len(liste)):\n if liste[ii] == value:\n return ii\n return None"
] | [
"0.7930216",
"0.7905522",
"0.7863779",
"0.78147644",
"0.77501416",
"0.76755315",
"0.7358543",
"0.71234745",
"0.706917",
"0.69959533",
"0.6918794",
"0.6905238",
"0.67796844",
"0.67350304",
"0.6718491",
"0.6688518",
"0.6673004",
"0.66494036",
"0.65912384",
"0.65364593",
"0.6528047",
"0.65264505",
"0.6525396",
"0.6503596",
"0.6492185",
"0.64894074",
"0.64641327",
"0.64634705",
"0.6450754",
"0.64268947"
] | 0.7952277 | 0 |
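A minimal, runnable sketch of the same index lookup, assuming the list uses a dummy (sentinel) head node so that real data starts at head.next, a convention the erase and delete snippets below appear to assume as well. The Node and LinkedList class names here are illustrative, not taken from the source.

class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

class LinkedList:
    def __init__(self):
        self.head = Node()  # sentinel node; data nodes start at head.next

    def append(self, data):
        node = self.head
        while node.next is not None:
            node = node.next
        node.next = Node(data)

    def get_index(self, data):
        node = self.head.next
        index = 0
        while node is not None:
            if node.data == data:
                return index  # index of the first matching node
            node = node.next
            index += 1
        return None  # data not present

ll = LinkedList()
for value in ("a", "b", "c"):
    ll.append(value)
print(ll.get_index("b"))  # 1
print(ll.get_index("z"))  # None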
Given the index, erase the node at that position in the linked list | def erase(self, index):
if index >= self.length():
print("ERROR")
return None
current_index = 0
current_node = self.head
while True:
last_node = current_node
current_node = current_node.next
if current_index == index:
last_node.next = current_node.next
return
current_index += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def erase(self, index):\n node = self._get_node_at(index) \n if node is None:\n raise IndexError('List index out of range.') \n if node == self.head: \n if node.next_node is None:\n self.tail = None \n else: \n node.next_node.prev_node = None \n self.head = node.next_node\n elif node == self.tail: \n node.prev_node.next_node = None \n self.tail = node.prev_node\n else: \n node.prev_node.next_node = node.next_node\n node.next_node.prev_node = node.prev_node\n return node.value",
"def delete(self, index):\n # check validity of index:\n if index < 0 or index > self.n:\n print(\"Index Error; please input valid index\")\n return\n # if head element is to be removed,\n if index == 0:\n _ = self.pop_front()\n return\n # else,\n temp_node = self.head\n for _ in range(index-1):\n temp_node = temp_node.next # traverse the list\n index_node = temp_node.next\n # unlink\n temp_node.next = temp_node.next.next\n index_node = None\n self.n -= 1",
"def delete_node_at_index(self, index):\n if index < 0 or index >= self.size:\n return\n\n curr = self.head\n if index == 0:\n self.head = curr.next\n else:\n for i in range(index - 1):\n curr = curr.next\n curr.next = curr.next.next\n\n self.size -= 1",
"def deleteAtIndex(self, index: int) -> None:\n node = self.get_node(index)\n\n if node:\n #print(\"called inside node to delete is \" + str(node) )\n prev_node = node.prev\n next_node = node.next\n\n if prev_node:\n prev_node.next = next_node\n else:\n self.head = next_node\n if next_node:\n next_node.prev = prev_node\n\n\n\n\n self.node_count -= 1",
"def delete_by_index(self, index):\n if index < 0 or index >= self.get_size():\n raise IndexError('Index out of bounds')\n if index == 0:\n self.head = self.head.next\n return\n i = 0\n temp = self.head\n while temp is not None:\n if i == index-1:\n temp.next = temp.next.next\n break\n temp = temp.next\n i += 1",
"def deleteAtIndex(self, index: int) -> None:\n if(index == 0):\n self.head = self.head.next\n else:\n prev = None \n cur = self.head \n cnt = 0 \n \n while cur != None:\n if(cnt == index):\n next_node = cur.next\n prev.next = next_node \n return\n else:\n prev = cur \n cur = cur.next\n cnt += 1",
"def deleteAtIndex(self, index):\n cur = self.head\n if cur == None:\n return\n elif index == 0:\n self.head = cur.next\n\n cur, i = self.head, 1\n while cur and i != index:\n cur = cur.next\n i += 1\n if cur.next == None:\n cur = None\n else:\n cur.next = cur.next.next",
"def delete_by_index(self, index):\n cur = self.head\n length=self.get_length()\n if type(index) is int:\n if self.is_empty():\n return\n else:\n if index > length:\n # The index value is out of range and prompts and exits\n print(\"Index is out of range.\")\n return\n else:\n if index == 0:\n if cur.next == None:\n self.head = None\n else:\n cur.next.prev = None\n self.head = cur.next\n return\n else:\n while (index) > 0:\n cur = cur.next\n index -= 1\n\n # Point the next node of cur to the next node of cur\n cur.prev.next = cur.next\n # Point the prev of the next node of cur to the previous node of cur\n cur.next.prev = cur.prev\n length -= 1\n return\n else:\n print(\"Index value is not int.\")\n return",
"def delete(self, index):\n if index == 0 and self.head is not None:\n self.head = self.head.next\n return\n\n current_index = 0\n current = self.head\n previous = None\n\n while current:\n if current_index == index:\n previous.next = current.next\n\n previous = current\n current = current.next\n current_index += 1",
"def deleteAtIndex(self, index: int) -> None:\n if index < 0 or index >= self.size:\n return\n\n curr = self.head\n for _ in range(index):\n curr = curr.next\n curr.next = curr.next.next\n self.size -= 1",
"def removeNode(self, index):\n del self.nodes[index]",
"def deleteAtIndex(self, index):\n if index >= 0 and index < self.length:\n prev = None\n curr = self.head\n _next = None\n if curr:\n _next = curr.next\n for i in range(1, index + 1):\n prev = curr\n curr = curr.next\n if curr:\n _next = curr.next\n if prev:\n prev.next = _next\n else:\n self.head = _next\n self.length -= 1",
"def remove_a_specific_item(self, index):\n\n current = self.head\n previous = None\n for i in range(index):\n previous = current\n current = current.next\n if previous is None: self.head = current.next\n else: previous.next = current.next\n self.size -= 1",
"def deleteAtIndex(self, index: int) -> None:\n if self.head == None:\n return -1\n curr = self.head\n if index == 0:\n self.head = curr.next\n return\n if index < 0:\n return -1\n for i in range(index - 1):\n curr = curr.next\n if curr is None:\n break\n if curr is None:\n return -1\n if curr.next is None:\n return -1\n \n next = curr.next.next\n curr.next = None\n curr.next = next",
"def deleteAtIndex(self, index):\n if index < 0 or index >= self.size:\n return\n\n curr = self.head\n if index == 0:\n self.head = curr.next\n else:\n for i in range(index - 1):\n curr = curr.next\n curr.next = curr.next.next\n\n self.size -= 1",
"def deleteAtIndex(self, index):\n cur = self.head\n prev = None\n# self.display(\"deleteAtIndex, deleting value at index \"+str(index))\n if not index:\n head = head.nxt\n if self.tail == cur:\n self.tail = None\n del cur\n return\n \n i = 0\n while i < index and cur:\n prev = cur\n cur = cur.nxt\n i+=1\n if prev:\n if cur:\n prev.nxt = cur.nxt\n if self.tail == cur:\n self.tail = prev\n del cur",
"def deleteAtIndex(self, index):\n\n if index < 0:\n return -1\n\n p = self.head\n while index and p: # 0-index before index-th\n p = p.next\n index -= 1\n\n if p == None or p.next == None:\n return\n if p.next.next:\n p.next.next.prev = p\n p.next = p.next.next\n if p.next == None:\n self.tail = p\n # self.printList()",
"def erase(self, index):\n if self.empty():\n return \"Linked List is empty\"\n size = self.size()\n if index > size - 1:\n return \"Size of the Linked List is less than the index\"\n\n idx = 0\n h = self.head\n previous = self.head\n while h.next is not None:\n if idx is index:\n if previous is h:\n data = h.data\n self.head = h.next\n return data\n else:\n data = h.data\n previous.next = h.next\n h = None\n return data\n idx += 1\n previous = h\n h = h.next\n\n # Pop the last element\n data = previous.data\n previous.next = None\n return data",
"def deleteAtIndex(self, index):\n if index >= self.len:\n return\n p = self.head\n while index > 0:\n index -= 1\n p = p.next\n if p.next is self.tail:\n self.tail = p\n p.next = p.next.next\n self.len -= 1",
"def deleteAtIndex(self, index: int) -> None:\n # if the index is invalid, do nothing\n if index < 0 or index >= self.size:\n return\n \n # find predecessor and successor of the node to be deleted\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next.next\n else:\n succ = self.tail\n for _ in range(self.size - index - 1):\n succ = succ.prev\n pred = succ.prev.prev\n \n # delete pred.next \n self.size -= 1\n pred.next = succ\n succ.prev = pred",
"def deleteAtIndex(self, index: int) -> None:\n if index < 0 or index > self.cnt-1:\n return \n tmp = self.dummy\n for _ in range(index):\n tmp = tmp.next\n if index == self.cnt - 1:\n tmp.next = None\n else:\n tmp.next = tmp.next.next\n if tmp.next:\n tmp.next.pre = tmp\n self.cnt -= 1",
"def remove_index(self, index):\n current = self.head\n position = index\n if index > (self.size() - 1):\n return None\n elif index == 0:\n self.head = current.next_node\n else: \n while position >= 1:\n previous = current\n current = current.next_node\n position -= 1 \n previous.next_node = current.next_node\n\n return current",
"def deleteAtIndex(self, index):\n if index < 0 or index >= self._size:\n return\n elif index == 0:\n self.deleteHead()\n return\n elif index == self._size - 1:\n self.deleteTail()\n return\n\n current = self._head\n for _ in range(index - 1):\n current = current.next\n current.next = current.next.next\n self._size -= 1",
"def delete_at_index(self, index: int) -> T:\n try:\n previous_node = self.__get_node_at_index(index-1)\n except ValueError as e:\n if self.is_empty(): \n raise ValueError(\"List is empty\")\n elif index == 0:\n item = self.head.items\n self.head = self.head.link\n else:\n raise e\n else:\n item = previous_node.link.items\n previous_node.link = previous_node.link.link\n self.length -= 1\n return item",
"def remove(self,index=0):\n if index>self.size-1: raise IndexError(\"Index out of range.\")\n elif self.size==1: self.reference=None\n else:\n pointer = self.reference\n for i in range(index): pointer = pointer.next\n pointer.previous.next, pointer.next.previous = pointer.next, pointer.previous\n if index==0: self.reference=self.reference.next\n self.size-=1",
"def __delitem__(self, index):\n # If input is a slice then delete all elements as determined\n # by the slice attributes, using an offset to account for the\n # changing size of the list.\n if isinstance(index, slice):\n offset = 0\n for i in xrange(*index.indices(len(self))):\n if i > -(len(self) + 1) or i < len(self):\n del self[i - offset]\n offset += 1\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n del cur_node.data_list[index]\n self.length -= 1\n\n self.__balance_node(prev_node, cur_node)",
"def remove(self, index=0):\n # Error case: Index out of acceptable range\n if index < 0 or index >= self._size:\n raise RangeError(\"index out of range.\")\n\n # Edge case: Remove from front of list\n # Behave list pop_front()\n if (index == 0):\n return self.pop_front()\n\n # Edge case: Remove from end of list\n # Behave list pop_back()\n if (index == self._size - 1):\n return self.pop_back()\n\n i = 1\n current_node = self._head.next\n\n while(i < index):\n current_node = current_node.next\n i += 1\n\n current_node.prev.next = current_node.next\n current_node.next.prev = current_node.prev\n self._size -= 1\n\n return current_node.value",
"def remove(self, index):\n raise NotImplementedError()",
"def delete_at_index(self, index: int) -> T:\n pass",
"def delete_at_index(self, index: int) -> T:\n pass"
] | [
"0.86580855",
"0.8498837",
"0.8386962",
"0.836057",
"0.82192457",
"0.82032645",
"0.81427",
"0.8103172",
"0.8099474",
"0.80821234",
"0.8024515",
"0.79906446",
"0.79616684",
"0.79434097",
"0.79299194",
"0.79077476",
"0.7847517",
"0.78380835",
"0.78185445",
"0.7800675",
"0.7760799",
"0.7724598",
"0.7587059",
"0.75216526",
"0.73111254",
"0.7253881",
"0.7065625",
"0.6796604",
"0.67727524",
"0.67727524"
] | 0.8852697 | 0 |
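For contrast, a compact sketch of erase-by-index in the same sentinel-head style, raising an exception instead of printing on a bad index; this is an illustrative variant, not the source's implementation.

def erase(self, index):
    # assumes self.head is a sentinel; data nodes start at self.head.next
    prev = self.head
    node = self.head.next
    current_index = 0
    while node is not None:
        if current_index == index:
            prev.next = node.next  # unlink the node at the requested index
            return node.data
        prev, node = node, node.next
        current_index += 1
    raise IndexError("linked list index out of range")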
Given the data point, delete the matching node in the linked list | def delete(self, data):
current_node = self.head
current_index = 0
index = self.get_index(data)
while current_node.next != None:
last_node = current_node
current_node = current_node.next
if current_index == index:
last_node.next = current_node.next
return
current_index += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self, data):\n if self.head.data == data:\n self.head = self.head.next\n return\n prev, curr = self.lookup(data)\n if curr is None:\n raise AttributeError('Data node not found.')\n prev.next = curr.next\n curr = None\n return",
"def delete_by_data(self, data):\n if self.is_empty():\n return\n else:\n cur = self.head\n if cur.data == data:\n # If the element of the first node is the element to be deleted\n if cur.next == None:\n self.head = None\n else:\n cur.next.prev = None\n self.head = cur.next\n return\n while cur != None:\n if cur.data == data:\n # Point the next node of cur to the next node of cur\n cur.prev.next = cur.next\n # Point the prev of the next node of cur to the previous node of cur\n cur.next.prev = cur.prev\n break\n cur = cur.next",
"def delete(self, data):\n\n current = self.head\n previous = None\n found = False\n while current and found is False:\n if current.data == data:\n found = True\n else:\n previous = current\n current = current.next\n if current is None:\n raise ValueError(\"Data not in list\")\n if previous is None:\n self.head = current.next\n else:\n previous.next = current.next\n self.size -= 1",
"def deleteActual(self, x_data):\n\n if self.is_empty():\n return print(\"List is empty. Nothing to show.\")\n\n headval = self.head\n\n while headval is not None:\n if headval.data == x_data:\n break\n\n headval = headval.next\n\n if headval is None:\n return print(\"can't find item. sorry.\")\n\n if headval.next is not None:\n headval.next.prev = headval.prev\n headval.prev.next = headval.next\n else:\n headval.prev.next = headval.next # will point to default null",
"def remove(self, d):\n\n if self.head is not None:\n if self.head.data == d:\n self.head = self.head.next\n else:\n temp = self.head\n while temp.next is not None:\n if temp.next.data == d:\n temp.next = temp.next.next\n break\n else:\n temp = temp.next",
"def remove_node(self, data):\n if not self.head:\n raise Exception(\"List is empty\")\n if self.head.data == data:\n self.head = self.head.next\n return\n previous_node = self.head\n for node in self:\n if node.data == data:\n previous_node.next = node.next\n return\n previous_node = node\n raise Exception(\"Node with data '{}' not found\".format(data))",
"def delete_node(self, node):\n curr = self.head\n while curr.next is not None:\n if curr.next == node:\n break\n curr = curr.next\n curr.next = node.next\n node = None\n return",
"def delete_point(self, point, index):\n root_hash = None\n\n if get_parent_hash(point) == 'p':\n if len(self._points[index]) == 1:\n del self._points[index]\n return\n\n root_hash = 'p'\n else:\n root_hash = get_parent_hash(point)\n\n p_hash = hash_graphics_point(point)\n\n children = [x for x in self._points[index] if get_parent_hash(x) == p_hash]\n\n if len(children) > 0:\n new_p = children.pop(0)\n new_p.setData(ItemDataTypes.PARENT_HASH, root_hash)\n p_hash = hash_graphics_point(new_p)\n\n for child in children:\n child.setData(ItemDataTypes.PARENT_HASH, p_hash)\n\n self._points[index].remove(point)\n self.set_changed()",
"def remove(self, data):\n\n traverse = self.head\n temp = self.head\n if traverse.data == data:\n self.head = traverse.next\n return\n\n while traverse.next != None:\n\n temp = traverse.next\n if temp.data == data:\n traverse.next = temp.next\n return\n\n traverse = traverse.next",
"def delete(self, node):\n # TODO: Catch errors if empty or node not in list\n self.length -= 1 # Update length\n # If head and tail, both get set to None\n if self.head is self.tail:\n self.head = None\n self.tail = None\n elif node is self.head: # If head, set current head to next\n self.head = self.head.next\n node.delete()\n elif node is self.tail: # If tail, set current tail to prev\n self.tail = self.tail.prev\n node.delete()\n else: # If regular node, just delete\n node.delete()",
"def _delete(self, current_node):\n pass",
"def deleteNode(self, key):\n\n cur = self.head\n while cur:\n if cur.data == key and cur == self.head:\n if not cur.next:\n cur = None\n self.head = None\n return\n else:\n afterNode = cur.next\n cur.next = None\n afterNode.prev = None\n cur = None\n self.head = afterNode\n return\n elif cur.data == key:\n if cur.next:\n afterNode = cur.next\n prev = cur.prev\n prev.next = afterNode\n afterNode.prev = prev\n cur.next = None\n cur.prev = None\n cur = None\n return\n else:\n prev = cur.prev\n prev.next = None\n cur.prev = None\n cur = None\n return\n cur = cur.next",
"def _delete_node(self, node):\n predecessor = node._prev\n successor = node._next\n predecessor._next = successor\n successor._prev = predecessor\n self._size -= 1\n element = node._element # record deleted element\n node._prev = node._next = node._element = None # deprecate node\n return element # return deleted element",
"def remove(self, data):\n\n traverse = self.head\n temp = self.head\n if self.head == None:\n return None\n\n if traverse.data == data:\n self.head = traverse.next\n return\n\n while traverse.next != None:\n\n temp = traverse.next\n if temp.data == data:\n traverse.next = temp.next\n return\n\n traverse = traverse.next",
"def delete_node(self, key):\n if not self.head:\n print('List is empty. No item to delete')\n return\n if self.head.data == key:\n self.head = self.head.next\n return\n temp = self.head\n while temp.next:\n if temp.next.data == key:\n break\n temp = temp.next\n temp.next = temp.next.next",
"def _delete_node(self, node):\n predecessor = node._prev\n successor = node._next\n predecessor._next = successor\n successor._prev = predecessor\n self._size -= 1\n element = node._element # record deleted element\n node._prev = node._next = node._element = None # deprecate node\n return element # return deleted element",
"def delete(self,pos):\n pos.next = pos.next.next",
"def delete_node(self, key):\n cur_node = self.head\n if cur_node and cur_node.data == key:\n self.head = cur_node.next\n cur_node = None\n return\n\n prev = None\n while cur_node and cur_node.data != key:\n prev = cur_node\n cur_node = cur_node.next\n\n if cur_node is None:\n return\n\n prev.next = cur_node.next\n cur_node = None",
"def delete(self,del_node,start_node=None):\n if not self.head:\n raise IsEmpty(\n \"There are no nodes.\"\n )\n\n elif self.head == del_node:\n self.head = self.head.next\n return\n\n elif start_node == del_node or not start_node:\n start_node = self.head\n\n start = start_node\n next_node = start_node.next\n\n while True:\n if next_node == del_node:\n start.next = start.next.next\n return\n elif not next_node:\n raise NotFound(\"Can't find node\")\n start = next_node\n next_node = next_node.next",
"def delete_ll_node(node):\n node.val = node.next.val\n node.next = node.next.next",
"def _delete_point(self):\r\n if self._point_index >= 0 and self._point_index < len(self._structure.points):\r\n self.command_stack.do(model.structure.DeletePoint(self._structure, self._point_index))",
"def delete_node(self, node: 'GraphNode'):\n\n self.operator.delete_node(node)",
"def delete(self, ele):\n prev = current = self.head\n element_in_head = False\n if self.head:\n while True:\n\tif current.data == ele:\n\t if current == self.head:\n\t element_in_head = True\n\t else:\n\t prev.next = current.next\n\t break\n\tprev = current\n\tcurrent = current.next\n\tif current == self.head:\n\t break\n if element_in_head:\n\tif self.head.next == self.head:\n\t self.head = None\n\telse:\n\t prev.next = self.head.next\n\t self.head = self.head.next",
"def del_node(node, delnum):\n pass",
"def delete_node_at_index(self, index):\n if index < 0 or index >= self.size:\n return\n\n curr = self.head\n if index == 0:\n self.head = curr.next\n else:\n for i in range(index - 1):\n curr = curr.next\n curr.next = curr.next.next\n\n self.size -= 1",
"def remove(self, pointer=None, index=None, data=None):\n def rPointer(pointer):\n start = self.head\n if start==pointer:\n self.head = self.head.getLink()\n return start\n while start:\n if start.getLink()==None:\n return None\n if start.getLink()==pointer:\n temp=start.getLink()\n start.setLink(temp.getLink())\n return temp\n start = start.getLink()\n\n def rIndex(index):\n start = self.head\n count = 0\n if index == count:\n self.head = start.getLink()\n return start\n while count < index-1:\n start = start.getLink()\n if not start:\n return None\n count+=1\n else:\n temp=start.getLink()\n start.setLink(temp.getLink())\n return temp\n\n def rData(data):\n start = self.head\n if start.getMember()==data:\n self.head = start.getLink()\n return start\n while start:\n if start.getLink().getMember()==data:\n temp=start.getLink()\n start.setLink(temp.getLink())\n return temp\n start = start.getLink()\n return None\n \n if pointer and type(pointer)==Member:\n return rPointer(pointer)\n if index and type(index)==int:\n return rIndex(index)\n if data and type(data)==dict:\n return rData(data)\n return None",
"def _delete_node(self, node):\n\n if self.is_empty():\n raise Empty(\"List is empty!\")\n\n predecessor = node._prev\n successor = node._next\n\n predecessor._next = successor\n successor._prev = predecessor\n\n elem = node._element\n node._prev = node._next = node._element = None\n\n self._size -= 1\n\n return elem",
"def delete(self, value):\n current = self.head\n if current.value == value:\n self.head = current.next\n else:\n while current:\n if current.value == value:\n break\n prev = current\n current = current.next\n if current == None:\n return\n prev.next = current.next\n current = None",
"def delete(self, node):\n\n if self.size == 0: # if list is empty\n return None # nothing to delete\n\n removed_value = node.value # copy deleted node's value\n\n if self.size == 1: # if only one item in list\n self.head = self.tail = None\n self.size -= 1\n\n else: # more than one element in list\n if self.head is node: # node to delete is head\n self.head = node.next # reassign head to be element after head\n\n elif self.tail is node: # node to delete is tail\n self.tail = node.prev # reassign tail to be element before tail\n\n else: # node is neither head nor tail, putting it somewhere in the middle\n node.prev.next = node.next\n node.next.prev = node.prev\n\n node.next = node.prev = None\n self.size -= 1\n\n return removed_value",
"def delete(self, lstnode):\n if lstnode is self.sentinel:\n raise Exception('Cannot delete the sentinel')\n lstnode._delete()\n self.N -= 1"
] | [
"0.8042581",
"0.7689726",
"0.75389934",
"0.721128",
"0.7150525",
"0.71124315",
"0.7111399",
"0.71054244",
"0.7086867",
"0.70839566",
"0.7073029",
"0.7016989",
"0.6971378",
"0.6966138",
"0.694801",
"0.6915151",
"0.68923706",
"0.68759763",
"0.68745667",
"0.68702126",
"0.68108785",
"0.6799626",
"0.6799062",
"0.67941374",
"0.6772454",
"0.6740191",
"0.6732128",
"0.6703599",
"0.6695831",
"0.668688"
] | 0.79490376 | 1 |
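The delete above walks the list twice (once inside get_index and once to unlink the node); a single-pass sketch of removal by value, again assuming a sentinel head node, could look like this:

def delete(self, data):
    # remove the first node whose payload equals data; single traversal
    prev = self.head
    node = self.head.next
    while node is not None:
        if node.data == data:
            prev.next = node.next
            return True
        prev, node = node, node.next
    return False  # value not found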
NMC diffusivity as a function of stoichiometry; in this case the diffusivity is taken to be a constant. The value is taken from Peyman MPM. References | def NMC_diffusivity_PeymanMPM(sto, T):
D_ref = 8 * 10 ** (-15)
E_D_s = 18550
arrhenius = np.exp(E_D_s / pybamm.constants.R * (1 / 298.15 - 1 / T))
return D_ref * arrhenius | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Nsat(self, m):\n result = (m - self.kappa * self.mMinHod)\n if result>0.:\n result /= self.m1\n result **= self.alpha\n result *= self.Ncen(m)\n else:\n result = 0.\n return result",
"def Nsat(self, m):\n result = (m - self.kappa * self.mCut)\n if result>0.:\n result /= self.m1\n result **= self.alpha\n result *= self.Ncen(m)\n else:\n result = 0.\n return result",
"def self_diffusion_coefficient(self, n, T):\n Tstar = T / self.epsilon_Kelvin\n omd = self._OmegaDiffusion(Tstar)\n numerator = 3 * (4 * pi * kB * T / self.mass)**(1/2)\n denominator = 16 * pi * n * self.sigma ** 2 * omd\n return numerator/denominator",
"def _nelec(self):\n pd = self.particle_distribution(self._gam * mec2)\n return pd.to(1/mec2_unit).value",
"def graphite_entropic_change_PeymanMPM(sto, c_s_max):\n\n du_dT = 10 ** (-3) * (\n 0.28\n - 1.56 * sto\n - 8.92 * sto ** (2)\n + 57.21 * sto ** (3)\n - 110.7 * sto ** (4)\n + 90.71 * sto ** (5)\n - 27.14 * sto ** (6)\n )\n\n return du_dT",
"def nmc(\n prob,\n weights=None\n):\n N = len(prob)\n if weights is None:\n weights = np.ones(N)\n\n rho = np.dot(weights, prob) / np.sum(weights)\n H_Z = - np.dot(rho, np.log(rho + 1e-50))\n H_ZbarX = (\n - np.sum(np.dot(weights, prob * np.log(prob + 1e-50)))\n / np.sum(weights)\n )\n\n if H_Z < 1e-15:\n return 0.0\n else:\n return (H_Z - H_ZbarX) / H_Z",
"def nmse(gt, pred):\n return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2",
"def nmse(gt, pred):\n return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2",
"def Ncen(self, m):\n result = np.log10(m) - np.log10(self.mMinHod)\n result /= self.sLogM\n result = 0.5 * (1. + special.erf(result))\n return result",
"def NMC_ocp_PeymanMPM(sto):\n\n u_eq = (\n 4.3452\n - 1.6518 * sto\n + 1.6225 * (sto**2)\n - 2.0843 * (sto**3)\n + 3.5146 * (sto**4)\n - 2.2166 * (sto**5)\n - 0.5623e-4 * np.exp(109.451 * sto - 100.006)\n )\n\n return u_eq",
"def Nsat(self, m):\n result = np.log10(m) - np.log10(2.*self.mMinHod)\n result /= self.sLogM\n result = 0.5 * (1. + special.erf(result))\n result *= (m/self.mSat)**self.aSat\n return result",
"def nashsutcliffe():\r\n average_obs = sum(obs_data)/len(obs_data)\r\n sum_sim_obs = 0\r\n sum_obs_obsave = 0\r\n for i in range(len(min(obs_data, hydrograph))-1):\r\n diff_sim_obs = (obs_data[i] - hydrograph[i])**2\r\n sum_sim_obs = sum_sim_obs + diff_sim_obs\r\n diff_obs_obsave = (obs_data[i] - average_obs)**2\r\n sum_obs_obsave = sum_obs_obsave + diff_obs_obsave\r\n mNSE = sum_sim_obs/sum_obs_obsave\r\n return mNSE",
"def graphite_diffusivity_PeymanMPM(sto, T):\n\n D_ref = 5.0 * 10 ** (-15)\n E_D_s = 42770\n arrhenius = np.exp(E_D_s / pybamm.constants.R * (1 / 298.15 - 1 / T))\n\n return D_ref * arrhenius",
"def modularity_gain(n, c, dnodecomm):\n\n totc = _tot[c]\n degc = k[n]\n return dnodecomm - (totc * degc) / m",
"def Ngal(self, m):\n return self.Ncen(m) + self.Nsat(m)",
"def get_M(self):\n return 1.0",
"def C_Na_eq():\n global C_Na, C_Mg, C_dNTP\n return C_Na + 120*sqrt(C_Mg - C_dNTP)",
"def Ncen(self, m):\n result = np.log(m/self.mCut)\n result /= np.sqrt(2.) * self.sigma\n result = 0.5 * (1. + special.erf(result))\n return result",
"def Ncen(self, m):\n result = np.log10(m) - self.log10mMin\n result /= self.sLog10m\n result = 0.5 * (1. + special.erf(result))\n result *= self.fInc(m)\n return result",
"def nC(self):\n return int(self.vnC.prod())",
"def Nsat(self, m):\n pass",
"def epsilon_delta(self):",
"def NS(s,o):\n s,o = filter_nan(s,o)\n return 1 - sum((s-o)**2)/sum((o-np.mean(o))**2)\n #return 1 - sum((np.log(s)-np.log(o))**2)/sum((np.log(o)-np.mean(np.log(o)))**2)",
"def nhat(self):\n cosE = np.cos(self.E())\n return 2.0*np.pi/self.PB.to('second')/(1-self.ecc()*cosE)",
"def chao1_var_no_singletons(n, observed):\n o = float(observed)\n return o*exp(-n/o)*(1-exp(-n/o))",
"def estimate_diffusion_coefficient(n_CFx: int) -> t.Diffusivity:\n return t.Diffusivity(10 ** (-4.5360 + -0.1088 * n_CFx), \"cm^2/s\")",
"def nrmse(self) -> float:\n return float(self.rmse() / (np.max(self.true) - np.min(self.true)))",
"def nN(self):\n return int(self.vnN.prod())",
"def discharge_coefficient(self) -> _VectorisedFloat:\n return 0.6",
"def distmod(self):\n val = 5. * np.log10(self.to(u.pc).value) - 5.\n return u.Quantity(val, u.mag)"
] | [
"0.6880065",
"0.6615266",
"0.6472032",
"0.6333169",
"0.632034",
"0.6175014",
"0.61480594",
"0.61480594",
"0.61378735",
"0.61342615",
"0.61112505",
"0.61079407",
"0.59707457",
"0.59698695",
"0.59507924",
"0.59083015",
"0.58945155",
"0.5889738",
"0.5874483",
"0.5851936",
"0.5810767",
"0.58051026",
"0.57677",
"0.5752851",
"0.5749212",
"0.57047343",
"0.5696675",
"0.56955343",
"0.5687419",
"0.5654406"
] | 0.70094794 | 0 |
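A quick numeric check of the Arrhenius scaling used in the diffusivity above; plain NumPy and an explicit gas constant stand in for pybamm.constants.R, and the sto argument is kept only to mirror the signature.

import numpy as np

R = 8.314  # J/(mol K), gas constant (stand-in for pybamm.constants.R)

def nmc_diffusivity(sto, T):
    D_ref = 8e-15   # m^2/s, reference diffusivity at 298.15 K
    E_D_s = 18550   # J/mol, activation energy
    arrhenius = np.exp(E_D_s / R * (1 / 298.15 - 1 / T))
    return D_ref * arrhenius

print(nmc_diffusivity(0.5, 298.15))  # 8e-15: the Arrhenius factor is exactly 1 at 298.15 K
print(nmc_diffusivity(0.5, 318.15))  # ~1.3e-14: diffusion is faster at higher temperature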
Nickel Manganese Cobalt Oxide (NMC) Open-circuit Potential (OCP) as a function of the stoichiometry. The fit is taken from Peyman MPM. References: Peyman MPM manuscript (to be submitted) | def NMC_ocp_PeymanMPM(sto):
u_eq = (
4.3452
- 1.6518 * sto
+ 1.6225 * (sto**2)
- 2.0843 * (sto**3)
+ 3.5146 * (sto**4)
- 2.2166 * (sto**5)
- 0.5623e-4 * np.exp(109.451 * sto - 100.006)
)
return u_eq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def graphite_ocp_PeymanMPM(sto):\n\n u_eq = (\n 0.063\n + 0.8 * np.exp(-75 * (sto + 0.001))\n - 0.0120 * np.tanh((sto - 0.127) / 0.016)\n - 0.0118 * np.tanh((sto - 0.155) / 0.016)\n - 0.0035 * np.tanh((sto - 0.220) / 0.020)\n - 0.0095 * np.tanh((sto - 0.190) / 0.013)\n - 0.0145 * np.tanh((sto - 0.490) / 0.020)\n - 0.0800 * np.tanh((sto - 1.030) / 0.055)\n )\n\n return u_eq",
"def open_circ():\n\n set_mode(mode_cc) # set operation mode to CC\n time.sleep(.250)\n set_CC_current(cc_current=0) # set CC mode current to 0 amps\n time.sleep(.1)\n \n oc_vals = get_input_values() # read open circuits levels\n oc_data_point = data_point(oc_vals) # create data point for open circuit measurement\n voc = oc_data_point[3] # open circuit voltage measurement\n print('Open circuit voltage: ', voc)\n write_data_tofile(oc_data_point) # write data to file\n \n return voc",
"def Mol_SO(Nat, multip, charge, sym, SO_3rdrow_mols_val): # number of atoms, multiplicity, charge, array of atoms in molecule, value of SO_3rdrow_mols (from orca.inp file)\n\n Mol_SO = 0\n \n # Special Case - Acetleyne - S\n if Nat == 4 and multip == 2 and charge == 1:\n countH_temp =0\n countC_temp =0\n for tmp in range(len(sym)):\n if sym[tmp] == \"H\":\n countH_temp= countH_temp +1\n if sym[tmp] == \"C\":\n countC_temp = countC_temp +1\n if countH_temp == 2 and countC_temp == 2:\n Mol_SO = -0.07 #-0.07d0\n \n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"DETECTED A C2H2+ SYSTEM: Using SO parameters for acetylene cation\\n\")\n ther_chem.write(\"Ref: JCP 114, 9287, 2001\\n\\n\")\n # Special Case - Acetleyne - E\n \n # For diatomics with multip = 2\n if Nat == 2 and multip == 2 :\n sort_sym = sorted(sym, reverse=True)\n if SO_3rdrow_mols_val == \"true\": # for 3rd_row elements\n \n if charge == 0:\n if sort_sym[0] == 'O' and sort_sym[1] == 'Br': Mol_SO=-2.20\n \n # COMMMENT: paper has it for cation, but it looks like it is for neutral\n if sort_sym[0] == 'Se' and sort_sym[1] == 'H': Mol_SO=-4.21\n \n if charge == +1: ### RECHECK what the values of charge is!!!!!!!!!!!!!!!!!!!!!IMPORTANT\n if sort_sym[0] == 'K' and sort_sym[1] == 'Br': Mol_SO=-2.99\n if sort_sym[0] == 'H' and sort_sym[1] == 'As': Mol_SO=-3.54\n if sort_sym[0] == 'H' and sort_sym[1] == 'Br': Mol_SO=-6.26\n if sort_sym[0] == 'F' and sort_sym[1] == 'Br': Mol_SO=-6.10\n if sort_sym[0] == 'Na' and sort_sym[1] == 'Br': Mol_SO=-3.93\n if sort_sym[0] == 'Br' and sort_sym[1] == 'Br': Mol_SO=-6.55\n \n else: # for non 3rd row elements, first and second rows\n if charge == 0:\n if sort_sym[0] == 'H' and sort_sym[1] == 'C': Mol_SO=-0.07\n if sort_sym[0] == 'O' and sort_sym[1] == 'H': Mol_SO=-0.30\n if sort_sym[0] == 'O' and sort_sym[1] == 'N': Mol_SO=-0.27\n if sort_sym[0] == 'O' and sort_sym[1] == 'Cl': Mol_SO=-0.61\n if sort_sym[0] == 'S' and sort_sym[1] == 'H': Mol_SO=-1.01\n if sort_sym[0] == 'P' and sort_sym[1] == 'O': Mol_SO=-0.53\n if sort_sym[0] == 'Si' and sort_sym[1] == 'H': Mol_SO=-0.34\n \n if charge == -1:\n if sort_sym[0] == 'N' and sort_sym[1] == 'H': Mol_SO=-0.12\n if sort_sym[0] == 'P' and sort_sym[1] == 'H': Mol_SO=-0.45\n if sort_sym[0] == 'O' and sort_sym[1] == 'O': Mol_SO=-0.34\n if sort_sym[0] == 'S' and sort_sym[1] == 'S': Mol_SO=-1.12\n \n if charge == +1:\n if sort_sym[0] == 'H' and sort_sym[1] == 'F': Mol_SO=-0.62\n if sort_sym[0] == 'P' and sort_sym[1] == 'H': Mol_SO=-0.67\n if sort_sym[0] == 'H' and sort_sym[1] == 'Cl': Mol_SO=-1.60\n if sort_sym[0] == 'N' and sort_sym[1] == 'N': Mol_SO=-0.17\n if sort_sym[0] == 'O' and sort_sym[1] == 'O': Mol_SO=-0.43\n if sort_sym[0] == 'P' and sort_sym[1] == 'P': Mol_SO=-0.57\n if sort_sym[0] == 'S' and sort_sym[1] == 'S': Mol_SO=-1.25\n if sort_sym[0] == 'Cl' and sort_sym[1] == 'Cl': Mol_SO=-1.77\n if sort_sym[0] == 'F' and sort_sym[1] == 'Cl': Mol_SO=-1.60\n \n return(Mol_SO)",
"def KPMO(XVal,YVal_State_1,YVal_State_2,YVal_State_3,XVal_Mean_Trans_1,XVal_Mean_Trans_2,XVal_Sig_Trans_1,XVal_Sig_Trans_2,iOpt):\n#\t1. Computations:\n\tTiny=1E-20\n\tP_Trans_1 = fCPD(XVal,XVal_Mean_Trans_1, XVal_Sig_Trans_1) # Transition of kerogen from State #1 to State #2\n\tP_Trans_2 = fCPD(XVal,XVal_Mean_Trans_2, XVal_Sig_Trans_2) # Transition of kerogen from State #2 to State #3\n\tFunVal=0\n\tif(iOpt==0):\n\t\tP_State_1=(1-P_Trans_1)*(1-P_Trans_2)\n\t\tP_State_2=P_Trans_1*(1 - P_Trans_2)\n\t\tP_State_3=1-P_State_1-P_State_2\n\t\tFunVal=(YVal_State_1*P_State_1)+(YVal_State_2*P_State_2)+(YVal_State_3*P_State_3)\n\tif(iOpt==1):\n\t\tFunVal=YVal_State_1+P_Trans_1*YVal_State_2+P_Trans_2*YVal_State_3\n\tif(FunVal==0):\n\t\tFunVal=Tiny\n\treturn FunVal",
"def simulation_OFC(self,ncmE,ncmC,f,g,Cfun,h,dt,tf,x0,z0=None,dscale=10.0,\\\n xnames=\"num\",Ncol=1,FigSize=(20,10),FontSize=20,phis=None):\n \"\"\"\n \n \n 1) SIMULATION\n \n \n \"\"\"\n if len(sig(f).parameters) == 1:\n fun1 = f\n f = lambda x,p: fun1(x)\n if len(sig(g).parameters) == 1:\n fun2 = g\n g = lambda x,p: fun2(x)\n if len(sig(Cfun).parameters) == 1:\n fun3 = Cfun\n Cfun = lambda x,p: fun3(x)\n if len(sig(h).parameters) == 1:\n fun4 = h\n h = lambda x,p: fun4(x)\n print(\"========================================================\")\n print(\"====================== SIMULATIOM ======================\")\n print(\"========================================================\")\n if dt <= self.dt_rk:\n self.dt_rk = dt\n self.Nrk = int(dt/self.dt_rk)\n Nsim = int(tf/dt)\n np.set_printoptions(precision=1)\n print(\"time step =\",dt)\n print(\"terminal time =\",tf)\n print(\"initial state =\",x0)\n print(\"estimated initial state =\",z0)\n funx = lambda x,p,dEf: f(x,p)+dEf(x,p)\n z = z0\n zhis = np.zeros((Nsim+1,self.n))\n zhis[0,:] = z\n x = x0\n xhis = np.zeros((Nsim+1,self.n))\n xhis[0,:] = x\n tit1 = \"Performance of NCM-based Output Feedback (1)\"\n tit2 = \"Performance of NCM-based Output Feedback (2)\"\n tit3 = \"Performance of NCM-based Output Feedback (3)\"\n tit4 = \"Performance of NCM-based Output Feedback (4)\"\n ly = r\"estimation error: $\\|x-\\hat{x}\\|_2$\"\n l1 = r\"estimation error\"\n lyb = r\"tracking error: $\\|x-x_d\\|_2$\"\n l1b = r\"tracking error\"\n bNam1 = \"=================== ESTIMATION ERROR ===================\"\n bNam2 = \"============ ESTIMATION ERROR OF EACH STATE ============\"\n bNam3 = \"==================== Tracking ERROR ====================\"\n bNam4 = \"============= Tracking ERROR OF EACH STATE =============\"\n l2 = r\"optimal steady-state upper bound\"\n if phis == None:\n phis = np.linspace(self.plims[0,:],self.plims[1,:],Nsim)\n for k in range(Nsim):\n p = phis[k,:]\n Mc = ncmC.ncm(z,p)\n u = -g(z,p).T@Mc@z\n dEfC = lambda x,p: g(x,p)@u\n d1 = self.unifrand2(ncmC.d1_over,np.size(ncmC.Bw(x,p),1))*dscale\n x = self.rk4(x,p,dEfC,funx)+ncmC.Bw(x,p)@d1*dt\n xhis[k+1,:] = x\n Me = ncmE.ncm(z,p)\n Cx = Cfun(z,p)\n Lx = [email protected]\n #Lx = K.T\n d2 = self.unifrand2(ncmE.d2_over,np.size(ncmE.Gw(x,p),1))*dscale\n y = h(x,u,p)+ncmE.Gw(x,p)@d2\n funz = lambda z,p,dEf: f(z,p)+g(z,p)@u+dEf(z,p)\n dEfE = lambda z,p: Lx@(y-h(z,u,p))\n z = self.rk4(z,p,dEfE,funz)\n zhis[k+1,:] = z\n this = np.linspace(0,tf,Nsim+1)\n \"\"\"\n \n \n 2) FIGURE GENERATION\n \n \n \"\"\"\n print(\"========================================================\")\n print(bNam1)\n print(\"========================================================\")\n matplotlib.rcParams.update({\"font.size\": 15})\n matplotlib.rc(\"text\",usetex=True)\n plt.figure()\n plt.plot(this,np.sqrt(np.sum((xhis-zhis)**2,1)))\n plt.plot(this,np.ones(np.size(this))*ncmE.Jcv_opt)\n plt.xlabel(r\"time\",fontsize=FontSize)\n plt.ylabel(ly,fontsize=FontSize)\n plt.legend([l1,l2],loc=\"best\")\n plt.title(tit1,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(bNam2)\n print(\"========================================================\")\n Nrow = int(self.n/Ncol)+np.remainder(self.n,Ncol)\n fig,ax = plt.subplots(Nrow,Ncol,figsize=FigSize)\n plt.subplots_adjust(wspace=0.25,hspace=0.25)\n if Ncol == 1:\n ax = np.reshape(ax,(self.n,1))\n elif Nrow == 1:\n ax = np.reshape(ax,(1,self.n))\n if xnames == \"num\":\n xnames = []\n for i in range(self.n):\n xnames += 
[r\"state \"+str(i+1)]\n for row in range(Nrow):\n for col in range(Ncol):\n i = Ncol*row+col\n if i+1 <= self.n:\n ax[row,col].plot(this,xhis[:,i]-zhis[:,i])\n ax[row,col].set_xlabel(r\"time\",fontsize=FontSize)\n LabelName = r\"estimation error: \"+xnames[i]\n ax[row,col].set_ylabel(LabelName,fontsize=FontSize)\n fig.suptitle(tit2,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(bNam3)\n print(\"========================================================\")\n matplotlib.rcParams.update({\"font.size\": 15})\n matplotlib.rc(\"text\",usetex=True)\n plt.figure()\n plt.plot(this,np.sqrt(np.sum((xhis)**2,1)))\n plt.plot(this,np.ones(np.size(this))*ncmC.Jcv_opt)\n plt.xlabel(r\"time\",fontsize=FontSize)\n plt.ylabel(lyb,fontsize=FontSize)\n plt.legend([l1b,l2],loc=\"best\")\n plt.title(tit3,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(bNam4)\n print(\"========================================================\")\n Nrow = int(self.n/Ncol)+np.remainder(self.n,Ncol)\n fig,ax = plt.subplots(Nrow,Ncol,figsize=FigSize)\n plt.subplots_adjust(wspace=0.25,hspace=0.25)\n if Ncol == 1:\n ax = np.reshape(ax,(self.n,1))\n elif Nrow == 1:\n ax = np.reshape(ax,(1,self.n))\n if xnames == \"num\":\n xnames = []\n for i in range(self.n):\n xnames += [r\"state \"+str(i+1)]\n for row in range(Nrow):\n for col in range(Ncol):\n i = Ncol*row+col\n if i+1 <= self.n:\n ax[row,col].plot(this,xhis[:,i])\n ax[row,col].set_xlabel(r\"time\",fontsize=FontSize)\n LabelName = r\"tracking error: \"+xnames[i]\n ax[row,col].set_ylabel(LabelName,fontsize=FontSize)\n fig.suptitle(tit4,fontsize=FontSize)\n plt.show()\n print(\"========================================================\")\n print(\"==================== SIMULATIOM END ====================\")\n print(\"========================================================\")\n return this,xhis,zhis",
"def scoreCirc_CmosVoltageReference_2(circuit, gen, indi, MOEAMODE):\n \n if debug > 2:\n print \"\\t\\tG_\" + str(gen) + \"_I_\" + str(indi)\n #----------#\n VREF = 1.5\n #----------#\n\n #---------------------------------------------------------BigMatrix stuff, check short-circuits, matrix density, matrix identifier (obsolete) \n FullBigCircuitMatrix = copy(circuit.fullRedundancyMatrix)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #--------------------------------------------------------- \n \n score = np.array([0,0,0], dtype=\"float64\") if MOEAMODE == 1 else 0\n \n score += 2e4*np.exp(OcSc)\n results = None\n if OcSc > 1:\n score += 1e4*np.exp(OcSc)\n else:\n #----------------------------------------------------------Try to make netlist and evaluate the individual\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateCmosVoltageRef(gen, indi)\n #----------------------------------------------------------Start of results analysis and objectives creation\n disfCount = 0\n \n #Vdd sweeps on 3 temperatures - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # -20 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t1 = np.array(results['vout_vdd_temp1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t1)):\n disfCount = disfCount + 1\n vdd_s_t1 = 0\n vdd_s_t1_d = 0\n else:\n x = np.median(vdd_sweep_t1)\n vdd_s_t1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t1_d = np.max(vdd_sweep_t1) - np.min(vdd_sweep_t1)\n \n \n # 25 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t2 = np.array(results['vout_vdd_temp2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t2)):\n disfCount = disfCount + 1\n vdd_s_t2 = 0\n vdd_s_t2_d = 0\n else:\n x = np.median(vdd_sweep_t2)\n vdd_s_t2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t2_d = np.max(vdd_sweep_t2) - np.min(vdd_sweep_t2) \n \n # 120 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t3 = np.array(results['vout_vdd_temp3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t3)):\n disfCount = disfCount + 1\n vdd_s_t3 = 0\n vdd_s_t3_d = 0\n else:\n x = np.median(vdd_sweep_t3)\n vdd_s_t3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t3_d = np.max(vdd_sweep_t3) - np.min(vdd_sweep_t3) \n \n #Vdd sweeps on 3 loads - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # 10e6 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r1 = np.array(results['vout_vdd_res1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r1)):\n disfCount = disfCount + 1\n vdd_s_r1 = 0\n vdd_s_r1_d = 0\n else:\n x = np.median(vdd_sweep_r1)\n vdd_s_r1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r1_d = np.max(vdd_sweep_r1) - np.min(vdd_sweep_r1)\n \n # 10e4 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r2 = np.array(results['vout_vdd_res2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r2)):\n disfCount = disfCount + 1\n vdd_s_r2 = 0\n vdd_s_r2_d = 0\n 
else:\n x = np.median(vdd_sweep_r2)\n vdd_s_r2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r2_d = np.max(vdd_sweep_r2) - np.min(vdd_sweep_r2) \n \n # 10e2 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r3 = np.array(results['vout_vdd_res3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r3)):\n disfCount = disfCount + 1\n vdd_s_r3 = 0\n vdd_s_r3_d = 0\n else:\n x = np.median(vdd_sweep_r3)\n vdd_s_r3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r3_d = np.max(vdd_sweep_r3) - np.min(vdd_sweep_r3) \n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n psrr = results['psrr']['nominal']\n# if np.isnan(np.array(psrr, dtype=float)):\n# disfCount = disfCount + 1\n# psr = 0\n# else:\n# psr = 1.0/psrr #abs(90 - psrr) if psrr < 90 else 0 #tole kot objective ni ok. ker je opravljena meritev samo pri vdd=15 je to precej stala.\n\n\n #----------------------------------------------------------Score function SINGLE-OBJECTIVE\n if MOEAMODE == 0:\n score =(vdd_s_t1 + 5*vdd_s_t1_d +\n\t 2*vdd_s_t2 + 2*vdd_s_t2_d +\n\t vdd_s_t3 + 5*vdd_s_t3_d +\n\t #vdd_s_r1 + 2*vdd_s_r1_d +\n\t #vdd_s_r2 + 2*vdd_s_r2_d + \n\t #vdd_s_r3 + 2*vdd_s_r3_d + \n\t (100*powe)\n )\n if disfCount > 0:\n\tscore = 0 + np.exp(disfCount) * 1e3\n\t\n #----------------------------------------------------------Score function MULTI-OBJECTIVE\t\n else: #MOEAMODE == 1:\n oMediana = vdd_s_t1 + vdd_s_t2 + vdd_s_t3\n oPsrr = vdd_s_t1_d + vdd_s_t2_d + vdd_s_t3_d\t#DC rejection\n #oPsrr = psr\n oP = powe\n\t\t\t\t\t #add constraints\n score = (np.array([oMediana, oPsrr, oP]) \t+ (oMediana if oMediana > 4 else 0) + \n\t\t\t\t\t\t#+ (oPsrr*1000 if oPsrr > 1.0/40 else 0) +\n\t\t\t\t\t\t+ (oPsrr if oPsrr > 3 else 0) +\n\t\t\t\t\t\t+ (oP if oP > 1e-1 else 0)\n )\n if disfCount > 0:\n\tscore = (np.array([0,0,0])+np.exp(disfCount) * 1e3) + random.randint(0, 200)\n\n #-------------------------------------------------------------------\n if debug > 2: \n print \"\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n \n \n # TRIGGER STOP SIGNAL if:\n if (vdd_s_t2 <= 0.001 and \n\tpsrr >= 80 and \n\tpowe <= 1e-5):\n globalVars.DONE = 1 # End evolution, feasible solution evolved.\n \n\n return score, results",
"def circuitSat(C):",
"def short_circ():\n \n set_mode(mode_cv)\n time.sleep(.250)\n set_CV_volts(0.1)\n time.sleep(.250)\n \n sc_vals = get_input_values()\n sc_data_point = data_point(sc_vals)\n jsc = sc_data_point[4]\n print('Short circuit current: ', jsc)\n write_data_tofile(sc_data_point)\n\n return jsc",
"def fixedZMPchi2(self, pars):\n\t\tif not self.hasZMP and self.nZero > 0:\n\t\t\traise RuntimeError(\"No zero mode parameters set\")\n\t\tif pars is not None:\n\t\t\tself.setShapeParameters(pars)\n\t\ta,b,c = self.getOwnTheoryABC()\n\t\tA = np.zeros((2*self.nFunc, 2*self.nFunc))\n\t\tB = np.zeros((2*self.nFunc))\n\t\tC = c\n\t\tfor i in range(2*self.nZero):\n\t\t\tC += b[i]*self.zeroModeParameters[i]\n\t\t\tfor j in range(2*self.nZero):\n\t\t\t\tC += self.zeroModeParameters[i]*self.zeroModeParameters[j]*a[i,j]\n\t\tfor i in range(2*self.nFunc):\n\t\t\tB[i] += b[2*self.nZero+i]\n\t\t\tfor j in range(2*self.nZero):\n\t\t\t\tB[i] += (a[2*self.nZero+i,j]+a[j,2*self.nZero+i])*self.zeroModeParameters[j]\n\t\t\tfor j in range(2*self.nFunc):\n\t\t\t\tA[i,j] += a[2*self.nZero + i, 2*self.nZero+j]\n\t\tif self.ownPinv:\n\t\t\tcouplings = -np.dot(B, utils.pinv(np.transpose(A) + A, numLim = self.numLim))\n\t\telse:\n\t\t\tcouplings = -np.dot(B, la.pinv(np.transpose(A) + A))\n\t\treturn np.dot(couplings, np.dot(A,couplings)) + np.dot(B,couplings) + C",
"def scoreCirc_CmosVoltageReference(circuit, gen, indi, makeRedundancyInMatrix): #TODO 6.9.2016 napisi cost function ki se sklada z evaluateCmosVoltageRef\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateCmosVoltageRef(gen, indi)\n disfCount = 0\n \n \n #Vdd sweeps on 3 temperatures - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # -20 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t1 = np.array(results['vout_vdd_temp1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t1)):\n disfCount = disfCount + 1\n vdd_s_t1 = 0\n vdd_s_t1_d = 0\n else:\n x = np.median(vdd_sweep_t1)\n vdd_s_t1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t1_d = np.max(vdd_sweep_t1) - np.min(vdd_sweep_t1)\n \n \n # 25 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t2 = np.array(results['vout_vdd_temp2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t2)):\n disfCount = disfCount + 1\n vdd_s_t2 = 0\n vdd_s_t2_d = 0\n else:\n x = np.median(vdd_sweep_t2)\n vdd_s_t2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t2_d = np.max(vdd_sweep_t2) - np.min(vdd_sweep_t2) \n \n # 120 deg\n vdd_sweep_scale = np.array(results['vout_vdd_scale']['nominal'], dtype=float)\n vdd_sweep_t3 = np.array(results['vout_vdd_temp3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_t3)):\n disfCount = disfCount + 1\n vdd_s_t3 = 0\n vdd_s_t3_d = 0\n else:\n x = np.median(vdd_sweep_t3)\n vdd_s_t3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_t3_d = np.max(vdd_sweep_t3) - np.min(vdd_sweep_t3) \n \n #Vdd sweeps on 3 loads - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # 10e6 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r1 = np.array(results['vout_vdd_res1']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r1)):\n disfCount = disfCount + 1\n vdd_s_r1 = 0\n vdd_s_r1_d = 0\n else:\n x = np.median(vdd_sweep_r1)\n vdd_s_r1 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r1_d = np.max(vdd_sweep_r1) - np.min(vdd_sweep_r1)\n \n # 10e4 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r2 = np.array(results['vout_vdd_res2']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r2)):\n disfCount = disfCount + 1\n vdd_s_r2 = 0\n vdd_s_r2_d = 0\n else:\n x = np.median(vdd_sweep_r2)\n vdd_s_r2 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r2_d = np.max(vdd_sweep_r2) - np.min(vdd_sweep_r2) \n \n # 10e2 Ohm\n vdd_sweep_scale = np.array(results['vout_vdd_res_scale']['nominal'], dtype=float)\n vdd_sweep_r3 = 
np.array(results['vout_vdd_res3']['nominal'], dtype=float) #This line changes Nones to np.nans\n # if measurement is empty \n if np.any(np.isnan(vdd_sweep_r3)):\n disfCount = disfCount + 1\n vdd_s_r3 = 0\n vdd_s_r3_d = 0\n else:\n x = np.median(vdd_sweep_r3)\n vdd_s_r3 = abs(x - VREF) #if x > VREF else 0\n vdd_s_r3_d = np.max(vdd_sweep_r3) - np.min(vdd_sweep_r3) \n \n power = results['power']['nominal']\n if np.isnan(np.array(power, dtype=float)):\n disfCount = disfCount + 1\n powe = 0\n else:\n powe = power\n \n #---COST FUNCTION DEFINITION---#\n score = vdd_s_t1 + vdd_s_t1_d + \\\n\t vdd_s_t2 + vdd_s_t2_d + \\\n\t vdd_s_t3 + vdd_s_t3_d + \\\n\t vdd_s_r1 + vdd_s_r1_d + \\\n\t vdd_s_r2 + vdd_s_r2_d + \\\n\t vdd_s_r3 + vdd_s_r3_d + \\\n\t (100*powe)\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n if np.isnan(score):\n score = 2e4\n score = score + (IcNc+1) #add small punishment if not all nodes connected\n\n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n \n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename) #cleanup current subcircuit\n\n return score, matrixDensity, matrixQuaziID, results",
"def rb_nfw(m200,c,z):\n\n #Setting up cosmology\n rho0=1.4876862e+11;\n omegam=0.238000;\n msun=1.98892e+33;\n delta_vir=200.;\n G=6.6730003e-08;\n kmpsToCmps = 1.0*10.**(5.);\n Rvir=200.;\n kpc2cm=3.086*10.**(21);\n \n deltac = (delta_vir/3.)*( (c**3.)/( np.log(1.+c) - (c / (1.+c))));\n rho_crit =rho0*omegam*(1.+z)**3.;\n r200 =(m200/delta_vir / rho_crit / (4.*np.pi/3.) )**0.33333 * 1000. ;\n v200 = ((6.67e-8 * m200 * msun / (r200* 3.086*10.**(21.)) )**0.5)/1e5 ;\n \n r =np.linspace(1.,3.*r200,500); # kpc\n rs = r200 / c; \n ss=(((r/rs)*(1.+(r/rs))**2.)*1000.**3);\n rho = (rho_crit * deltac)/(ss); \n M_r = 4.*np.pi* integrate.cumtrapz((r**2)*rho, r,initial=0.)\n \n x = r/r200 ;\n tab=1./x*(np.log(1.+c*x)-c*x/(1.+c*x))/(np.log(1.+c)-c/(1.+c));\n vcirc = v200*(tab)**0.5 ;\n maxvcirc = np.max(vcirc) ;\n q=np.where((vcirc == np.max(vcirc)));\n maxvcircr = r[q];\n \n \n # Now compute V_Esc as per nfw.pro Binney & Tremaine equation 2.31\n Phi_new = r * 0.0;\n vesc = r * 0.0 ;\n for ir in range(2,len(r)-4):\n term1 = (np.trapz(rho[0:ir]*(r[0:ir]**2.),x=r[0:ir])/(r[ir]))* msun; \n term2 = np.trapz(rho[ir:len(r)]*r[ir:len(r)],x=r[ir:len(r)])*msun; \n Phi_new[ir] = -4. *np.pi*6.67e-8*(term1 + term2)/3.086e21 ;\n vesc[ir] = ((2. * np.abs(Phi_new[ir]))**0.5) / 1e5 ; # See Binney & Tremaine (2-22) \n \n\n # Chage Units to do velocity dispersion calculations\n rcm=r*kpc2cm;\n\n #M_r in gram\n M_r_gram=M_r*msun;\n\n Phi=G*integrate.cumtrapz((M_r_gram/rcm**(2)),rcm,initial=0);\n \n Phi=Phi*(1./((1e5)**2.));#%km^2/s^2\n Phi_out=np.max(Phi);\n\n k=0.41;\n a=0.29;\n\n sig = np.sqrt(a *(( Phi/Phi_out)**(k))*(Phi_out -Phi));\n \n nfw={}\n qqqt=np.where((vesc==0.))\n vesc[qqqt]=1e-99\n\n nfw[\"m200\"]=m200;\n nfw[\"c\"]=c;\n nfw[\"r200\"]=r200;\n nfw[\"v200\"]=v200;\n nfw[\"maxvcirc\"]=maxvcirc;\n nfw[\"maxvcircr\"]=maxvcircr;\n nfw[\"r\"]=r;\n nfw[\"rho\"]=rho;\n nfw[\"vcirc\"]=vcirc;\n nfw[\"M_r\"]=M_r;\n nfw[\"sig_v\"]=sig;\n nfw[\"vesc\"]=vesc;\n \n return nfw",
"def eui_modesign():\n\n # setup design space\n # N ds ws wc lc g\n GAP = Struct()\n GAP.gd_min = np.array([1, 1e-3, 1e-3, 1e-3, 1e-3, 1e-5])\n GAP.gd_max = np.array([1e3, 1e-1, 1e-1, 1e-1, 1e-1, 1e-2])\n\n\n # setup genetic algorithm parameters--------------------------------------\n nobj=2 # number of objectives\n ngen=100 # number of generations\n npop = 100 # population size\n \n problem = MyProblem()\n problem.n_var = len(GAP.gd_min)\n problem.n_obj = nobj\n problem.n_constr = 2\n problem.xl = GAP.gd_min\n problem.xu = GAP.gd_max\n problem.elementwise_evaluation = True\n\n algorithm = NSGA2(\n pop_size=npop,\n eliminate_duplicates=True\n )\n \n # conduct the optimization-------------------------------------------------\n res = minimize(problem, algorithm, (\"n_gen\", ngen), verbose=True)\n\n # save results-------------------------------------------------------------\n return res",
"def contract_pepo_obc(pepo, mps_top, mps_right):\n \n nr,nc = pepo.shape\n\n # turn bottom row => MPS vector\n # left and down must be \"a\" inputs\n mps0 = pepo[0,0][:,a,a,:]\n for i in range(1,nc):\n mps0 = einsum(\"uI,UIr->uUr\", mps0, pepo[0,i][:,:,a,:])\n\n mps0 = np.reshape(mps0, [mps0.shape[0]*mps0.shape[1], mps0.shape[2]])\n\n # contract with bottom tensor of mps_right\n mps0 = einsum(\"uI,UI->uU\", mps0, mps_right[0])\n mps0 = np.reshape(mps0, [mps0.shape[0]*mps0.shape[1]])\n\n # turn other rows into MPO mat, and contract into MPS vector\n # leftmost must be \"a\" input\n for i in range(1,nr):\n mpo = pepo[i,0][:,a,:,:]\n for j in range(1,nc):\n mpo = einsum(\"udI,UIDr->uUdDr\", mpo, pepo[i,j])\n mpo = np.reshape(mpo, [mpo.shape[0]*mpo.shape[1], mpo.shape[2]*mpo.shape[3], mpo.shape[4]])\n\n # contract with tensor of mps_right\n mpo = einsum(\"udI,UID->uUdD\", mpo, mps_right[i])\n mpo = np.reshape(mpo, [mpo.shape[0]*mpo.shape[1],mpo.shape[2]*mpo.shape[3]])\n\n # mul into mps\n mps0 = np.dot(mpo, mps0)\n\n # contract with mps_top boundary\n # This is a special state which ensures that only a single\n # outgoing z index is summed over\n scalar = np.dot(mps_top, mps0)\n\n return scalar",
"def Nsat(self, m):\n result = (m - self.kappa * self.mCut)\n if result>0.:\n result /= self.m1\n result **= self.alpha\n result *= self.Ncen(m)\n else:\n result = 0.\n return result",
"def getOmegaMVEst(Sn):\n\n sols = defaultdict(lambda: defaultdict(int))\n\n for a, b, c in tripletGenerator(Sn):\n if a is b or a is c or b is c: continue\n if isSolvableVect(a, b, c):\n d = solveVect(a, b, c)\n dtuple = tuple(d[:-1])\n dclass = d[-1]\n sols[dtuple][dclass] += 1\n\n nOK = nKO = 0\n for x in Sn:\n xtuple = tuple(x[:-1])\n xclass = x[-1]\n if xtuple not in sols: continue\n maj_class = max(sols[xtuple].keys(), key=lambda k: sols[xtuple][k])\n\n if maj_class == xclass:\n nOK += 1\n else:\n nKO += 1\n\n try:\n estW = nOK / (nOK + nKO)\n except ZeroDivisionError:\n estW = 0\n\n return estW",
"def OxSol(T,S):\n\n x = S\n y = math.log((298.15 - T)/(273.15 + T))\n\n \"\"\"umol/kg coefficients\n a0 = 5.80871\n a1 = 3.20291\n a2 = 4.17887\n a3 = 5.10006\n a4 = -9.86643e-2\n a5 = 3.80369\n b0 = -7.01577e-3\n b1 = -7.70028e-3\n b2 = -1.13864e-2\n b3 = -9.51519e-3\n c0 = -2.75915e-7\n\n \"\"\"\n\n \"\"\"ml/l coefficients\"\"\"\n a0 = 2.00907\n a1 = 3.22014\n a2 = 4.05010\n a3 = 4.94457\n a4 = -2.56847e-1\n a5 = 3.88767\n b0 = -6.24523e-3\n b1 = -7.37614e-3\n b2 = -1.03410e-2\n b3 = -8.17083e-3\n c0 = -4.88682e-7\n\n O2sol = math.exp(a0 + y*(a1 + y*(a2 + y*(a3 + y*(a4 + a5*y)))) + x*(b0 + y*(b1 + y*(b2 + b3*y)) + c0*x))\n return O2sol",
"def xsec_CEvNS(E_R, E_nu, N_p, N_n):\n \n A = N_p + N_n\n Z = N_p\n \n m_A = A*0.9315 #Mass of target nucleus (in GeV)\n q = np.sqrt(2.0*E_R*m_A) #Recoil momentum (in MeV)\n #Note: m_A in GeV, E_R in keV, E_nu in MeV\n \n #Calculate SM contribution\n Qv = (A-Z) - (1.0-4.0*SIN2THETAW)*Z #Coherence factor\n \n xsec_SM = (G_FERMI*G_FERMI/(4.0*np.pi))*Qv*Qv*m_A* \\\n (1.0-(q*q)/(4.0*E_nu*E_nu))\n \n #Calculate New-Physics correction from Z' coupling\n #Assume universal coupling to quarks (u and d)\n #QvNP = 3.0*A*gsq\n\n #Factor of 1e6 from (GeV/MeV)^2\n #G_V = 1 - 1e6*(SQRT2/G_FERMI)*(QvNP/Qv)*1.0/(q*q + m_med*m_med)\n \n #Convert from (GeV^-3) to (cm^2/keV)\n #and multiply by form factor\n return xsec_SM*1e-6*(1.98e-14)*(1.98e-14)*calcSIFormFactor(E_R, A)",
"def ncm(self,x,p):\n n = self.n\n n_p = self.n_p\n x = np.reshape(np.hstack((x,p)),(1,n+n_p))\n cholM = self.model.predict(x)\n cholM = np.reshape(cholM,int(n*(n+1)/2))\n M = self.cholM2M(cholM)\n return M",
"def Nsat(self, m):\n result = (m - self.kappa * self.mMinHod)\n if result>0.:\n result /= self.m1\n result **= self.alpha\n result *= self.Ncen(m)\n else:\n result = 0.\n return result",
"def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))",
"def showNonOpponency(C,theta):\n GI = retina.gauss_norm_img(x, y, dcoeff[i], dloc[i], imsize=imgsize,rgb=False)\n # Sample using the other recepetive field, note there is no temporal response with still images\n S = retina.sample(img,x,y,dcoeff[i],dloc[i],rgb=True)\n #backproject the imagevectors\n ncentreV,nsurrV = rgc.nonopponency(C,S,theta)\n ninverse = retina.inverse(ncentreV,x,y,dcoeff[i],dloc[i], GI, imsize=imgsize,rgb=True)\n ninv_crop = retina.crop(ninverse,x,y,dloc[i])\n ninverse2 = retina.inverse(nsurrV,x,y,dcoeff[i],dloc[i], GI, imsize=imgsize,rgb=True)\n ninv_crop2 = retina.crop(ninverse2,x,y,dloc[i])\n # place descriptive text onto generated images\n cv2.putText(ninv_crop,\"R+G + \",(xx,yy), font, 1,(255,255,255),2)\n cv2.putText(ninv_crop2,\"R+G - \",(xx,yy), font, 1,(255,255,255),2)\n merged = np.concatenate((ninv_crop, ninv_crop2),axis=1)\n \n # create cortical maps of the imagevectors\n lposnon, rposnon = cortex.cort_img(ncentreV, L, L_loc, R, R_loc, cort_size, G)\n lnegnon, rnegnon = cortex.cort_img(nsurrV, L, L_loc, R, R_loc, cort_size, G)\n pos_cort_img = np.concatenate((np.rot90(lposnon),np.rot90(rposnon,k=3)),axis=1)\n neg_cort_img = np.concatenate((np.rot90(lnegnon),np.rot90(rnegnon,k=3)),axis=1)\n mergecort = np.concatenate((pos_cort_img,neg_cort_img),axis=1)\n return merged, mergecort",
"def _iso_ic_on_planck(electron_energy, soft_photon_temperature, gamma_energy):\n Ktomec2 = 1.6863699549e-10\n soft_photon_temperature *= Ktomec2\n\n def G34(x, a):\n \"\"\"\n Eqs 20, 24, 25\n \"\"\"\n alpha, a, beta, b, c = a\n pi26 = np.pi ** 2 / 6.0\n tmp = (1 + c * x) / (1 + pi26 * c * x)\n G = pi26 * tmp * np.exp(-x)\n tmp = 1 + b * x ** beta\n g = 1. / (a * x ** alpha / tmp + 1.)\n return G * g\n\n gamma_energy = np.vstack(gamma_energy)\n # Parameters from Eqs 26, 27\n a3 = [0.606, 0.443, 1.481, 0.540, 0.319]\n a4 = [0.461, 0.726, 1.457, 0.382, 6.620]\n z = gamma_energy / electron_energy\n x = z / (1 - z) / (4. * electron_energy * soft_photon_temperature)\n # Eq. 14\n cross_section = z ** 2 / (2 * (1 - z)) * G34(x, a3) + G34(x, a4)\n tmp = (soft_photon_temperature / electron_energy) ** 2\n # r0 = (e**2 / m_e / c**2).to('cm')\n # (2 * r0 ** 2 * m_e ** 3 * c ** 4 / (pi * hbar ** 3)).cgs\n tmp *= 2.6318735743809104e+16\n cross_section = tmp * cross_section\n cc = ((gamma_energy < electron_energy) * (electron_energy > 1))\n return np.where(cc, cross_section,\n np.zeros_like(cross_section))",
"def cvstem0(self,xs,ps,alp):\n epsilon = self.epsilon\n Ncv = np.size(xs,0)\n n = self.n\n I = np.identity(n)\n Ws = []\n for k in range(Ncv):\n Ws.append(cp.Variable((n,n),PSD=True))\n nu = cp.Variable(nonneg=True)\n chi = cp.Variable(nonneg=True)\n errtxt = \"https://github.com/AstroHiro/ncm#troubleshooting\"\n if len(sig(self.Afun).parameters) == 1:\n fun1 = self.Afun\n self.Afun = lambda x,p: fun1(x)\n if (self.iEC == \"est\") and (len(sig(self.Cfun).parameters) == 1):\n fun2 = self.Cfun\n self.Cfun = lambda x,p: fun2(x)\n if self.iEC == \"est\":\n Af = self.Afun\n Cf = self.Cfun\n J = (self.d1_over*self.b_over*chi\\\n +self.d2_over*self.c_over*self.g_over*nu)/alp\n elif self.iEC == \"con\":\n Af = lambda x,p: self.Afun(x,p).T\n Cf = lambda x,p: self.h_or_g(x,p).T\n J = self.d1_over*self.b_over*chi/alp+self.d2_over*nu\n else:\n raise ValueError('Invalid iEC: iEC = \"est\" or \"con\"')\n constraints = []\n for k in range(Ncv):\n x = xs[k,:]\n p = ps[k,:]\n Ax = Af(x,p)\n Cx = Cf(x,p)\n W = Ws[k]\n constraints += [chi*I-W >> 0,W-I >> 0]\n constraints += [-2*alp*W-((W-I)/self.dt+W@Ax+Ax.T@W-2*nu*Cx.T@Cx)\\\n >> epsilon*I]\n prob = cp.Problem(cp.Minimize(J),constraints)\n prob.solve(solver=cp.MOSEK)\n cvx_status = prob.status\n if cvx_status in [\"infeasible\",\"infeasible_inaccurate\"]:\n raise ValueError(\"Problem infeasible: see \"+errtxt)\n elif cvx_status in [\"unbounded\",\"unbounded_inaccurate\"]:\n raise ValueError(\"Problem unbounded: \"+errtxt)\n Wsout = []\n for k in range(Ncv):\n Wk = Ws[k].value/nu.value\n Wsout.append(Wk)\n self.Ws = Wsout\n self.nu = nu.value\n self.chi = chi.value\n self.Jcv = prob.value\n self.cvx_status = cvx_status\n pass",
"def do_mcp_nonlinearity_calibration():\n no_sample_data_path = ''.join([DATA_PATH, 'run108allevts.h5'])\n f = h5py.File(no_sample_data_path)\n phot = _get_photon_energy(f, 108)\n mcp = np.array(f['Acqiris2']['acq'])\n andor = np.array(f['Andor']['signal']-f['Andor']['reference'])\n # Subtract dark signals:\n dark_calibration = _get_dark_calibration()\n mcp = mcp-dark_calibration['mcp']\n andor = andor-dark_calibration['andor']\n # Take data within (relatively) narrow photon energy range:\n phot_in_range = (phot > 781) & (phot < 782)\n mcp = mcp[phot_in_range]\n andor = andor[phot_in_range]\n # make sure to only take data for which andor doesn't saturate\n mcp_percentile_cutoff = min([percentileofscore(andor, 4000), 99.9])\n mcp_cutoff_value = scoreatpercentile(mcp, mcp_percentile_cutoff)\n mcp_in_range = mcp < mcp_cutoff_value\n mcp = mcp[mcp_in_range]\n andor = andor[mcp_in_range]\n correction_polynomial = np.polyfit(\n mcp, \n andor*(np.mean(mcp)/np.mean(andor)),\n deg=3)\n plt.figure()\n plt.scatter(mcp, andor)\n plt.scatter(np.polyval(correction_polynomial, mcp), andor)\n pickle_on = open(MCP_CALIBRATION_FILE, 'wb')\n pickle.dump(correction_polynomial, pickle_on)\n pickle_on.close()",
"def get_dasep_mpo(N, params, periodic=False):\n # Get the needed operators\n ops = get_ops(4)\n\n # Generate the Bulk MPO\n Dmpo = 10\n d = 4\n ten = np.zeros((Dmpo, Dmpo, d, d), dtype=complex)\n ten[0, 0] = ops['I']\n ten[1, 0] = ops['t01']\n ten[2, 0] = ops['n0']\n ten[3, 0] = ops['t23']\n ten[4, 0] = ops['n2']\n ten[5, 0] = ops['t01']\n ten[6, 0] = ops['n0']\n ten[7, 0] = ops['t23']\n ten[8, 0] = ops['n2']\n ten[9, 0] = params['wA'] * ops['t01'] + \\\n -params['wA'] * ops['n0'] + \\\n params['wD'] * ops['t10'] + \\\n -params['wD'] * ops['n1'] + \\\n params['kA'] * ops['t02'] + \\\n -params['kA'] * ops['n0'] + \\\n params['kD'] * ops['t20'] + \\\n -params['kD'] * ops['n2'] + \\\n params['wmA'] * ops['t23'] + \\\n -params['wmA'] * ops['n2'] + \\\n params['wmD'] * ops['t32'] + \\\n -params['wmD'] * ops['n3'] + \\\n params['kmA'] * ops['t13'] + \\\n -params['kmA'] * ops['n1'] + \\\n params['kmD'] * ops['t31'] + \\\n -params['kmD'] * ops['n3']\n\n ten[-1, 1] = params['v'] * ops['t10']\n ten[-1, 2] = - params['v'] * ops['n1']\n ten[-1, 3] = params['vmp'] * ops['t10']\n ten[-1, 4] = - params['vmp'] * ops['n1']\n ten[-1, 5] = params['vmm'] * ops['t32']\n ten[-1, 6] = - params['vmm'] * ops['n3']\n ten[-1, 7] = params['vm'] * ops['t32']\n ten[-1, 8] = - params['vm'] * ops['n3']\n ten[-1, 9] = ops['I']\n\n # Create a list to hold all MPO tensors\n mpo = [ten.copy() for _ in range(N)]\n mpo = [mpoi.transpose(0,2,3,1) for mpoi in mpo]\n print(mpo[0].shape)\n\n # Convert from a periodic MPO\n if periodic:\n raise ValueError('Periodic DMRG not implemented here')\n else:\n mpo[0] = mpo[0][-1,:]\n mpo[0] = np.array([mpo[0]])\n mpo[-1] = mpo[-1][:,:,:,0]\n mpo[-1] = np.array([mpo[-1]]).transpose(1,2,3,0)\n\n # Return result\n return [mpo]",
"def graphite_entropic_change_PeymanMPM(sto, c_s_max):\n\n du_dT = 10 ** (-3) * (\n 0.28\n - 1.56 * sto\n - 8.92 * sto ** (2)\n + 57.21 * sto ** (3)\n - 110.7 * sto ** (4)\n + 90.71 * sto ** (5)\n - 27.14 * sto ** (6)\n )\n\n return du_dT",
"def nonflatcosmo(self):\n return LambdaCDM(70, 0.4, 0.8)",
"def taucmm1_ncmm3(n):\n # Données issues de Kukharaskii : Solid States Communications 13, 1761 (1973) : Plasmon-phonon coupling in GaAs\n wp_cmm1 = np.array([156, 193, 202, 357, 692, 748, 802])\n tau_cmm1 = np.array([75, 84, 86, 88, 95, 102, 105])\n wp_n = convert_wpcmm1_dop(wp_cmm1)\n p = np.poly1d(np.polyfit(wp_n,tau_cmm1,3))\n return p(n)",
"def wol(noi, noft, times,\r\n M, NE, NI,\r\n Omee, Omei, Omie, Omii, F):\r\n FF=np.zeros((NE,1))\r\n for h in range(noi):\r\n ip=np.random.rand(M,1)\r\n ip/=np.linalg.norm(ip)\r\n cn=np.zeros((NE,1))\r\n for i in range(noft):\r\n c = np.zeros((M,1))\r\n VE, VI=np.zeros((NE, times)), np.zeros((NI, times))\r\n oe, oi=np.zeros((NE, times)), np.zeros((NI, times))\r\n re, ri=np.zeros((NE, times)), np.zeros((NI, times))\r\n for j in range(times):\r\n epev, epiv = sigv * np.random.randn(1,1), sigv * np.random.randn(1,1)\r\n epet, epit = sigt * np.random.randn(1,1), sigt * np.random.randn(1,1)\r\n c[:,0] = + 1 * ip[:,0]\r\n VE[:,j]=(1 - lam * dt) * VE[:,j-1] + dt* F[:,:] @ c[:,0] + Omee[:,:] @ oe[:,j-1] + Omie[:,:] @ oi[:,j-1] + epev[0,0]\r\n if VE[ne,j]>TE and RE[ne,0] < 0:\r\n oe[ne,j] = 1\r\n re[:,j]=(1 - lam * dt) * re[:,j-1]+oe[:,j-1]\r\n VI[:,j]=(1 - lam * dt) * VI[:,j-1] + Omei[:,:] @ oe[:,j-1] + Omii[:,:] @ oi[:,j-1] + epiv[0,0]\r\n ni=np.argmax(VI[:,j] - TI - epit[0,0])\r\n if VI[ni,j]>TI and RI[ni,0] < 0:\r\n oi[ni,j] = 1\r\n ri[:,j]=(1 - lam * dt) * ri[:,j-1]+oi[:,j-1]\r\n np.hstack((cn,np.sum(oe, axis=1, keepdims=True)))\r\n np.hstack((FF, np.var(cn[:,1:], axis=1)/np.mean(cn[:,1:], axis=1, keepdims=True)))\r\n return np.nanmean(FF[:,1:])",
"def test_linear_buckling_iso_CCSS(plot_static=False, plot_lb=False):\n # number of nodes\n nx = 5 # along x\n ny = 5 # along y\n\n # getting integration points\n nint = 4\n points, weights = get_points_weights(nint=nint)\n\n # geometry\n a = 3 # along x\n b = 3 # along y\n\n # material properties\n E = 200e9\n nu = 0.3\n laminaprop = (E, E, nu)\n stack = [0]\n h = 0.001\n lam = read_stack(stack=stack, plyt=h, laminaprop=laminaprop)\n\n # creating mesh\n x = np.linspace(0, a, nx)\n y = np.linspace(0, b, ny)\n xmesh, ymesh = np.meshgrid(x, y)\n\n # node coordinates and position in the global matrix\n ncoords = np.vstack((xmesh.T.flatten(), ymesh.T.flatten())).T\n nids = 1 + np.arange(ncoords.shape[0])\n nid_pos = dict(zip(nids, np.arange(len(nids))))\n\n # identifying nodal connectivity for plate elements\n # similar than Nastran's CQUAD4\n #\n # ^ y\n # |\n #\n # 4 ________ 3\n # | |\n # | | --> x\n # | |\n # |_______|\n # 1 2\n\n\n nids_mesh = nids.reshape(nx, ny)\n n1s = nids_mesh[:-1, :-1].flatten()\n n2s = nids_mesh[1:, :-1].flatten()\n n3s = nids_mesh[1:, 1:].flatten()\n n4s = nids_mesh[:-1, 1:].flatten()\n\n num_elements = len(n1s)\n print('num_elements', num_elements)\n\n N = DOF*nx*ny\n Kr = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kc = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kv = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n KGr = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGc = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGv = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n init_k_KC0 = 0\n init_k_KG = 0\n\n plates = []\n for n1, n2, n3, n4 in zip(n1s, n2s, n3s, n4s):\n plate = BFSPlate2D()\n plate.n1 = n1\n plate.n2 = n2\n plate.n3 = n3\n plate.n4 = n4\n plate.c1 = DOF*nid_pos[n1]\n plate.c2 = DOF*nid_pos[n2]\n plate.c3 = DOF*nid_pos[n3]\n plate.c4 = DOF*nid_pos[n4]\n plate.ABD = lam.ABD\n plate.lex = a/(nx - 1)\n plate.ley = b/(ny - 1)\n plate.init_k_KC0 = init_k_KC0\n plate.init_k_KG = init_k_KG\n update_KC0(plate, points, weights, Kr, Kc, Kv)\n init_k_KC0 += KC0_SPARSE_SIZE\n init_k_KG += KG_SPARSE_SIZE\n plates.append(plate)\n\n KC0 = coo_matrix((Kv, (Kr, Kc)), shape=(N, N)).tocsc()\n\n # applying boundary conditions\n\n # locating nodes\n bk = np.zeros(KC0.shape[0], dtype=bool) # constrained DOFs, can be used to prescribe displacements\n\n x = ncoords[:, 0]\n y = ncoords[:, 1]\n\n # applying boundary conditions\n # simply supported\n check = isclose(x, 0) | isclose(x, a) | isclose(y, 0) | isclose(y, b)\n bk[2::DOF] = check\n check = isclose(x, 0) | isclose(x, a)\n bk[3::DOF] = check\n # point supports\n check = isclose(x, a/2) & (isclose(y, 0) | isclose(y, b))\n bk[0::DOF] = check\n check = isclose(y, b/2) & (isclose(x, 0) | isclose(x, a))\n bk[1::DOF] = check\n\n # unconstrained nodes\n bu = ~bk # logical_not\n\n # defining external force vector\n fext = np.zeros(KC0.shape[0], dtype=float)\n\n # applying unitary load along u at x=a\n # nodes at vertices get 1/2 the force\n for plate in plates:\n pos1 = nid_pos[plate.n1]\n pos2 = nid_pos[plate.n2]\n pos3 = nid_pos[plate.n3]\n pos4 = nid_pos[plate.n4]\n if isclose(x[pos3], a):\n Nxx = -1\n xi = +1\n elif isclose(x[pos1], 0):\n Nxx = +1\n xi = -1\n else:\n continue\n lex = plate.lex\n ley = plate.ley\n indices = []\n c1 = DOF*pos1\n c2 = DOF*pos2\n c3 = DOF*pos3\n c4 = DOF*pos4\n cs = [c1, c2, c3, c4]\n for ci in cs:\n for i in range(DOF):\n indices.append(ci + i)\n fe = np.zeros(4*DOF, dtype=float)\n for j in range(nint):\n eta = points[j]\n plate.update_Nu(xi, eta)\n Nu = 
np.asarray(plate.Nu)\n fe += ley/2*weights[j]*Nu*Nxx\n fext[indices] += fe\n\n Kuu = KC0[bu, :][:, bu]\n fextu = fext[bu]\n\n # static solver\n uu = spsolve(Kuu, fextu)\n u = np.zeros(KC0.shape[0], dtype=float)\n u[bu] = uu\n\n if plot_static:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n uplot = u[0::DOF].reshape(nx, ny).T\n vplot = u[1::DOF].reshape(nx, ny).T\n print('u extremes', uplot.min(), uplot.max())\n print('v extremes', vplot.min(), vplot.max())\n levels = np.linspace(uplot.min(), uplot.max(), 300)\n plt.contourf(xmesh, ymesh, uplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n # eigenvalue solver\n\n # getting integration points\n for plate in plates:\n update_KG(u, plate, points, weights, KGr, KGc, KGv)\n KG = coo_matrix((KGv, (KGr, KGc)), shape=(N, N)).tocsc()\n KGuu = KG[bu, :][:, bu]\n\n # solving modified generalized eigenvalue problem\n # Original: (KC0 + lambda*KG)*v = 0\n # Modified: (-1/lambda)*KC0*v = KG*v #NOTE here we find (-1/lambda)\n num_eigenvalues = 5\n eigvals, eigvecsu = eigsh(A=KGuu, k=num_eigenvalues, which='SM', M=Kuu,\n tol=1e-6, sigma=1., mode='cayley')\n eigvals = -1./eigvals\n eigvecs = np.zeros((KC0.shape[0], num_eigenvalues), dtype=float)\n eigvecs[bu, :] = eigvecsu\n\n if plot_lb:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n mode = 0\n wplot = eigvecs[2::DOF, mode].reshape(nx, ny).T\n levels = np.linspace(wplot.min(), wplot.max(), 300)\n plt.contourf(xmesh, ymesh, wplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n kc = eigvals[0]/(E*np.pi**2*(h/b)**2/(12*(1 - nu**2))*h)\n assert isclose(kc, 6.6, rtol=0.05)"
] | [
"0.6118903",
"0.6068508",
"0.58394",
"0.5727134",
"0.56680244",
"0.55649835",
"0.5558325",
"0.5498825",
"0.5482988",
"0.54439884",
"0.5436478",
"0.5405408",
"0.5393144",
"0.53485477",
"0.5344366",
"0.534232",
"0.53415734",
"0.5340031",
"0.53214604",
"0.53160346",
"0.5296659",
"0.5294829",
"0.52771103",
"0.5275852",
"0.5274316",
"0.524606",
"0.52366006",
"0.523581",
"0.52336043",
"0.52202696"
] | 0.65505815 | 0 |
Sets the sat_company_account_id of this EditAccountingJournalItem. | def sat_company_account_id(self, sat_company_account_id):
if sat_company_account_id is None:
raise ValueError("Invalid value for `sat_company_account_id`, must not be `None`") # noqa: E501
self._sat_company_account_id = sat_company_account_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def account_id(self, account_id):\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def account_id(self, account_id):\n\n self._account_id = account_id",
"def setAccount(self, account_id):\n self.data_struct['_setAccount'] = account_id",
"def company(self, company):\n self._company = company",
"def company(self, company):\n\n self._company = company",
"def company(self, company):\n\n self._company = company",
"def set_company_id_value(self, company_id_value):\n self.company_id_value = company_id_value",
"def business_account(self, business_account):\n\n self._business_account = business_account",
"def account_bank_id(self, account_bank_id):\n\n self._account_bank_id = account_bank_id",
"def onchange_company_id(self, cr, uid, ids, company_id, context=None):\n # update related fields\n values =super(account_config_settings,self).onchange_company_id(cr, uid, ids, company_id, context=context).get('value',{})\n \n if company_id:\n company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)\n values.update({\n 'interval_number': company.interval_number\n })\n \n return {'value': values}",
"def onchange_property_account_category_id(self):\n if self.property_account_category_id:\n self.property_account_receivable_id = self.property_account_category_id.customer_receivable_acc_id and self.property_account_category_id.customer_receivable_acc_id.id or False\n self.property_account_payable_id = self.property_account_category_id.vendor_payable_acc_id and self.property_account_category_id.vendor_payable_acc_id.id or False",
"def __init__(self, sat_company_account_id=None, cargo=None, abono=None, comments=None): # noqa: E501 # noqa: E501\n\n self._sat_company_account_id = None\n self._cargo = None\n self._abono = None\n self._comments = None\n self.discriminator = None\n\n self.sat_company_account_id = sat_company_account_id\n self.cargo = cargo\n self.abono = abono\n if comments is not None:\n self.comments = comments",
"def onchange_company_id(self, cr, uid, ids, company_id, context=None):\n # update related fields\n values = super(account_config_settings,self).onchange_company_id(cr, uid, ids, company_id, context=context).get('value',{})\n \n if company_id:\n company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)\n values.update({\n 'statement_equation': company.statement_equation,\n 'statement_condition': company.statement_condition,\n })\n \n return {'value': values}",
"def set_audit_account(self, audit_account):\n self.single_selection_from_kendo_dropdown(self.statement_entry_audit_account_locator, audit_account)\n self.wait_for_ajax_spinner_load()",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def onchange_property_account_category_id(self):\n if self.property_account_category_id:\n self.income_acc_id = self.property_account_category_id.income_acc_id and self.property_account_category_id.income_acc_id.id or False\n self.expense_acc_id = self.property_account_category_id.expenses_acc_id and self.property_account_category_id.expenses_acc_id.id or False\n self.discount_acc_id = self.property_account_category_id.discount_acc_id and self.property_account_category_id.discount_acc_id.id or False",
"def set_account(self, account: str):\n ret = self._call_txtrader_api('set_account', {'account': account})\n if ret:\n self.account = account\n return ret",
"def design_company(self, design_company):\n\n self._design_company = design_company",
"def account(self, account: str):\n self._account = account",
"def account_amount(self, account_amount):\n\n self._account_amount = account_amount",
"def account_id(self, account_id: str):\n if account_id is None:\n raise ValueError(\"Invalid value for `account_id`, must not be `None`\") # noqa: E501\n\n self._account_id = account_id",
"def set_company_id_label(self, company_id_label):\n self.company_id_label = company_id_label"
] | [
"0.6178081",
"0.6147414",
"0.6147414",
"0.6147414",
"0.6147414",
"0.6147414",
"0.6147414",
"0.6134564",
"0.5944026",
"0.5931777",
"0.5931777",
"0.57107115",
"0.5660232",
"0.5629784",
"0.5591194",
"0.5558076",
"0.54987",
"0.54695153",
"0.5431011",
"0.5368474",
"0.5368474",
"0.5368474",
"0.5368474",
"0.5343813",
"0.5290234",
"0.5260098",
"0.5250068",
"0.52036625",
"0.5187502",
"0.5164788"
] | 0.7790871 | 0 |
Sets the cargo of this EditAccountingJournalItem. | def cargo(self, cargo):
if cargo is None:
raise ValueError("Invalid value for `cargo`, must not be `None`") # noqa: E501
if cargo is not None and cargo < 0: # noqa: E501
raise ValueError("Invalid value for `cargo`, must be a value greater than or equal to `0`") # noqa: E501
self._cargo = cargo | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def total_cargo(self, total_cargo):\n\n self._total_cargo = total_cargo",
"def cargo_fuel(self, cargo_fuel):\n\n self._cargo_fuel = cargo_fuel",
"def cargo_gas(self, cargo_gas):\n\n self._cargo_gas = cargo_gas",
"def dry_cargo(self, dry_cargo):\n\n self._dry_cargo = dry_cargo",
"def set_cash(self, cash):\n portfolio = self.get_portfolio_object()\n if portfolio is not None:\n portfolio.cash += cash\n portfolio.initial_cash += cash",
"def cidade(self, cidade):\n self._cidade = cidade",
"def cargo_water(self, cargo_water):\n\n self._cargo_water = cargo_water",
"def SetEditable(self, edit):\r\n \r\n self._edit = edit\r\n return self",
"def citation(self, citation):\n\n self._citation = citation",
"def set_credit_amount(self, credit_amount):\n self.set_value_into_input_field(self.credit_amount_textbox_locator, credit_amount)",
"def cad(self, cad):\n\n self.logger.debug(\"In 'cad' setter.\")\n\n self._cad = cad",
"def find(self, cargo):\r\n return self._find(cargo).cargo",
"def inventory(self, inventory):\n\n self._inventory = inventory",
"def _setbeneficiary_customer_no_option_59(self, val):\n self.swift_obj.BeneficiaryCustomer = val\n self.swift_obj.BeneficiaryCustomer.swiftTag = '59'",
"def course(self, value: int):\n self._course = value",
"def serialno(self, serialno):\n\n self._serialno = serialno",
"def _set_cr(self, cr):\n self.__cr = bool(cr)",
"def setCompartment(self, *args):\n return _libsbml.CompartmentReference_setCompartment(self, *args)",
"def journal_iso_abbreviation(self, journal_iso_abbreviation):\n\n self._journal_iso_abbreviation = journal_iso_abbreviation",
"def ordinal(self, ordinal):\n\n self._ordinal = ordinal",
"def set_qty(self, qty):\n self.__qty = qty",
"def setCompartment(self, *args):\n return _libsbml.QualitativeSpecies_setCompartment(self, *args)",
"def notary_journal_id(self, notary_journal_id):\n\n self._notary_journal_id = notary_journal_id",
"def notary_journal_id(self, notary_journal_id):\n\n self._notary_journal_id = notary_journal_id",
"def setCompartment(self, *args):\n return _libsbml.Reaction_setCompartment(self, *args)",
"def cash(self, cash: float):\n if cash is None:\n raise ValueError(\"Invalid value for `cash`, must not be `None`\") # noqa: E501\n\n self._cash = cash",
"def set_serial(self, serial=0):\n self.serial = serial",
"def contact(self, contact):\n\n self.logger.debug(\"In 'contact' setter.\")\n\n self._contact = contact",
"def set_text(self, texto):\n self.entry.set_text(texto)",
"def sequencing_contact(self, sequencing_contact):\n self.logger.debug(\"In 'sequencing_contact' setter.\")\n\n self._sequencing_contact = sequencing_contact"
] | [
"0.5894072",
"0.51697636",
"0.515634",
"0.5031346",
"0.50103515",
"0.4902964",
"0.47004157",
"0.4674612",
"0.46719876",
"0.4623271",
"0.4567104",
"0.45393223",
"0.45121405",
"0.44361523",
"0.44219348",
"0.4409397",
"0.43817514",
"0.4378921",
"0.43769842",
"0.4371719",
"0.43457574",
"0.43315452",
"0.43267134",
"0.43267134",
"0.4326454",
"0.42880112",
"0.4277859",
"0.42766124",
"0.42741066",
"0.42714685"
] | 0.67557865 | 0 |
Sets the abono of this EditAccountingJournalItem. | def abono(self, abono):
if abono is None:
raise ValueError("Invalid value for `abono`, must not be `None`") # noqa: E501
if abono is not None and abono < 0: # noqa: E501
raise ValueError("Invalid value for `abono`, must be a value greater than or equal to `0`") # noqa: E501
self._abono = abono | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def journal_iso_abbreviation(self, journal_iso_abbreviation):\n\n self._journal_iso_abbreviation = journal_iso_abbreviation",
"def complete_attribut(self,nom,contenu):\n self.nom=nom\n self.contenu=contenu",
"def editable(self, editable):\n\n self._editable = editable",
"def set_activo(self):\n return self.write({'state': 'Activo'})",
"def anexos(self, anexos):\n self._anexos = anexos",
"def osa(self, osa):\n\n self.logger.debug(\"In 'osa' setter.\")\n\n self._osa = osa",
"def setField(self, data):\n\t\tview = self.view\n\t\tview.sbAbstraccion.setValue(data['sbAbstraccion'])",
"def nucl_acid_ext(self, nucl_acid_ext):\n self.logger.debug(\"In 'nucl_acid_ext' setter.\")\n\n self._nucl_acid_ext = nucl_acid_ext",
"def setAn(self, an):\n self.an = an",
"def SetEditable(self, edit):\r\n \r\n self._edit = edit\r\n return self",
"def agregar_al_atril(self):\n self.atril.append(self.bolsa.tomar_bolsa())",
"def setAccidental(self, accidental):\n acc = _int(accidental)\n if acc != None:\n self.accidental = acc",
"def affiliate_oid(self, affiliate_oid):\n\n self._affiliate_oid = affiliate_oid",
"def affiliate_oid(self, affiliate_oid):\n\n self._affiliate_oid = affiliate_oid",
"def set_mab(self, username):\n self._mab.append(username)",
"def _add_attributo(spell_list):\n\n attr_list = [\"MU\", \"KL\", \"IN\", \"CH\", \"FF\", \"GE\", \"KO\", \"KK\"]\n # remove original attributo entry\n attributo_orig = spell_list.pop(-1)\n\n # change original attributo entry and add it to skill list\n for _, value in enumerate(attr_list):\n attributo_temp = copy.deepcopy(attributo_orig)\n attributo_temp.name = value + \" Attributo\"\n attributo_temp.attrs[2] = value\n spell_list.append(attributo_temp)\n\n return spell_list",
"def apoapsis(self, apoapsis):\n\n self._apoapsis = apoapsis",
"def apoapsis(self, apoapsis):\n\n self._apoapsis = apoapsis",
"def set_baja(self):\n return self.write({'state': 'Baja'})",
"def entrer(self):\n valeur = getattr(self.objet, self.attribut, None)\n if valeur is None:\n setattr(self.objet, self.attribut, [])",
"def setA(self, a):\n\t\tself.a = int(a)",
"def _set_contract_number_partyA_21N(self, val):\n self.swift_obj.SequenceA_GeneralInformation.ContractNumberPartyA = val\n self.swift_obj.SequenceA_GeneralInformation.ContractNumberPartyA.swiftTag = '21N'",
"def aerobics(self, aerobics):\n\n self.logger.debug(\"In 'aerobics' setter.\")\n\n self._aerobics = aerobics",
"def cambiar_naranjo(self):\r\n self.naranjo.setDisabled(True)",
"def __set_accion(self, widget, modo, accion):\n self.__cancel_toolbars()\n if accion == \"Salir\":\n self.toolbar.switch(\"menu\")\n\n else:\n self.base_panel.set_accion(modo, accion)",
"def affiliate_ledger_oid(self, affiliate_ledger_oid):\n\n self._affiliate_ledger_oid = affiliate_ledger_oid",
"def obtenerObra(self):\n rowActual = self.tableOs.currentItem().row()\n self.lineRazon.setText(str(self.tableOs.item(rowActual,0).text()))\n self.lineRazon.setEnabled(False)\n self.obraSocial=str(self.tableOs.item(rowActual,0).text())\n self.lineCuit.setText(str(self.tableOs.item(rowActual,1).text()))\n self.lineCuit.setEnabled(False)\n self.tableOs.setEnabled(False)\n self.gbFactura.setEnabled(True)\n self.gbNotaCredito.setEnabled(True)",
"def set_nom(self, annonce):\n n= annonce.find_element_by_class_name('prdtBILTit')\n self.nom = n.text",
"def set_job_accounting(self, data):\n required = {'admin_token', 'accounting'}\n api.validate(data, required)\n admin_token = data['admin_token']\n accounting = data['accounting']\n self.credentials_module.authorize_admin(admin_token)\n data = self.batch_module.set_job_accounting(accounting)\n return data",
"def add_journal_abbrev(b):\n if 'journal' in b:\n if 'journal_abbrev' not in b:\n # Create one abbrev\n journal, abbrev = identify_some_journals(b)\n b['journal_abbrev'] = abbrev\n b['journal'] = journal"
] | [
"0.51553214",
"0.5155304",
"0.50390625",
"0.50350344",
"0.49702317",
"0.49430567",
"0.4928677",
"0.49141398",
"0.49060485",
"0.48198193",
"0.47946447",
"0.47436208",
"0.47399092",
"0.47399092",
"0.47218883",
"0.47188574",
"0.46752125",
"0.46752125",
"0.46725613",
"0.46255463",
"0.4621169",
"0.4613059",
"0.46129173",
"0.46055114",
"0.45951408",
"0.45901775",
"0.45789266",
"0.4573444",
"0.45609036",
"0.45525596"
] | 0.6751695 | 0 |
Sets the comments of this EditAccountingJournalItem. | def comments(self, comments):
self._comments = comments | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comments(self, comments):\n\n self.container['comments'] = comments",
"def comments(self, comments):\n if comments is not None and len(comments) > 1000:\n raise ValueError(\"Invalid value for `comments`, length must be less than or equal to `1000`\") # noqa: E501\n\n self._comments = comments",
"def change_comments(self, new_comments):\n\n self.comments = new_comments",
"def comment(self, comment):\n\n self.logger.debug(\"In 'comment' setter.\")\n\n self._comment = comment",
"def set_comment(self, comment):\n\t\tself.comment_ = comment",
"def set_comment(self, comment):\n self.comment_text = str(comment)",
"def comment(self, comment):\n\n self._comment = comment",
"def comment(self, comment):\n\n self._comment = comment",
"def comment(self, comment):\n\n self._comment = comment",
"def comment(self, comment):\n\n self._comment = comment",
"def comment(self, comment):\n\n self._comment = comment",
"def comment(self, comment):\n\n self._comment = comment",
"def comment(self, comment):\n\n self._comment = comment",
"def comment(self, comment):\n\n self._comment = comment",
"def comment(self, comment):\n\n self._comment = comment",
"def comment(self, comment):\n\n self._comment = comment",
"def comment(self, comment: str):\n\n self._comment = comment",
"def comment(self, comment: str):\n\n self._comment = comment",
"def comment(self, comment) :\n\t\ttry :\n\t\t\tself._comment = comment\n\t\texcept Exception as e:\n\t\t\traise e",
"def comment(self, comment):\n self.logger.debug(\"In 'comment' setter.\")\n\n if len(comment) > 512:\n raise Exception(\"Comment is too long, must be less than 512 characters.\")\n\n self._comment = comment",
"def edit(self, comment):\n try:\n self.comment = comment\n self.save()\n except Exception as e:\n raise Exception(\"Failed to save, rolling back transaction.\" \\\n \"Details: %s\" % e)",
"def set_comments(self, id, comments):\n logging.debug(f\"\"\"__set_comments {comments} for id {id}\"\"\")\n sql = f\"\"\"update {self.schemaRepo}.tablediff\n set comments = '{comments}' where id = {id}\"\"\"\n conn = self.connect(self.cxRepo)\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")",
"def comment(self, comment): # type: (str) -> None\n self._tmp_comment = comment",
"def edit_comment():\n # Implement me!\n\n logger.info(\"vars: %r\" % request.vars)\n logger.info(\"vars_comment_text: %r\" % request.vars.comment_text)\n logger.info(\"vars id: %r\" % request.vars.comment_id)\n logger.info(\"comment_text: %r\" % db(db.Comments.id == request.vars.comment_id))\n\n #comment.comment_text = request.vars.comment_text\n #comment.edited_on = datetime.datetime.utcnow()\n db(db.Comments.id == request.vars.comment_id).update(comment_text=request.vars.comment_text, edited_on=datetime.datetime.utcnow())\n db.commit()\n logger.info(\"comment_text: %r\" % db(db.Comments.id == request.vars.comment_id))\n return \"ok\"",
"def set_doc_comment(self, doc, comment):\n if not self.doc_comment_set:\n self.doc_comment_set = True\n doc.comment = comment\n else:\n raise CardinalityError('Document::Comment')",
"def comment(self, value: str):\n self._comment = value",
"def comment(self, uuid, comment):\n # TODO: add overwrite (false by default) and append options\n cur = self.conn.cursor()\n cur.execute(\n \"\"\"\n UPDATE experiments\n SET comment = ?\n WHERE uuid = ?\n \"\"\", [comment, uuid])\n cur.close()\n self.conn.commit()",
"def update_comment_only(self, comment, incident_id):\n self.cursor.execute(\"\"\"UPDATE incidents SET comment='%s' WHERE incident_id='%s'\"\"\"%(comment ,incident_id))\n self.commiting()",
"def comment(self, *comments):\n for comment in comments:\n self._p('[*]', comment)",
"def set_doc_comment(self, doc, comment):\n if not self.doc_comment_set:\n self.doc_comment_set = True\n if validations.validate_doc_comment(comment):\n doc.comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('Document::Comment')\n else:\n raise CardinalityError('Document::Comment')"
] | [
"0.7108144",
"0.65711296",
"0.6474661",
"0.6419131",
"0.64177847",
"0.6257858",
"0.6126435",
"0.6126435",
"0.6126435",
"0.6126435",
"0.6126435",
"0.6126435",
"0.6126435",
"0.6126435",
"0.6126435",
"0.6126435",
"0.5982231",
"0.5982231",
"0.59577405",
"0.5947953",
"0.58393097",
"0.57659906",
"0.57151955",
"0.5511608",
"0.5499276",
"0.54745626",
"0.5419542",
"0.541163",
"0.5374447",
"0.53540653"
] | 0.7272099 | 1 |
Override ExperimentConfig according to flags. | def config_override(params, flags_obj):
# Change runtime.tpu to the real tpu.
params.override({
'runtime': {
'tpu': flags_obj.tpu,
}
})
# Get the first level of override from `--config_file`.
# `--config_file` is typically used as a template that specifies the common
# override for a particular experiment.
for config_file in flags_obj.config_file or []:
params = hyperparams.override_params_dict(
params, config_file, is_strict=True)
# Get the second level of override from `--params_override`.
# `--params_override` is typically used as a further override over the
# template. For example, one may define a particular template for training
  # ResNet50 on ImageNet in a config file and pass it via `--config_file`,
# then define different learning rates and pass it via `--params_override`.
if flags_obj.params_override:
params = hyperparams.override_params_dict(
params, flags_obj.params_override, is_strict=True)
params.validate()
params.lock()
pp = pprint.PrettyPrinter()
logging.info('Final experiment parameters: %s', pp.pformat(params.as_dict()))
model_dir = flags_obj.model_dir
if 'train' in flags_obj.mode:
# Pure eval modes do not output yaml files. Otherwise continuous eval job
# may race against the train job for writing the same file.
train_utils.serialize_config(params, model_dir)
return params | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def customize_experiment_config(self, config):\n # TODO: use ConfigList from Coach launcher, and share customization code.\n hyperparams_dict = json.loads(os.environ.get(\"SM_HPS\", \"{}\"))\n\n # Set output dir to intermediate\n # TODO: move this to before customer-specified so they can override\n hyperparams_dict[\"rl.training.local_dir\"] = \"/opt/ml/output/intermediate\"\n\n self.hyperparameters = ConfigurationList() # TODO: move to shared\n for name, value in hyperparams_dict.items():\n # self.map_hyperparameter(name, val) #TODO\n if name.startswith(\"rl.\"):\n # self.apply_hyperparameter(name, value) #TODO\n self.hyperparameters.store(name, value)\n # else:\n # raise ValueError(\"Unknown hyperparameter %s\" % name)\n\n self.hyperparameters.apply_subset(config, \"rl.\")\n return config",
"def _default_experiment_options(cls) -> Options:\n options = super()._default_experiment_options()\n\n options.duration = 160\n options.sigma = 40\n options.amplitudes = np.linspace(-0.95, 0.95, 51)\n options.schedule = None\n\n return options",
"def _default_experiment_options(cls) -> Options:\n options = super()._default_experiment_options()\n options.update_options(\n circuit_order=\"RIRIRI\",\n )\n return options",
"def _default_experiment_options(cls) -> Options:\n options = super()._default_experiment_options()\n options.frequency_shift = None\n\n return options",
"def override_configuration():\n\n bool_config_override(\"BUILDTEST_MODULE_FORCE_PURGE\")\n\n if os.environ.get(\"BUILDTEST_SPIDER_VIEW\"):\n config_opts[\"BUILDTEST_SPIDER_VIEW\"] = os.environ[\"BUILDTEST_SPIDER_VIEW\"]\n\n if os.environ.get(\"BUILDTEST_SUCCESS_THRESHOLD\"):\n threshold = float(os.environ.get(\"BUILDTEST_SUCCESS_THRESHOLD\"))\n\n if threshold >= 0.0 and threshold <= 1.0:\n config_opts[\"BUILDTEST_SUCCESS_THRESHOLD\"] = threshold",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def test_create_experiment_new_full_config(self, user_config):\n with OrionState() as cfg:\n experiment = create_experiment(**user_config, storage=cfg.storage_config)\n\n exp_config = experiment.configuration\n\n assert exp_config[\"space\"] == config[\"space\"]\n assert exp_config[\"max_trials\"] == config[\"max_trials\"]\n assert exp_config[\"max_broken\"] == config[\"max_broken\"]\n assert exp_config[\"working_dir\"] == config[\"working_dir\"]\n assert exp_config[\"algorithm\"] == config[\"algorithm\"]",
"def get_config():\n return ExperimentConfig(base_path=os.getenv(\"AICROWD_OUTPUT_PATH\", \"./scratch/shared\"),\n experiment_name=os.getenv(\"AICROWD_EVALUATION_NAME\", \"experiment_name\"),\n dataset_name=os.getenv(\"AICROWD_DATASET_NAME\", \"cars3d\"))",
"def _override_opt(self, new_opt):\n model_args = {\n 'arch',\n 'encoder-embed-dim',\n 'encoder-layers',\n 'decoder-embed-dim',\n 'decoder-layers',\n 'decoder-out-embed-dim',\n 'decoder-attention',\n }\n\n for k, v in new_opt.items():\n if k not in model_args:\n # skip non-model args\n continue\n if k not in self.opt:\n print('Adding new option [ {k}: {v} ]'.format(k=k, v=v))\n elif self.opt[k] != v:\n print('Overriding option [ {k}: {old} => {v}]'.format(\n k=k, old=self.opt[k], v=v))\n self.opt[k] = v\n return self.opt",
"def tpe_configspace(self):\n raise NotImplementedError(\"Overwrite for actual experiment\")",
"def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue",
"def _config_set(self):\n p = self._params\n self._config = tf.estimator.RunConfig(save_checkpoints_steps = p.save_checkpoints_steps,\n keep_checkpoint_max = p.keep_checkpoint_max,\n save_summary_steps = p.save_summary_steps\n )",
"def config(self, **kw):\n group = kw.pop('group', None)\n for k, v in kw.items():\n CONF.set_override(k, v, group)",
"def _augment_pipeline_cfg(self):",
"def _load_backtesting_config(self, config: Dict[str, Any]) -> Dict[str, Any]:\n\n # If -i/--ticker-interval is used we override the configuration parameter\n # (that will override the strategy configuration)\n if 'ticker_interval' in self.args and self.args.ticker_interval:\n config.update({'ticker_interval': self.args.ticker_interval})\n logger.info('Parameter -i/--ticker-interval detected ...')\n logger.info('Using ticker_interval: %s ...', config.get('ticker_interval'))\n\n # If -l/--live is used we add it to the configuration\n if 'live' in self.args and self.args.live:\n config.update({'live': True})\n logger.info('Parameter -l/--live detected ...')\n\n # If --enable-position-stacking is used we add it to the configuration\n if 'position_stacking' in self.args and self.args.position_stacking:\n config.update({'position_stacking': True})\n logger.info('Parameter --enable-position-stacking detected ...')\n\n # If --disable-max-market-positions is used we add it to the configuration\n if 'use_max_market_positions' in self.args and not self.args.use_max_market_positions:\n config.update({'use_max_market_positions': False})\n logger.info('Parameter --disable-max-market-positions detected ...')\n logger.info('max_open_trades set to unlimited ...')\n else:\n logger.info('Using max_open_trades: %s ...', config.get('max_open_trades'))\n\n # If --timerange is used we add it to the configuration\n if 'timerange' in self.args and self.args.timerange:\n config.update({'timerange': self.args.timerange})\n logger.info('Parameter --timerange detected: %s ...', self.args.timerange)\n\n # If --datadir is used we add it to the configuration\n if 'datadir' in self.args and self.args.datadir:\n config.update({'datadir': self._create_datadir(config, self.args.datadir)})\n else:\n config.update({'datadir': self._create_datadir(config, None)})\n logger.info('Using data folder: %s ...', config.get('datadir'))\n\n # If -r/--refresh-pairs-cached is used we add it to the configuration\n if 'refresh_pairs' in self.args and self.args.refresh_pairs:\n config.update({'refresh_pairs': True})\n logger.info('Parameter -r/--refresh-pairs-cached detected ...')\n\n if 'strategy_list' in self.args and self.args.strategy_list:\n config.update({'strategy_list': self.args.strategy_list})\n logger.info('Using strategy list of %s Strategies', len(self.args.strategy_list))\n\n if 'ticker_interval' in self.args and self.args.ticker_interval:\n config.update({'ticker_interval': self.args.ticker_interval})\n logger.info('Overriding ticker interval with Command line argument')\n\n # If --export is used we add it to the configuration\n if 'export' in self.args and self.args.export:\n config.update({'export': self.args.export})\n logger.info('Parameter --export detected: %s ...', self.args.export)\n\n # If --export-filename is used we add it to the configuration\n if 'export' in config and 'exportfilename' in self.args and self.args.exportfilename:\n config.update({'exportfilename': self.args.exportfilename})\n logger.info('Storing backtest results to %s ...', self.args.exportfilename)\n\n return config",
"def setExperiment(self, **kwargs):\n # If the dictionary robot value is 'tb1' then change the button Style\n global robot_Selected_Value\n if kwargs['robot'] =='1':\n robot_Selected_Value = 'TB1'\n elif kwargs['robot'] =='2':\n robot_Selected_Value = 'TB2'\n elif kwargs['robot'] =='3':\n robot_Selected_Value = 'TB3'\n elif kwargs['robot'] =='4':\n robot_Selected_Value = 'TB4'\n elif kwargs['set'] =='OK':\n # CONFIGURATION VARIABLES\n robot_Type_Value = self.robot_Selection_Type.currentText()\n robot_Role_Value = self.robot_Selection_Role.currentText()\n robot_Task_Value = self.robot_Selection_Task.currentText()\n robot_Behavior_Value = self.robot_Selection_Behavior.currentText()\n robot_Experiment_Value = self.robot_Selection_Experiment.currentText()\n # XML CREATION\n environmentXMLFile = et.Element('EXP_CONFIGURATIONS')\n comment = et.Comment(\"Experiment Configuration and Variables\")\n environmentXMLFile.append(comment)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_SELECTED')\n environmentConfig.text = str(robot_Selected_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_TYPE')\n environmentConfig.text = str(robot_Type_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_ROLE')\n environmentConfig.text = str(robot_Role_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_TASK')\n environmentConfig.text = str(robot_Task_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_BEHAVIOR')\n environmentConfig.text = str(robot_Behavior_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_EXPERIMENT')\n environmentConfig.text = str(robot_Experiment_Value)\n try:\n tree = et.ElementTree(environmentXMLFile)\n tree.write('experimentConfig.xml', encoding='utf8')\n sendFiles.sshSendFiles()\n operationSucess()\n except Exception:\n operationError()",
"def config(self, **kw):\n group = kw.pop('group', None)\n for k, v in kw.iteritems():\n CONF.set_override(k, v, group)",
"def TileInfoLoaderCfg(flags, **kwargs):\n\n from AthenaCommon.Logging import logging\n msg = logging.getLogger('TileInfoLoaderCfg')\n\n from TileConditions.TileCablingSvcConfig import TileCablingSvcCfg\n acc = ComponentAccumulator()\n acc.merge (TileCablingSvcCfg(flags))\n\n if 'NoiseScaleIndex' not in kwargs:\n if flags.Tile.doOpt2 and not flags.Tile.doOptATLAS:\n msg.info(\"Adjusting TileInfo to return cell noise for Opt.Filter with iterations\")\n kwargs['NoiseScaleIndex'] = 2 # Noise for Optimal Filter with iterations\n else:\n msg.info(\"Adjusting TileInfo to return cell noise for Opt.Filter without iterations\")\n kwargs['NoiseScaleIndex'] = 1 # Noise for Optimal Filter without iterations\n\n\n if (flags.Input.isMC or flags.Detector.OverlayTile) and ('TileHitVec' in flags.Input.Collections or 'TileHitVec' in flags.Input.SecondaryCollections):\n\n G4Version = flags.Sim.G4Version\n G4VersionMajor, G4VersionMinor = G4Version.split(\".\")[1:3]\n G4V = int(G4VersionMajor) + int(G4VersionMinor) / 100.\n\n physicsList = flags.Sim.PhysicsList\n\n if 'EmScaleA' not in kwargs:\n\n # Default value since May-2011\n EmScaleA = 34.0\n\n # Default value for G4 9.6 since Nov-2013 (need to check G4 version as well)\n if physicsList == 'FTFP_BERT' or (physicsList == 'QGSP_BERT' and G4V > 9.05999) :\n EmScaleA = 33.9\n\n # Default value for G4 10.0 since June-2016\n # see https://indico.cern.ch/event/489520/contributions/2193913/attachments/1285565/1914309/sampling_fractions.pdf\n if G4V >= 10.0 :\n EmScaleA = 33.7\n\n # Old value\n if physicsList == 'QGSP_EMV' or physicsList == 'QGSP_BERT_EMV' or physicsList == '':\n EmScaleA = 35.9\n\n kwargs['EmScaleA'] = EmScaleA # 1/s.f. value for all normal cells\n\n else:\n EmScaleA = kwargs['EmScaleA']\n\n msg.info(\"Using 1/s.f. = %s for %s physics list and G4version %s (%s)\", EmScaleA, physicsList, G4V, G4Version)\n\n kwargs.setdefault('TileNoise', flags.Digitization.DoCaloNoise)\n if kwargs['TileNoise']:\n msg.info(\"Switching ON noise in Tile Digitization\" )\n else:\n msg.info(\"Switching OFF noise in Tile Digitization\" )\n\n\n TileInfoLoader=CompFactory.TileInfoLoader\n acc.addService(TileInfoLoader(**kwargs), primary = True)\n\n return acc",
"def save_experiment_config(self):\n\n if (self.use_dist and dist.get_rank() == 0) or not self.use_dist:\n logfile = os.path.join(self.experiment_dir, 'parameters.txt')\n log_file = open(logfile, 'w')\n log_file.write('\\n')\n json.dump(self.args.__dict__, log_file, indent=2)\n log_file.write('\\n')\n log_file.close()",
"def test_change_config(self):\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)",
"def default_setting(self):\n\t\tdo_log = False if self.debug else True\n\t\tdo_validation, do_summary = False, False\n\t\tlog_step = 2\n\t\tepochs = 50\n\t\tvali_k = 5\n\n\t\t'''on the usage of mask_label\n\t\t(1) given a supervised dataset, True means that mask a supervised data to mimic unsupervised data\n\t\t(2) given an unsupervised dataset, this setting is not supported, since it is already an unsupervised data\n\t\t'''\n\t\tmask_label = False\n\t\tif mask_label:\n\t\t\tassert not self.data_id in MSLETOR_SEMI\n\t\t\tmask_ratio = 0.1\n\t\t\tmask_type = 'rand_mask_rele'\n\t\telse:\n\t\t\tmask_ratio = None\n\t\t\tmask_type = None\n\n\t\t# more evaluation settings that are rarely changed\n\t\tself.eval_dict = dict(debug=self.debug, grid_search=False, dir_output=self.dir_output,\n\t\t\t\t\t\t cutoffs=[1, 3, 5, 10, 20, 50], do_validation=do_validation, vali_k=vali_k,\n\t\t\t\t\t\t do_summary=do_summary, do_log=do_log, log_step=log_step, loss_guided=False, epochs=epochs,\n\t\t\t\t\t\t mask_label=mask_label, mask_ratio=mask_ratio, mask_type=mask_type)\n\n\t\treturn self.eval_dict",
"def get_custom_dsat_exp_conf_from_args(\n args: argparse.Namespace,\n) -> Dict[str, Any]:\n exp_config = _utils.get_dict_from_yaml_or_json_path(\n args.config_path\n ) # add the search runner's experiment id to the description of the corresonding Trial\n additional_description = f\"(#{args.experiment_id}) generated\"\n existing_description = exp_config.get(\"description\")\n if existing_description is not None:\n exp_config[\"description\"] = f\"{additional_description} - {exp_config['description']}\"\n else:\n exp_config[\"description\"] = additional_description\n\n # Overwrite the searcher section.\n exp_config[\"searcher\"] = {\n \"name\": \"custom\",\n \"metric\": args.metric,\n \"smaller_is_better\": _utils.smaller_is_better(args.metric),\n }\n # Add all necessary autotuning keys from defaults and user-supplied args.\n autotuning_config = _defaults.AUTOTUNING_DICT\n autotuning_config[\"autotuning\"][\"start_profile_step\"] = args.start_profile_step\n autotuning_config[\"autotuning\"][\"end_profile_step\"] = args.end_profile_step\n\n exp_config[\"hyperparameters\"] = merge_dicts(\n exp_config[\"hyperparameters\"], {_defaults.OVERWRITE_KEY: autotuning_config}\n )\n # Add an internal key to the HP dict which enables the DSAT code path for Trial classes.\n exp_config[\"hyperparameters\"][_defaults.USE_DSAT_MODE_KEY] = True\n\n return exp_config",
"def test_create_experiment_hit_no_config(self):\n with OrionState(experiments=[config]) as cfg:\n experiment = create_experiment(config[\"name\"], storage=cfg.storage_config)\n\n assert experiment.name == config[\"name\"]\n assert experiment.version == 1\n assert experiment.space.configuration == config[\"space\"]\n assert experiment.algorithm\n assert experiment.algorithm.configuration == config[\"algorithm\"]\n assert experiment.max_trials == config[\"max_trials\"]\n assert experiment.max_broken == config[\"max_broken\"]\n assert experiment.working_dir == config[\"working_dir\"]",
"def update_config(config, args):\n if args.n_train is not None:\n config['data']['n_train'] = args.n_train\n if args.n_valid is not None:\n config['data']['n_valid'] = args.n_valid\n if args.real_weight is not None:\n config['data']['real_weight'] = args.real_weight\n if args.lr is not None:\n config['optimizer']['learning_rate'] = args.lr\n if args.hidden_dim is not None:\n config['model']['hidden_dim'] = args.hidden_dim\n if args.n_graph_iters is not None:\n config['model']['n_graph_iters'] = args.n_graph_iters\n if args.batch_size is not None:\n config['data']['batch_size'] = args.batch_size\n if args.n_epochs is not None:\n config['training']['n_epochs'] = args.n_epochs\n if args.weight_decay is not None:\n config['optimizer']['weight_decay'] = args.weight_decay\n\n return config",
"def set_exp_defaults(self, **kwargs):\n default_exp = False\n for key in kwargs:\n if key in self._exp_defaults:\n setattr(self,key,kwargs[key])\n \n if self.exp is not None:\n self.instrument = self.exp[0:3]\n if self.instrument is None:\n self.instrument = psutils.instrument_guess()\n\n if self.station is None:\n station = 0\n else:\n station = self.station\n\n inst_id = '{:}:{:}'.format(self.instrument.upper(), station)\n\n if self.exp is None or self.live is True:\n if psutils.live_source(monshmserver=self.monshmserver) is not None:\n self.live = True\n self.exp = psutils.active_experiment(inst_id)\n self.run = 0\n self.h5 = False\n self.indexed = False\n else:\n self.live = False\n if self.ffb:\n self.indexed = True\n else:\n self.indexed = True\n if self.exp is None:\n self.exp = psutils.experiment_guess(instrument=self.instrument)\n\n if self.exp.startswith('dia'):\n self.instrument = self.exp[3:6]\n self.indexed = False\n else:\n self.instrument = self.exp[0:3]",
"def prepare_config(cls, config, is_mode_config):\n if not is_mode_config:\n if 'enable_events' not in config:\n config['enable_events'] = 'ball_started'\n if 'disable_events' not in config:\n config['disable_events'] = 'ball_will_end'\n return super().prepare_config(config, is_mode_config)",
"def prepare_config(cls, config, is_mode_config):\n if not is_mode_config:\n if 'enable_events' not in config:\n config['enable_events'] = 'ball_started'\n if 'disable_events' not in config:\n config['disable_events'] = 'ball_will_end'\n return super().prepare_config(config, is_mode_config)",
"def config(\n data_folder=settings.data_folder,\n logs_folder=settings.logs_folder,\n imgs_folder=settings.imgs_folder,\n cache_folder=settings.cache_folder,\n cache_responses=settings.cache_responses,\n log_file=settings.log_file,\n log_console=settings.log_console,\n log_level=settings.log_level,\n log_name=settings.log_name,\n log_filename=settings.log_filename,\n useful_idf_objects=settings.useful_idf_objects,\n default_weight_factor=\"area\",\n ep_version=settings.ep_version,\n debug=settings.debug,\n):\n # set each global variable to the passed-in parameter value\n settings.cache_responses = cache_responses\n settings.cache_folder = Path(cache_folder).expand().makedirs_p()\n settings.data_folder = Path(data_folder).expand().makedirs_p()\n settings.imgs_folder = Path(imgs_folder).expand().makedirs_p()\n settings.logs_folder = Path(logs_folder).expand().makedirs_p()\n settings.log_console = log_console\n settings.log_file = log_file\n settings.log_level = log_level\n settings.log_name = log_name\n settings.log_filename = log_filename\n settings.useful_idf_objects = useful_idf_objects\n settings.zone_weight.set_weigth_attr(default_weight_factor)\n settings.ep_version = ep_version\n settings.debug = debug\n\n # if logging is turned on, log that we are configured\n if settings.log_file or settings.log_console:\n get_logger(name=\"archetypal\")\n log(\"Configured archetypal\")",
"def _ApplyFlags(cls, config_values, flag_values):\n super()._ApplyFlags(config_values, flag_values)\n if flag_values['cloud_spanner_config'].present:\n config_values['config'] = flag_values.cloud_spanner_config\n if flag_values['cloud_spanner_nodes'].present:\n config_values['nodes'] = flag_values.cloud_spanner_nodes\n if flag_values['cloud_spanner_project'].present:\n config_values['project'] = flag_values.cloud_spanner_project",
"def adjust_pipeline_config(self, cfg):\n cfg_cp = copy.deepcopy(cfg)\n cfg_tiny = copy.deepcopy(cfg)\n workers_num = self._calc_workers_num()\n General.parallel_search = False\n self._get_time_params(cfg_cp)\n self._simulate_tiny_pipeline(cfg_tiny)\n General.parallel_search = cfg.general.parallel_search\n self._modify_pipeline_config(workers_num, self.epoch_time, self.params_dict)\n if vega.is_npu_device():\n os.environ['RANK_TABLE_FILE'] = os.environ['ORIGIN_RANK_TABLE_FILE']\n os.environ['RANK_SIZE'] = os.environ['ORIGIN_RANK_SIZE']\n logging.info('Adjust runtime config successfully.')"
] | [
"0.69408005",
"0.6118065",
"0.5987085",
"0.59833956",
"0.59729207",
"0.5912382",
"0.5890735",
"0.58508605",
"0.5849696",
"0.5837483",
"0.5785152",
"0.5770095",
"0.57107866",
"0.5698733",
"0.5692588",
"0.56796557",
"0.56731826",
"0.56684154",
"0.5664908",
"0.56209606",
"0.5597542",
"0.5594559",
"0.5571277",
"0.5553425",
"0.55522835",
"0.55511206",
"0.55511206",
"0.5546329",
"0.55418754",
"0.5541857"
] | 0.7048604 | 0 |
Compute the harmonic CQT from a given audio file | def compute_hcqt(audio_fpath):
(bins_per_octave, n_octaves, harmonics,
sr, f_min, hop_length) = get_hcqt_params()
y, fs = librosa.load(audio_fpath, sr=sr)
cqt_list = []
shapes = []
for h in harmonics:
cqt = librosa.cqt(
y, sr=fs, hop_length=hop_length, fmin=f_min*float(h),
n_bins=bins_per_octave*n_octaves,
bins_per_octave=bins_per_octave
)
cqt_list.append(cqt)
shapes.append(cqt.shape)
shapes_equal = [s == shapes[0] for s in shapes]
if not all(shapes_equal):
min_time = np.min([s[1] for s in shapes])
new_cqt_list = []
for i in range(len(cqt_list)):
new_cqt_list.append(cqt_list[i][:, :min_time])
cqt_list = new_cqt_list
log_hcqt = ((1.0/80.0) * librosa.core.amplitude_to_db(
np.abs(np.array(cqt_list)), ref=np.max)) + 1.0
return log_hcqt | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_cqt(data):\n \n # Perform the Constant-Q Transform\n CQT = librosa.cqt(data, sr=AUDIO_SAMPLE_RATE, hop_length=512, fmin=None, n_bins=96, bins_per_octave=12)\n CQT_mag = librosa.magphase(CQT)[0]**4\n \n # Convert to dB\n CQTdB = librosa.core.amplitude_to_db(CQT_mag, ref = np.amax)\n\n return CQTdB",
"def music(csi_corr, csi_target, Ntx, Nrx, d_tx, d_rx, t):\n\n In = 0\n s = phase_correction(csi_corr, csi_target)\n s_lin = (s[:, :, 0, t:t + 2].reshape(6, 2, order='F'))\n\n '''Compute the covariance matrix and the eigendecompositon'''\n R_hat = np.cov(s_lin)\n D, Q = ln.eig(R_hat)\n\n '''Sort the eigenvalues in D'''\n Do = np.abs(D)\n D = np.sort(Do)[::-1]\n I = np.argsort(Do)[::-1]\n Q = Q[:, I]\n\n ''' Compute the Number of signal that are significative'''\n T = np.cumsum(np.real(D))\n for i in range(1, 1, np.size(T)):\n if T(i) >= 0.99 * T(np.size(T)):\n In = i\n break\n\n ''' Get the signal eigenvectors'''\n In = 0 # take the first signal\n Qs = Q[:, :In]\n\n ''' Get the noise eigenvectors'''\n Qn = Q[:, In + 1:]\n\n ''' Angles at which MUSIC Pseudospectrum will be computed '''\n angles1 = np.arange(-90, 90, 1)\n angles2 = np.arange(-90, 90, 1)\n\n '''Compute steering vectors corresponding values in angles'''\n a1 = np.exp(-1.j * 2 * np.pi * d_rx * np.tensordot(arange(Nrx), sin(angles1 * np.pi / 180), 0))\n a2 = np.exp(-1.j * 2 * np.pi * d_tx * np.tensordot(arange(Ntx), sin(angles1 * np.pi / 180), 0))\n\n '''Compute MUSIC \"spectrum\" '''\n music_spectrum = np.zeros((np.size(angles1), np.size(angles2)), dtype=complex)\n for k in range(1, np.size(angles2)):\n for j in range(1, np.size(angles1)):\n K = np.kron(a1[:, j], a2[:, k])\n s = dot(K.T, Qn)\n music_spectrum[j, k] = 1 / dot(abs(s), abs(s).T)\n\n ''' compute the mesh and plot the surf of the pseudospectrum '''\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x = angles2\n y = angles1\n X, Y = np.meshgrid(x, y)\n Z = np.abs(np.squeeze(music_spectrum))\n ax = fig.add_subplot(111, projection='3d')\n ax.set_ylabel('AoA')\n ax.set_xlabel('AoD')\n ax.set_xlim3d(-90, 90)\n ax.set_ylim3d(-90, 90)\n ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.jet, alpha=0.7, linewidth=0.25)\n\n ''' detect the peaks corresponding to DoD and DoA '''\n detect = detect_peaks(Z)\n index_max = np.column_stack(np.where(detect))\n x_ind = index_max[:, 0]\n y_ind = index_max[:, 1]\n tab = (np.transpose(np.array((Z[x_ind, y_ind], x[x_ind], y[y_ind])))).tolist()\n tab.sort(key=lambda e: e[0], reverse=True)\n myarray = np.asarray(tab[0])\n angles = myarray[1:]\n plt.show()\n\n return angles",
"def get_cqt(filepath, restrict=restrict_range, use_librosa=False):\n try:\n audio, fs = librosa.load(path=filepath, sr=22050)\n except Exception as e:\n fs, audio_ro = scipy.io.wavfile.read(filepath)\n audio = np.copy(audio_ro) / 32767\n if fs != 22050:\n print(\"incorrect fs\")\n return None\n # frame-wise calculation\n if restrict:\n start = start_sec * fs\n end = end_sec * fs\n audio = np.array(audio[start:end], dtype=np.float32)\n # normalize\n audio = (cqt_params['normalizing_constant'] * audio) / np.std(audio[np.abs(audio > 0.00001)])\n # normalize\n cqt = np.abs(librosa.core.cqt(audio, fmin=cqt_params['fmin'], sr=global_fs, hop_length=hopSize,\n n_bins=cqt_params['total_bins'], bins_per_octave=cqt_params['bins_per_8va']))\n return cqt",
"def harmonic_cqt(x_in, sr, hop_length=1024, fmin=27.5, n_bins=72,\n n_harmonics=5, bins_per_octave=36, tuning=0.0, filter_scale=1,\n aggregate=None, norm=1, sparsity=0.0, real=False):\n\n kwargs = dict(n_bins=n_bins, bins_per_octave=bins_per_octave,\n hop_length=hop_length, sr=sr, tuning=tuning,\n filter_scale=filter_scale, aggregate=aggregate, norm=norm,\n sparsity=sparsity, real=real)\n\n cqt_spectra = []\n min_tdim = np.inf\n for i in range(1, n_harmonics + 1):\n cqt_spectra += [np.array([librosa.cqt(x_c, fmin=i * fmin, **kwargs).T\n for x_c in x_in.T])[:, np.newaxis, ...]]\n min_tdim = min([cqt_spectra[-1].shape[2], min_tdim])\n cqt_spectra = [x[:, :, :min_tdim, :] for x in cqt_spectra]\n\n return np.concatenate(cqt_spectra, axis=1)",
"def midC() :\n\n num_samples = 44100\n duration = 1\n\n # TODO: implement mid C\n # see http://en.wikipedia.org/wiki/Audio_frequency for reference\n\n # audio.writeAudioData( './data/mid-c.wav', data, num_samples)",
"def processFile(filename,length = 256,q=1,fs_in=8000,divide=4,plot=False):\n length = length*divide\n #fs = sample rate, sound = multichannel sound signal\n try:\n fs1, sound = wavfile.read(filename)\n except ValueError:\n print(str(filename) + ' failed to process')\n return 'failed'\n if fs1 != fs_in:\n raise ValueError('Sampling rate should be ' + str(fs_in) + ' for: ' + filename)\n sig1 = sound[:0] #left channel\n pre_emphasis = 0.97\n sig1 = np.append(sig1[0], sig1[1:] - pre_emphasis * sig1[:-1])\n\n \n fs2, sig2 = downsample(sig1,fs1,q)\n N2 = len(sig2)\n sig3 = sig2[N2//2-length:N2//2+length]\n #print(len(sig3))\n\n FFT = abs(scipy.fft(sig3))\n FFT_side = FFT[range(len(FFT)//2)]\n #freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)\n #plt.plot(freqs,FFT)\n if len(FFT_side) != length:\n print('ERROR MESSAGE DETAILS')\n print('filename: ' + filename)\n print('length = ' + str(length))\n print('fs_in = ' + str(fs_in))\n print('q = ' + str(q))\n print('divide = ' + str(divide))\n total_time = len(sig1)/fs1\n print('total_time = ' + str(total_time))\n print('Please check: length < total_time*fs//(2*q)')\n print('Check: ' + str(length) + ' < ' + str(total_time*fs1//(2*q)))\n raise ValueError('Length FFT_side != length: ' + str(len(FFT_side)) + ' != ' + str(length))\n \n \n FFT_log = []\n # normalize FFT\n for value in FFT_side:\n value = np.log(value)\n FFT_log.append(value)\n max_val = getMax(FFT_log)[1]\n FFT_norm = []\n for value in FFT_log:\n FFT_norm.append(value/max_val)\n \n \n FFT_side = np.array(FFT_norm)\n FFT_divided = FFT_side[range(length//divide)]\n #plot = True\n if plot == True:\n freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)\n freqs_divided = np.array(freqs[range(len(FFT_divided))])\n plt.plot(freqs_divided,FFT_divided) # plotting the complete fft spectrum\n plt.show()\n \n return FFT_divided",
"def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))",
"def sp_audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n sig = sig.unsqueeze(0)\n sig = hparams[\"speed_perturb\"](sig)\n sig = sig.squeeze(0)\n return sig",
"def wav2mfcc(file_path, max_len=44, n_mfcc=20):",
"def cqt_one(input_file, output_file, cqt_params=None, audio_params=None,\n harmonic_params=None, skip_existing=True):\n input_exists, output_exists = [os.path.exists(f)\n for f in (input_file, output_file)]\n if not input_exists:\n logger.warning(\"[{0}] Input file doesn't exist, skipping: {1}\"\n \"\".format(time.asctime(), input_file))\n return input_exists\n\n if skip_existing and output_exists:\n logger.info(\"[{0}] Output file exists, skipping: {1}\"\n \"\".format(time.asctime(), output_file))\n return output_exists\n\n logger.debug(\"[{0}] Starting {1}\".format(time.asctime(), input_file))\n if not cqt_params:\n cqt_params = CQT_PARAMS.copy()\n\n if not audio_params:\n audio_params = AUDIO_PARAMS.copy()\n\n if not harmonic_params:\n harmonic_params = HARMONIC_PARAMS.copy()\n\n logger.debug(\"[{0}] Audio conversion {1}\".format(\n time.asctime(), input_file))\n try:\n x, fs = claudio.read(input_file, **audio_params)\n if len(x) <= 0:\n logger.error(\"Bad Input signal length={} for audio {}\".format(\n len(x), input_file))\n return False\n logger.debug(\"[{0}] Computing features {1}\".format(\n time.asctime(), input_file))\n cqt_spectra = np.array([np.abs(librosa.cqt(x_c, sr=fs, **cqt_params).T)\n for x_c in x.T])\n\n cqt_params.update(**harmonic_params)\n harm_spectra = harmonic_cqt(x, fs, **cqt_params)\n\n frame_idx = np.arange(cqt_spectra.shape[1])\n time_points = librosa.frames_to_time(\n frame_idx, sr=fs, hop_length=cqt_params['hop_length'])\n logger.debug(\"[{0}] Saving: {1}\".format(time.asctime(), output_file))\n np.savez(\n output_file, time_points=time_points,\n cqt=np.abs(cqt_spectra).astype(np.float32),\n harmonic_cqt=np.abs(harm_spectra).astype(np.float32))\n except AssertionError as e:\n logger.error(\"Failed to load audio file: {} with error:\\n{}\".format(\n input_file, e))\n logger.debug(\"[{0}] Finished: {1}\".format(time.asctime(), output_file))\n return os.path.exists(output_file)",
"def chisq_cphase_fft(vis_arr, A, clphase, sigma, order=FFT_INTERP_DEFAULT):\n\n im_info, (uv1, uv2, uv3) = A\n clphase = clphase * DEGREE\n sigma = sigma * DEGREE\n clphase_samples = np.angle(sampler(vis_arr, im_info, [uv1, uv2, uv3], order=order, sample_type=\"bs\"))\n chisq = (2.0/len(clphase)) * np.sum((1.0 - np.cos(clphase-clphase_samples))/(sigma**2))\n return chisq",
"def compute_chroma_bpm(filepath):\n\t\n\ty, sr = librosa.load(filepath)\n\t# Compute chroma features from the harmonic signal\n\tchromagram = librosa.feature.chroma_stft(y=y,sr=sr)\n\tchromaDF = pd.DataFrame(chromagram)\n\n\t# Filter intensity values less than 1\n\tchromaDF[chromaDF < 1] = 0\n\tchroma_f = chromaDF.sum(axis = 1)\n\n\t# Calculate chroma distribution\n\tchroma_p = [i / sum(chroma_f) for i in chroma_f]\n\t\n\t# Beat track on the percussive signal\n\ttempo, beat_frames = librosa.beat.beat_track(y=y,sr=sr)\n\t\n\tresults = [tempo]\n\tresults.append(chroma_p)\n\t\n\treturn results",
"def chi_c_real(params):\n Qi = Q_i(params)\n Qc = params['Q_e_real'].value\n return ((4 * Qc * Qi) /\n (Qc + Qi) ** 2)",
"def chisqdata_cphase_fft(Obsdata, Prior, fft_pad_frac=1):\n clphasearr = Obsdata.c_phases(mode=\"all\", count=\"min\")\n uv1 = np.hstack((clphasearr['u1'].reshape(-1,1), clphasearr['v1'].reshape(-1,1)))\n uv2 = np.hstack((clphasearr['u2'].reshape(-1,1), clphasearr['v2'].reshape(-1,1)))\n uv3 = np.hstack((clphasearr['u3'].reshape(-1,1), clphasearr['v3'].reshape(-1,1)))\n clphase = clphasearr['cphase']\n sigma = clphasearr['sigmacp']\n\n npad = fft_pad_frac * np.max((Prior.xdim, Prior.ydim))\n\n im_info = (Prior.xdim, Prior.ydim, npad, Prior.psize, Prior.pulse)\n\n A = (im_info, [uv1, uv2, uv3])\n\n return (clphase, sigma, A)",
"def get_signal(self, audio, magnitudes):\n return core.frequency_filter(\n audio, magnitudes, window_size=self.window_size\n )",
"def read_cospectrum(path,d):\r\n spec = []\r\n timeseries = []\r\n for i in range(len(d)):\r\n filename = path + d[i]\r\n\r\n with open(filename, \"r\") as f:\r\n reader = csv.reader(f,delimiter=',')\r\n ct=1\r\n for row in reader:\r\n if ct==6:\r\n Hz = float(row[0].split('_')[-1])\r\n elif ct==7:\r\n height = float(row[0].split('_')[-1])\r\n elif ct==8:\r\n ws = float(row[0].split('_')[-1])\r\n elif ct==9:\r\n avg_period = float(row[0].split('_')[-1])\r\n elif ct==13:\r\n header = row\r\n elif ct>13:\r\n break\r\n ct+=1\r\n \r\n meta = [Hz,height,ws,avg_period]\r\n \r\n thisspec = np.genfromtxt(filename,delimiter=',',skip_header=13)\r\n spec.append(thisspec)\r\n thistime = re.findall('\\d{8}-\\d{4}',filename)[0]\r\n thisdate = datetime.strptime(thistime,'%Y%m%d-%H%M')\r\n timeseries.append(thisdate) \r\n \r\n return spec, timeseries, header, meta",
"def chisq_cphase(cphase, Z, F_cphase, sigma_cphase):\n cphase_samples = compute_cphase(Z, F_cphase)\n chisq= (2.0/len(cphase)) * np.sum((1.0 - np.cos(cphase-cphase_samples))/(sigma_cphase**2))\n return chisq",
"def d_q_hisano(self, mchi):\n w = self.MW**2/mchi**2\n def gAV(x):\n bx = np.sqrt(1-x/4+0*1j)\n out = np.real_if_close(1/(24*bx) * np.sqrt(x) * (8 - x - x**2) * np.arctan(2*bx/np.sqrt(x))\\\n - 1/24 * x * (2 - (3+x)*np.log(x)))\n return out\n return (self.alpha)**2/(self.MW**2*self.sw**4) * ((self.dchi**2 - 1)/8 * gAV(w))",
"def get_wave(q):\n\n approximant = 'SEOBNRv4'\n chi1 = [0,0,0]\n chi2 = [0,0,0]\n deltaTOverM = 0.1\n omega0 = 2e-2\n\n t, h = LALPy.generate_LAL_waveform(approximant, q, chi1, chi2, deltaTOverM, omega0)\n\n Amp = np.abs(h)\n peakIdx = np.argmax(Amp)\n\n t -= t[peakIdx]\n\n tmin = -500\n if min(t) > tmin:\n raise Exception('Data not long enough, decrease omega0.')\n keepIdx = t - tmin > -1e-3 # simple hack to ensure t_vec is always nearly the same\n t = t[keepIdx]\n h = h[keepIdx]\n\n tmax = 100\n keepIdx = t - tmax < 1e-3\n t = t[keepIdx]\n h = h[keepIdx]\n\n return t, h",
"def get_duration_sox_s(audio_file_path: str) -> float:\n global FS_HZ\n assert FS_HZ is not None\n duration_n = get_duration_sox_n(audio_file_path)\n return duration_n / FS_HZ",
"def get_audio(filepath, restrict=restrict_range, use_librosa=False, normalize=True):\n try:\n audio, fs = librosa.load(path=filepath, sr=22050)\n except Exception as e:\n fs, audio_ro = scipy.io.wavfile.read(filepath)\n audio = np.copy(audio_ro) / 32767\n if fs != 22050:\n print(\"incorrect fs\")\n return None\n # frame-wise calculation\n if restrict:\n start = start_sec * fs\n end = end_sec * fs\n audio = np.array(audio[start:end], dtype=np.float32)\n if normalize is True:\n audio = (cqt_params['normalizing_constant'] * audio) / np.std(audio[np.abs(audio > 0.00001)])\n return audio",
"def get_beat_sync_spectrums(audio):\n y, sr = core.load(audio, sr=44100)\n eql_y = EqualLoudness()(y)\n tempo, framed_dbn = self_tempo_estimation(y, sr)\n np.append(framed_dbn, np.array(len(y)/sr))\n band1 = (0, 220)\n band2 = (220, 1760)\n band3 = (1760, sr / 2)\n band1list = []\n band2list = []\n band3list = []\n for i in range(1, len(framed_dbn)):\n fft_eq = abs(np.fft.fft(eql_y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n freqs = np.fft.fftfreq(len(fft_eq), 1 / sr)\n band1list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band1[0], freqs < band1[1]))]**2))))\n band2list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band2[0], freqs < band2[1]))]**2))))\n band3list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band3[0], freqs < band3[1]))]**2))))\n\n band1list = np.array(band1list).transpose()\n band2list = np.array(band2list).transpose()\n band3list = np.array(band3list).transpose()\n return np.vstack([band1list, band2list, band3list])",
"def music(idx, n_music=200):\n f = freqs[idx]\n Rxx = np.dot(X[:, idx], X[:, idx].H)\n lam, V = eig_sorted(Rxx)\n En = V[:, 1:] # Noise subspace for one source\n\n theta_range = np.linspace(0, 2*np.pi, n_music)\n P_music = np.zeros(n_music)\n for i in range(n_music):\n sv = ma.steering_vector(theta_range[i], f)\n vec = np.dot(En.H, ma.steering_vector(theta_range[i], f))\n P_music[i] = 1/np.linalg.norm(vec)**2\n\n vv = V[:, 0].flatten()\n print('----------')\n print('Performing MUSIC at {:.5} Hz'.format(f))\n print('-----------------------------')\n print('Steering vector subspace check:\\n')\n print('At the correct angle of {:.3}, '.format(theta*180/np.pi) +\n 'the real parts of the eigenvalues of R_xx are:')\n print('\\n'.join(' {:.3}'.format(np.real(l)) for l in lam))\n print('\\nSteering vector / eigenvector of max eigenvalue:')\n print((ma.steering_vector(theta, f) / vv).T)\n return P_music, theta_range",
"def filter_audio(audio):\n\n # Calculate voice energy for every 123 ms block\n apower = lr.amplitude_to_db(np.abs(lr.stft(audio, n_fft=2048)), ref=np.max)\n\n # Summarize energy of every rate, normalize\n apsums = np.sum(apower, axis=0) ** 2\n apsums -= np.min(apsums)\n apsums /= np.max(apsums)\n\n # Smooth the graph for saving short spaces and pauses, remove sharpness\n apsums = np.convolve(apsums, np.ones((9,)), 'same')\n # Normalize again\n apsums -= np.min(apsums)\n apsums /= np.max(apsums)\n\n # Set noise limit to 35% over voice\n apsums = np.array(apsums > 0.35, dtype=bool)\n\n # Extend the blocks every on 125ms\n # before separated samples (2048 at block)\n apsums = np.repeat(apsums, np.ceil(len(audio) / len(apsums)))[:len(audio)]\n\n return audio[apsums]",
"def stimulus_response_coherence(filename, segment_length):\n data, stimulus, sampling_interval, time = load_data(filename)\n nyquist = 1./(sampling_interval * 2.)\n f_step = 1./(sampling_interval * segment_length)\n f = np.arange(0, nyquist + f_step, f_step)\n noOfSamples = data.shape[0]\n noOfSegments = int(np.floor(noOfSamples/segment_length))\n kernel = gauss_kernel(0.001, 1./sampling_interval, 0.01)\n window = np.hanning(segment_length)\n coherence_spectra = np.zeros((segment_length, data.shape[1]), dtype=np.complex_)\n exp_coherence_spectra = np.zeros((segment_length, data.shape[1]), dtype=np.complex_)\n # we will need the psth for the expected coherence \n psth = np.zeros(data.shape[0])\n for i in range(data.shape[1]):\n psth = psth + np.convolve(data[:,i], kernel, mode='same') * (1./sampling_interval)\n psth = psth/data.shape[1]\n # go and calculate the spectra\n for i in range(data.shape[1]):\n trace = data[:,i]/sampling_interval\n trace = np.convolve(trace, kernel, mode=\"same\")\n f_resp = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n f_psth = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n f_stim = np.zeros((segment_length, noOfSegments), dtype=np.complex_)\n for n in range(noOfSegments):\n start\t= n * segment_length\n end \t= start + segment_length\n resp_segment = trace[start:end]\n resp_segment = resp_segment - np.mean(resp_segment)\n resp_segment = resp_segment * window\n psth_segment = psth[start:end]\n psth_segment = psth_segment - np.mean(psth_segment)\n psth_segment = psth_segment * window\n stim_segment = stimulus[start:end]\n stim_segment = stim_segment - np.mean(stim_segment)\n stim_segment = stim_segment * window\n \n f_resp[:, n] = np.fft.fft(resp_segment, segment_length)\n f_stim[:, n] = np.fft.fft(stim_segment, segment_length)\n f_psth[:, n] = np.fft.fft(psth_segment, segment_length)\n\n f_resp_conj = np.conjugate(f_resp) # complex conjugate spectrum of response segments\n f_stim_conj = np.conjugate(f_stim) # complex conjugate spectra of stimulus segments\n f_psth_conj = np.conjugate(f_psth) # complex conjugate spectra of psth segments\n\n sr_cross_spectrum = np.mean(f_stim_conj * f_resp, axis=1) # cross spectrum S*R\n ss_auto_spectrum = np.mean(f_stim_conj * f_stim, axis=1) # auto spectrum S*S\n\n rs_cross_spectrum = np.mean(f_resp_conj * f_stim, axis=1) # cross spectrum R*S\n rr_auto_spectrum = np.mean(f_resp_conj * f_resp, axis=1) # auto spectrum R*R\n \n pr_cross_spectrum = np.mean(f_psth_conj * f_resp, axis=1) # cross spectrum PSTH*R\n pp_auto_spectrum = np.mean(f_psth_conj * f_psth, axis=1) # auto spectrum PSTH*PSTH\n rp_cross_spectrum = np.mean(f_resp_conj * f_psth, axis=1) # cross spectrum R*PSTH\n \n coherence_spectra[:, i] = (sr_cross_spectrum * rs_cross_spectrum) / (ss_auto_spectrum * rr_auto_spectrum)\n exp_coherence_spectra[:, i] = (pr_cross_spectrum * rp_cross_spectrum) / (pp_auto_spectrum * rr_auto_spectrum)\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(f, np.mean(coherence_spectra[:len(f),:], axis=1), color='dodgerblue', label=\"r-s coherence\")\n ax.plot(f, np.mean(exp_coherence_spectra[:len(f),:], axis=1), color='silver', label=\"r-r coherence\")\n ax.set_xlim([0, 300])\n ax.set_ylim([0, 1])\n ax.set_xlabel('frequency [Hz]')\n ax.set_ylabel('coherence')\n ax.legend(fontsize=9)\n plt.show()",
"def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]",
"def oscillator_bank(frequency, amplitude, sample_rate):\n # constrain frequencies\n frequency = torch.clamp(frequency, 20., sample_rate / 2.)\n\n # translate frequencies in hz to radians\n omegas = frequency * (2 * np.pi)\n omegas = omegas / sample_rate\n\n\n phases = torch.cumsum(omegas, dim=-1)\n wavs = torch.sin(phases)\n\n audio = wavs * amplitude\n audio = torch.sum(audio, dim=1)\n return audio",
"def get_wavelength(self):\n E = -self.E0*(1.0/self.n_low**2 - 1.0/self.n_high**2)\n return SI['hc']*1e12/(E*SI['keV'])",
"def get_cw_freq(self):\n return self.get_frequency(self.synth)",
"def chisqdata_cphase(Obsdata, Prior, mask):\n\n clphasearr = Obsdata.c_phases(mode=\"all\", count=\"min\")\n uv1 = np.hstack((clphasearr['u1'].reshape(-1,1), clphasearr['v1'].reshape(-1,1)))\n uv2 = np.hstack((clphasearr['u2'].reshape(-1,1), clphasearr['v2'].reshape(-1,1)))\n uv3 = np.hstack((clphasearr['u3'].reshape(-1,1), clphasearr['v3'].reshape(-1,1)))\n clphase = clphasearr['cphase']\n sigma = clphasearr['sigmacp']\n\n A3 = (ftmatrix(Prior.psize, Prior.xdim, Prior.ydim, uv1, pulse=Prior.pulse, mask=mask),\n ftmatrix(Prior.psize, Prior.xdim, Prior.ydim, uv2, pulse=Prior.pulse, mask=mask),\n ftmatrix(Prior.psize, Prior.xdim, Prior.ydim, uv3, pulse=Prior.pulse, mask=mask)\n )\n return (clphase, sigma, A3)"
] | [
"0.66460973",
"0.6453836",
"0.636979",
"0.6216629",
"0.60845083",
"0.58505565",
"0.5831372",
"0.5564791",
"0.55225974",
"0.55209",
"0.55197704",
"0.55066806",
"0.54329526",
"0.5429815",
"0.5427289",
"0.5414956",
"0.53904355",
"0.5377025",
"0.53763163",
"0.5364629",
"0.53513414",
"0.53459346",
"0.5335954",
"0.53178215",
"0.52841175",
"0.5279315",
"0.52693415",
"0.5256854",
"0.52519065",
"0.52430815"
] | 0.758912 | 0 |
Get the hcqt frequency grid | def get_freq_grid():
(bins_per_octave, n_octaves, _, _, f_min, _) = get_hcqt_params()
freq_grid = librosa.cqt_frequencies(
bins_per_octave*n_octaves, f_min, bins_per_octave=bins_per_octave
)
return freq_grid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_frequency(self):\r\n # print '*********in get freq'\r\n self.cntr.run('FREQ 1')\r\n f_0_ = self.cntr.get_measurements(1)\r\n self.f_0 = f_0_[0]\r\n self.cntr.run('FREQ 2')\r\n f_rep_ = self.cntr.get_measurements(1)\r\n self.f_rep = f_rep_[0]",
"def get_freq_array(bandwidth, n_chans):\n return numpy.arange(n_chans)*float(bandwidth)/n_chans",
"def TH(self, full=False):\n\t\treturn (arange(self.thbins + 3*self.thbins*(full==True)) + 0.5) * (pi / 2) / self.thbins",
"def getHFtableData(self, ep=None):\n HFdict = {}\n if self.hfMode == 'limiter':\n HFdict['Heat Flux Mode'] = 'Limiter'\n if self.lqCNmode == 'eich':\n HFdict[\"\\u03BB Near Mode\"] = 'Eich Regression #15'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqEich\n else:\n HFdict[\"\\u03BB Near Mode\"] = 'User Defined'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqCN\n if self.lqCFmode == 'horacek':\n HFdict[\"\\u03BB Far Mode\"] = 'Horacek Figure 6a'\n HFdict[\"Common Region Far Heat Flux Width (\\u03BBq CF) [mm]\"] = self.lqCF\n else:\n HFdict[\"\\u03BB Far Mode\"] = 'User Defined'\n HFdict[\"Common Region Far Heat Flux Width (\\u03BBq CF) [mm]\"] = self.lqCF\n\n HFdict[\"Common Region Near Power Fraction\"] = self.fracCN\n HFdict[\"Common Region Far Power Fraction\"] = self.fracCF\n\n elif self.hfMode == 'multiExp':\n HFdict['Heat Flux Mode'] = 'Multiple (4) Exponentials'\n if self.lqCNmode == 'eich':\n HFdict[\"\\u03BB Near Mode\"] = 'Eich Regression #15'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqEich\n else:\n HFdict[\"\\u03BB Near Mode\"] = 'User Defined'\n HFdict[\"Common Region Near Heat Flux Width (\\u03BBq CN) [mm]\"] = self.lqCN\n\n if self.lqCFmode == 'horacek':\n HFdict[\"\\u03BB Far Mode\"] = 'Horacek Figure 6a'\n else:\n HFdict[\"\\u03BB Far Mode\"] = 'User Defined'\n\n\n\n HFdict[\"Common Region Far Heat Flux Width (\\u03BBq CF) [mm]\"] = self.lqCF\n HFdict[\"Private Region Near Heat Flux Width (\\u03BBq PN) [mm]\"] = self.lqPN\n HFdict[\"Private Region Far Heat Flux Width (\\u03BBq PF) [mm]\"] = self.lqPF\n HFdict[\"Common Region Near Power Fraction\"] = self.fracCN\n HFdict[\"Common Region Far Power Fraction\"] = self.fracCF\n HFdict[\"Private Region Near Power Fraction\"] = self.fracPN\n HFdict[\"Private Region Far Power Fraction\"] = self.fracPF\n\n elif self.hfMode == 'qFile':\n HFdict[\"Heat Flux Mode\"] = 'Read HF from qFile'\n HFdict['qFilePath'] = self.qFilePath\n HFdict['qFileTag'] = self.qFileTag\n\n elif self.hfMode == 'eich':\n HFdict['Heat Flux Mode'] = 'Gaussian Spreading'\n if self.lqCNmode == 'eich':\n HFdict[\"\\u03BB Mode\"] = 'Eich Regression #15'\n HFdict[\"Heat Flux Width (\\u03BBq) [mm]\"] = self.lqEich\n else:\n HFdict[\"\\u03BB Mode\"] = 'User Defined'\n HFdict[\"Heat Flux Width (\\u03BBq) [mm]\"] = self.lqCN\n\n if self.SMode == 'makowski':\n HFdict['Greenwald Density Fraction'] = self.fG\n HFdict['Spreading (S) Mode'] = 'Makowski Figure 6'\n else:\n HFdict['Spreading (S) Mode'] = 'User Defined'\n HFdict['Greenwald Density Fraction'] = 'Only used for Makowski S Mode'\n HFdict['S [mm]'] = self.S\n HFdict['Background Heat Flux'] = self.qBG\n\n if self.hfMode != 'qFile':\n HFdict[\"Power Injected (Pinj) [MW]\"] = self.Pinj\n HFdict[\"Radiated Fraction of Injected Power\"] = self.coreRadFrac\n HFdict[\"Power Crossing Separatrix (Psol) [MW]\"] = self.Psol\n HFdict[\"Upper Inner Divertor Power Fraction\"] = self.fracUI\n HFdict[\"Upper Outer Divertor Power Fraction\"] = self.fracUO\n HFdict[\"Lower Inner Divertor Power Fraction\"] = self.fracLI\n HFdict[\"Lower Outer Divertor Power Fraction\"] = self.fracLO\n\n return HFdict",
"def freq():",
"def get_hbls_hbbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n u = self.u\n v = self.v\n \n v_upts = TTTW_func.v2u(v)\n Hz = z_u_w[:,1:] - z_u_w[:,:-1]\n\n\n\n # CALCULATE swr_frac\n self.swr_frac = TTTW_func.lmd_swr_frac(self.grid_dict)\n\n\n # WHOLE THING HAPPENS IN j loop through y-indices\n \n # INITIALIZE ARRAYS\n self.kmo = np.zeros([Ly])\n self.Cr = np.zeros([Ly])\n self.kbl = np.empty([Ly],dtype='int')\n self.C_h_MO = np.zeros([Ly])\n self.Cr = np.zeros([Ly,N+1]) # sum term\n self.FC = np.zeros([Ly,N+1])\n self.swdk_r = np.zeros([Ly,N+1])\n \n self.zscale = np.zeros([Ly,N])\n self.Kern = np.zeros([Ly,N])\n\n \n # --> LOOP THROUGH Y-INDICES\n for j in range(Ly):\n if self.LIMIT_MO_DEPTH:\n self.kmo[j] = 0\n self.C_h_MO[j] = self.C_MO *self.ustar[j]**3/self.vonKar\n \n self.kbl[j] = 0\n self.Cr[j,-1] = 0 # set top Cr\n self.Cr[j,0] = 0 # set bottom Cr\n \n # SEARCH FOR MIXED LAYER DEPTH\n self.FC[j,-1] = 0.\n\n\n # ---> LOOP TOP TO BOTTOM (FORTRAN ==> k=N-1,1,-1)\n for k in range(N-1,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n \n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n self.zscale[j,k_w] = zscale\n if self.LMD_KPP:\n if self.LMD_BKPP:\n zscaleb = z_u_r[j,k_r] - z_u_w[j,0]\n Kern = zscale * zscaleb**2 / ( (zscale + self.epssfcs*self.hbls_old[j]) * (zscaleb**2+(self.epssfcb**2*self.hbbl_old[j]**2)))\n else:\n Kern = zscale / (zscale + (self.epssfcs*self.hbls_old[j]))\n else:\n Kern = 1.\n \n\n\n self.Kern[j,k_w] = Kern\n self.FC[j,k_w] = self.FC[j,k_w+1] + Kern * (\\\n ( ( u[j,k_r+1] - u[j,k_r] )**2 + ( v_upts[j,k_r+1] - v_upts[j,k_r])**2 ) \\\n / (Hz[j,k_r] + Hz[j,k_r+1]) \\\n - 0.5 * ( Hz[j,k_r] + Hz[j,k_r+1]) * (self.Ri_inv * self.bvf[j,k_w] + self.C_Ek*self.f[j]*self.f[j]))\n\n\n #\t\tLOOP THAT FINDS BL DEPTH ##\n #----> LOOP TOP TO BOTTOM (start at free surface, w-level surface) \n \n if self.LMD_KPP:\n #swdk_r only used in this function so don't need to be class attribute\n # but for testing make it an attribute to see what it is\n \n # fortran equivlanet ===> k=N,1,-1 \n for k in range(N,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n ###################################################################### \n self.swdk_r[j,k_w] = np.sqrt( self.swr_frac[j,k_w] * self.swr_frac[j,k_w-1])\n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n Bfsfc = self.Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])\n \n self.bvf_max = np.sqrt(np.max([0,self.bvf[j,k_w-1]]))\n \n # CALCULATE TURBULENT VELOCITY SCALE FOR TRACERS\n \t\t\t self.ws = self.lmd_wscale_ws_only(Bfsfc, zscale,self.hbls_old[j],self.ustar[j])\n \n self.Vtsq = self.Vtc * self.ws* self.bvf_max + self.V0\n \n\n self.Cr[j,k_w] = self.FC[j,k_w] + self.Vtsq\n \n\n #######################################################################\n \n # SEARCH FOR hbls vertical level #\n '''\n kbl is specified at vertical w-level (via Cr which is at\n vertical w-levels)\n '''\n if self.kbl[j] == 0 and self.Cr[j,k_w] < 0:\n self.kbl[j] = k_w\n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] == 0 and Bfsfc*(z_u_w[j,N] - z_u_r[j,k_r]) > self.C_h_MO[j]:\n self.kmo[j] = k_w\n\n \n #--> still in j-loop\n #######################################################\n \n # \t\tGET SURFACE BOUNDARY LAYER DEPTH # \n self.hbls[j] = z_u_w[j,N] - z_u_w[j,0] + self.eps # set hbls as depth of entire water column\n if self.kbl[j] > 0:\n k_w = self.kbl[j]\n k_r = k_w - 1 \n if k_w == N: # set hbls at the surface btwn w- and rho-levels at surface\n self.hbls[j] = z_u_w[j,N] - z_u_r[j,N-1]\n \n else:\n self.hbls[j] = z_u_w[j,N] - ( 
z_u_r[j,k_r] * self.Cr[j,k_w+1] - z_u_r[j,k_r+1] * self.Cr[j,k_w]) / \\\n (self.Cr[j,k_w+1] - self.Cr[j,k_w])\n \n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] > 0:\n k_w = self.kmo[j]\n k_r = k_w-1\n if k_w == N:\n z_up = z_u_w[j,N]\n cff_up = np.max([0,Bo[j]])\n else:\n z_up = z_r[j,k_w+1]\n cff_up = np.max([0, Bo[j] + self.Bosol[j]*(1-self.swdk_r[j,(k_w-1)+1])])\n \n cff_dn = np.max([0,Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])]) \n h_MO = z_u_w[j,N] + self.C_h_MO[j] * ( cff_up*z_up - cff_dn * z_u_r[j,k_r] ) \\\n / ( cff_up * cff_dn * (z_up - z_u_r[j,k_r]) ) \\\n + self.C_h_MO[j] * (cff_dn - cff_up)\n\n self.hbls[j] = np.min([self.hbls[j],np.max([h_MO,0])])\n\n\n\n #### GET BOTTOM BOUNDARY LAYER DEPTH #######\n if self.LMD_BKPP:\n self.kbl[j] = 0 # reset Cr at bottom and kbl for BKPP\n self.Cr[j,0] = 0.\n self.FC[j,0] = 1.5 * self.FC[j,1] - 0.5 * self.FC[j,2] # linear extrapolation\n \n #---> LOOP BOTTOM TO TOP\n # FIND kbl for BBL\n for k in range(1,N+1):\n k_r = k-1\n k_w = k \n self.Cr[j,k_w] = self.FC[j,k_w] - self.FC[j,0]\n \n # LOOK FOR FIRST ZERO CROSSING FROM BOTTOM UP\n if self.kbl[j] == 0 and self.Cr[j,k_w] > 0:\n self.kbl[j] = k_w \n \n\n self.hbbl[j] = z_u_w[j,N] - z_u_w[j,0] # total depth\n if self.kbl[j] > 0 :\n k_w = self.kbl[j] \n k_r = k_w -1\n if k_w == 1: # NO BBL CASE\n self.hbbl[j] = z_u_r[j,0] - z_u_w[j,0] #in between bottom rho and w-level\n else:\n self.hbbl[j] = ( z_u_r[j,k_r-1] * self.Cr[j,k_w] - z_u_r[j,k_r] * self.Cr[j,k_w-1]) / \\\n (self.Cr[j,k_w] - self.Cr[j,k_w-1]) - z_u_w[j,0]",
"def get_frequency(self,):\n\n # TODO: Find way to appropriately reconvert the frequency to its initial\n # TODO: Value or alert that the value is APPROXIMATE\n FTW = int (0)\n freq = int(0)\n\n FTW_bytes = self._read('CFTW0')\n FTW = FTW.from_bytes(FTW_bytes,'big')\n freq = FTW*self.clock_freq/2**32\n\n print('Latest frequency set: ', \"{:.2e}\".format(freq), 'Hz')\n print(['%.2e' % elem for elem in self.frequencies])\n\n return self.frequencies",
"def h_spec(k, He, h):\r\n return np.array(((k**3/np.pi**2) * h))",
"def test_get_H(self):\n self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)\n Nfreq = self.ds.Nfreqs\n multiplicative_tolerance = 1.\n key1 = (0, 24, 38)\n key2 = (1, 25, 38)\n\n for input_data_weight in ['identity','iC', 'dayenu']:\n self.ds.set_weighting(input_data_weight)\n if input_data_weight == 'dayenu':\n pytest.raises(ValueError,self.ds.R, key1)\n rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}\n self.ds.set_r_param(key1,rpk)\n self.ds.set_r_param(key2,rpk)\n for taper in taper_selection:\n self.ds.set_taper(taper)\n\n self.ds.set_Ndlys(Nfreq//3)\n H = self.ds.get_H(key1, key2)\n self.assertEqual(H.shape, (Nfreq//3, Nfreq//3)) # Test shape\n\n self.ds.set_Ndlys()\n H = self.ds.get_H(key1, key2)\n self.assertEqual(H.shape, (Nfreq, Nfreq)) # Test shape",
"def _Fqt_comp(vh,q):\n r_scale = 6.45/60\n edges,count,x_lim = vh\n # make sure that vh is normalized\n count = count/np.sum(count)\n\n return np.sum(count * np.exp(1j*q*edges*r_scale))",
"def num_55():\n import itertools as IT\n axis=0\n cs = [-1, 0, 1]\n bins = [-1, 0, 1, 2]\n n = len(cs)\n a = np.array([i for i in IT.combinations_with_replacement(cs, n)])\n r = np.vstack([np.histogram(a[i], bins)[0] for i in range(len(a))])\n r_t = np.vstack([np.histogram(a.T[i], bins)[0] for i in range(len(a.T))])\n frmt = \"\"\"\n {}\n :classes: {}\n :values (a):\n {}\n :frequency for 'a' by row, axis=0\n {}\n :values (a_t)\n {}\n :frequency for 'r_t', by col, axis=1\n : Note... a.T = a_t\n : transform, a from axis 0 to axis 1 orientation\n {}\n \"\"\"\n p = \" . \"\n args = [num_55.__doc__, cs, \n indent(str(a), prefix=p),\n indent(str(r), prefix=p),\n a.T, r_t]\n print(dedent(frmt).format(*args))\n return a, r, r.T",
"def get_all_HEA_measurements(self):\n pass",
"def get_time_grid(n_time_frames):\n (_, _, _, sr, _, hop_length) = get_hcqt_params()\n time_grid = librosa.core.frames_to_time(\n range(n_time_frames), sr=sr, hop_length=hop_length\n )\n return time_grid",
"def Q(self):\n return np.array(list(self.center_frequencies)) \\\n / np.array(list(self.bandwidths))",
"def gen_freqs(ndata, dt):\n dn = 2 # if you like the central frequency to be negative, change dn to 1\n return 1/(ndata*dt) * np.hstack((np.arange(0, (ndata+dn)//2),\n np.arange(-(ndata+dn)//2+dn, 0)))",
"def get_cw_freq(self):\n return self.get_frequency(self.synth)",
"def get_freqs(Fs, n):\r\n\r\n return np.linspace(0, float(Fs) / 2, float(n) / 2 + 1)",
"def compute_hcqt(audio_fpath):\n (bins_per_octave, n_octaves, harmonics,\n sr, f_min, hop_length) = get_hcqt_params()\n y, fs = librosa.load(audio_fpath, sr=sr)\n\n cqt_list = []\n shapes = []\n for h in harmonics:\n cqt = librosa.cqt(\n y, sr=fs, hop_length=hop_length, fmin=f_min*float(h),\n n_bins=bins_per_octave*n_octaves,\n bins_per_octave=bins_per_octave\n )\n cqt_list.append(cqt)\n shapes.append(cqt.shape)\n\n shapes_equal = [s == shapes[0] for s in shapes]\n if not all(shapes_equal):\n min_time = np.min([s[1] for s in shapes])\n new_cqt_list = []\n for i in range(len(cqt_list)):\n new_cqt_list.append(cqt_list[i][:, :min_time])\n cqt_list = new_cqt_list\n\n log_hcqt = ((1.0/80.0) * librosa.core.amplitude_to_db(\n np.abs(np.array(cqt_list)), ref=np.max)) + 1.0\n\n return log_hcqt",
"def compute_chisq(hmodel, hdata, nbins=95):\n chisqs = [0.0]*4\n dofs = [0]*4\n for i in range(4):\n hmodel[i].Scale(hdata[i].Integral())\n for j in range(nbins):\n for k in range(nbins):\n valDat = hdata[i].GetBinContent(j+1, k+1)\n if valDat == 0.0:\n continue\n valMod = hmodel[i].GetBinContent(j+1, k+1)\n if valMod < valDat:\n errDat = hdata[i].GetBinErrorLow(j+1, k+1)\n else:\n errDat = hdata[i].GetBinErrorUp(j+1, k+1)\n dofs[i] += 1\n chisqs[i] += ((valDat-valMod)/errDat)**2\n return chisqs, dofs",
"def ChoppinessIndex(self, timeperiod = 14):\r\n return ta.C",
"def get_frequency_array(self):\n\t\treturn np.logspace(np.log10(self.converted_range[0]), np.log10(\n\t\t\tself.converted_range[1]), num=129)[:self.maximum_frequency]",
"def getkGrid(self, scaled=True):\n if scaled:\n return np.meshgrid(fft.fftshift(self.k_axis_scaled),\n fft.fftshift(self.k_axis_scaled))\n else:\n return np.meshgrid(fft.fftshift(self.k_axis_unscaled),\n fft.fftshift(self.k_axis_scaled))",
"def mce_filter(freq, f_raw, params):\n\tz = np.exp(-2j*np.pi*freq/f_raw)\n\tb11, b12, b21, b22 = np.array(params[:4])*0.5**14\n\tH = (1+z)**4 / (1-b11*z+b12*z**2) / (1-b21*z+b22*z**2)\n\tH /= 2**4 / (1-b11+b12) / (1-b21+b22)\n\treturn H",
"def compute_frequency_lines(self) -> list:\n freq = sorted(self.hash_map.get_key_value_pairs(),\n key=lambda e: e[1], reverse=True)\n result = map(lambda e:\n str(e[0]) + '\\t' + str(e[1][1]) + '\\t' + e[1][0],\n zip(range(1, 501), freq))\n return list(result)",
"def populate_grid(self):\n from cemc_cpp_code import hoshen_kopelman\n self.bins[:, :, :] = 0\n for atom in self.atoms:\n if atom.symbol in self.track_elements:\n n = self.get_bin(atom.index)\n self.bins[n[0], n[1], n[2]] += 1\n\n # Run the Hoshen-Kopelman algorithm to label the \n # bins into clusters\n self.clusters = hoshen_kopelman(self.bins)",
"def frequencies(self):\r\n\r\n self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)\r\n NFFT = self.method.get('NFFT', 64)\r\n Fs = self.method.get('Fs')\r\n freqs = tsu.get_freqs(Fs, NFFT)\r\n lb_idx, ub_idx = tsu.get_bounds(freqs, self.lb, self.ub)\r\n\r\n return freqs[lb_idx:ub_idx]",
"def get_channel_h_unit(self)->float: \n return self.__channel_h_unit",
"def freqspace(dim):\n f1 = [] \n if dim % 2 == 0: \n for i in range(-dim, dim-1, 2):\n ft = float(i) / float(dim)\n f1.append(ft) \n else: \n for i in range(-dim+1, dim, 2):\n ft = float(i) / float(dim)\n f1.append(ft) \n return f1",
"def harmonicOscillator_heatCapacity(T, freq):\n x = freq / (0.695039 * T) # kB = 0.695039 cm^-1/K\n exp_x = math.exp(x)\n one_minus_exp_x = 1.0 - exp_x\n return x * x * exp_x / one_minus_exp_x / one_minus_exp_x",
"def frequencies(self):\n radii = self.radii\n freqs = (1 / (self.shape[0] * self.pixel[0])) * radii\n return freqs"
] | [
"0.59496003",
"0.59361404",
"0.5881349",
"0.5839214",
"0.57178015",
"0.56668127",
"0.5663322",
"0.5657716",
"0.5628096",
"0.56117433",
"0.56011665",
"0.5592596",
"0.5519088",
"0.55124384",
"0.5502585",
"0.5500028",
"0.5481276",
"0.54767233",
"0.54718333",
"0.54570484",
"0.5456351",
"0.5451613",
"0.5442907",
"0.54351705",
"0.5434033",
"0.5418046",
"0.5399632",
"0.5393573",
"0.5385596",
"0.53697246"
] | 0.82124424 | 0 |
Get the hcqt time grid | def get_time_grid(n_time_frames):
(_, _, _, sr, _, hop_length) = get_hcqt_params()
time_grid = librosa.core.frames_to_time(
range(n_time_frames), sr=sr, hop_length=hop_length
)
return time_grid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def time_grid(time_step=30):\n if time_step < 1 or time_step > 60:\n raise ValueError('Time resolution should be between 0 and 60 [s]')\n half_step = time_step/SECONDS_PER_HOUR/2\n return np.arange(half_step, 24+half_step, half_step*2)",
"def timinggrid(self):\n\n gelem = Element(\"g\") # create a group\n for i in range(int(self.cycles)):\n\n lelem = Element(\"line\")\n lelem.attrib['x1'] = str(i*self.period + self.period/2.0 + self.xzero)\n lelem.attrib['y1'] = str(0);\n lelem.attrib['x2'] = str(i*self.period + self.period/2.0 + self.xzero)\n lelem.attrib['y2'] = str(self.signalcnt*(self.height + self.signalspacing) + self.signalspacing)\n lelem.attrib['stroke'] = \"grey\"\n lelem.attrib['stroke-width'] = \"0.5\"\n gelem.append(lelem)\n\n \n self.svgelem.append(gelem)\n self.svgelem.append(self.signalselem)",
"def time(self):\n return self[self.time_columns]",
"def time(self):\n return self[self.time_columns]",
"def rangeselector_time():\n return {\n \"bgcolor\": \"rgb(35, 149, 86)\",\n \"activecolor\": \"rgb(25, 108, 62)\",\n \"buttons\": [\n {\"count\": 12, \"label\": \"12h\", \"step\": \"hour\", \"stepmode\": \"backward\"},\n {\"count\": 24, \"label\": \"24h\", \"step\": \"hour\", \"stepmode\": \"backward\"},\n {\"count\": 48, \"label\": \"48h\", \"step\": \"hour\", \"stepmode\": \"backward\"},\n {\"count\": 3, \"label\": \"3d\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"count\": 7, \"label\": \"7d\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"step\": \"all\"},\n ],\n }",
"def get_time(self):\n return numpy.linspace(self.header.time_gate_start, \\\n self.header.time_gate_stop, self.num_time_bins())",
"def get_hourly(self):\n pass",
"def getTimes():",
"def getTimes():",
"def getTimes():",
"def timingColumns(self, results):\n \n pass",
"def hourly_table(self):\n htable = [0 for i in range(24)]\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[3]\n htable[evtime] += 1\n return htable",
"def load_overlaplcwp_timegrid(self, blockid=0, key=(\"ov\", \"ovkin\", \"ovpot\")):\n tg = []\n for item in key:\n if item == \"ov\":\n pathtg = \"/\" + self._prefixb + str(blockid) + \"/overlaplcwp/timegrid\"\n tg.append(self._srf[pathtg][:])\n elif item == \"ovkin\":\n pathtg = \"/\" + self._prefixb + str(blockid) + \"/overlaplcwp/timegridkin\"\n tg.append(self._srf[pathtg][:])\n elif item == \"ovpot\":\n pathtg = \"/\" + self._prefixb + str(blockid) + \"/overlaplcwp/timegridpot\"\n tg.append(self._srf[pathtg][:])\n else:\n raise ValueError(\"Unknown key value {}\".format(item))\n\n if len(tg) == 1:\n print(tg)\n return tg[0]\n else:\n return tuple(tg)",
"def show_time(self):\n hour = str(datetime.datetime.now().strftime(\"%H\"))\n minute = str(datetime.datetime.now().strftime(\"%M\"))\n\n hour1 = int(hour[0])\n hour2 = int(hour[1])\n minute1 = int(minute[0])\n minute2 = int(minute[1])\n\n self.light_number(self.numbers[hour1], [0, 5])\n self.light_number(self.numbers[hour2], [0, 0])\n self.light_number(self.numbers[minute1], [5, 5])\n self.light_number(self.numbers[minute2], [5, 0])",
"def get_time_col(self):\n return self.time_col",
"def get_time_slices(self):\n tot = []\n for clu in self._clusters:\n tot.extend(self._clusters[clu].to_dict()[:])\n #tot.sort()\n return tot",
"def getHourColumn(self): \n return self.hourcol",
"def constructTimeLineItem(self):\n\t\treturn",
"def get_grid_list (self):\n return [\n 'area', 'ald', 'poi', 'ice', 'lake pond',\n 'drainage', 'degree-day','climate event'\n ]",
"def get_times(self):\n raise NotImplementedError(\"Abstract method not implemented.\")",
"def queriesInEachHour(self):\n hours = 0\n\n #prints out each element (with number of DB Queries) of array\n while hours < 24:\n print (hours,'to',hours+1, ' : ', self.arrayOfTimes[hours])\n hours += 1",
"def _draw_hours(self):\n tmp_str_list = []\n for i in range(0, self._g_width, self._min_grid):\n if i % self._hour_grid == 0:\n tmp_str_list.append('<polyline class=\"FullHour\" points=\"%d,%d, %d,%d\" />' % (\n i + .5 + 20, 20, i + .5 + 20, self._g_height))\n tmp_str_list.append('<text class=\"Label\" x=\"%d\" y=\"%d\">%d</text>' % (\n i + 20, 20, (i / self._hour_grid + self._offset) % 24))\n else:\n tmp_str_list.append('<polyline class=\"SubHour\" points=\"%d,%d,%d,%d\" />' % (\n i + .5 + 20, 20, i + .5 + 20, self._g_height))\n return \"\".join(tmp_str_list)",
"def lick_times():\n lick_timestamps = read_npy_file('licks.times.npy')\n lick_ts = TimeSeries(\n name='lick_times',\n timestamps=np.ravel(lick_timestamps),\n data=np.full(len(lick_timestamps), True),\n unit='',\n description='Extracted times of licks, from the lickPiezo signal.'\n )\n lick_bev = BehavioralEvents(lick_ts)\n behavior_module.add_data_interface(lick_bev)",
"def get_licks(dlc, dlc_t):\r\n lick_times = get_feature_event_times(dlc, dlc_t, ['tongue_end_l_x', 'tongue_end_l_y',\r\n 'tongue_end_r_x', 'tongue_end_r_y'])\r\n return lick_times",
"def time_axis(self):\n if self.axes_wcs.wcs.ctype[0] not in ['TIME', 'UTC']:\n raise cu.CubeError(1, 'No time axis present')\n delta = self.axes_wcs.wcs.cdelt[0]\n crpix = self.axes_wcs.wcs.crpix[0]\n crval = self.axes_wcs.wcs.crval[0]\n start = crval - crpix * delta\n stop = start + len(self.data) * delta\n cunit = u.Unit(self.axes_wcs.wcs.cunit[0])\n return np.linspace(start, stop, num=self.data.shape[-1]) * cunit",
"def getHour(self, parent):\r\n self.now = datetime.now()\r\n self.current_time = self.now.strftime(\"%H:%M:%S\")\r\n self.lineEditWidgets[\"HORA\"].setText(self.current_time)",
"def get_time_step_values(self):\n return OcTreeReader.get_time_step_values(self)",
"def get_time_table(self,day):\n output = []\n for link in self.data[day]:\n df = self.data[link][day]\n for row in df:\n output.append({'actualtime_arr_from':row[0],'acutaltime_arr_to':row[1],\\\n 'routeid':row[2],'link':route})\n from operator import itemgetter\n return sorted(output, key=itemgetter('actualtime_arr_from'))",
"def get_time(self):\n return self._ticks",
"def time_slot(self):\n pass"
] | [
"0.60058314",
"0.5995702",
"0.592634",
"0.592634",
"0.5893274",
"0.58358717",
"0.5755237",
"0.57121754",
"0.57121754",
"0.57121754",
"0.57034177",
"0.5683962",
"0.5615493",
"0.55705327",
"0.5541471",
"0.55200374",
"0.55005306",
"0.5473635",
"0.54452884",
"0.5405167",
"0.5399208",
"0.53882086",
"0.53853494",
"0.53769934",
"0.5375642",
"0.5370734",
"0.5366733",
"0.5364664",
"0.5327723",
"0.53112864"
] | 0.67356974 | 0 |
Compute the bin numbers from a given grid | def grid_to_bins(grid, start_bin_val, end_bin_val):
bin_centers = (grid[1:] + grid[:-1])/2.0
bins = np.concatenate([[start_bin_val], bin_centers, [end_bin_val]])
return bins | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bincalc(nbin=0.1,bmin=5,bmax=2000):\n\n logbmin=np.log10(bmin)\n logbmax=np.log10(bmax)\n\n logbins=np.arange(logbmin,logbmax,nbin)\n\n bins=10**logbins\n\n #bins=np.linspace(bmin,bmax,60)\n return (bins)",
"def _bin_numbers(col1, col2, bin_n):\n col1 = col1[~col1.map(lambda x: is_null_flag(x))].reset_index(drop=True)\n col2 = col2[~col2.map(lambda x: is_null_flag(x))].reset_index(drop=True)\n comb = pd.Series(pd.np.concatenate([col1, col2])).sort_values(inplace=False).reset_index(drop=True)\n bin_size = int(len(comb) / bin_n)\n bin_dict1, bin_dict2 = {}, {}\n for i in range(bin_n - 1): # last bin only needs bin_min\n bin_low = comb[i*bin_size]\n bin_high = comb[(i+1)*bin_size]\n bin_dict1[i] = sum((col1 >= bin_low) & (col1 < bin_high))\n bin_dict2[i] = sum((col2 >= bin_low) & (col2 < bin_high))\n # print bin_low, bin_high\n # Highest bin\n bin_dict1[i+1] = sum(col1 >= bin_high)\n bin_dict2[i+1] = sum(col2 >= bin_high)\n return bin_dict1, bin_dict2",
"def test_bins(self):\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n rmin, rmax = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == rmin\n assert linear_intervals[-1] == rmax\n\n d = (rmax - rmin) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], rmin + i * d)\n\n quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1",
"def binning():\n def r(x):\n return 1 << (x & 7)\n\n def w(x):\n return 0x11 * (x >> 1)\n return r, w",
"def regrid(old_grid):\n bins = np.floor((np.log10(old_grid) - l_min) / dl).astype(int)\n w = (bins >= 0) & (bins < nbins)\n\n return bins, w",
"def test_bins(self):\n\n \n for filename in ['data/population_padang_1.asc', \n 'data/test_grid.asc']: \n \n R = read_coverage(filename)\n \n min, max = R.get_extrema() #use_numeric=True)\n \n for N in [2,3,5,7,10,16]:\n linear_intervals = R.get_bins(N=N, quantiles=False) \n \n assert linear_intervals[0] == min\n assert linear_intervals[-1] == max \n \n d = (max-min)/N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], min + i*d) \n \n \n quantiles = R.get_bins(N=N, quantiles=True)\n\n A = R.get_data(nan=True).flat[:] \n \n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask) \n l2 = len(A)\n \n if filename == 'data/test_grid.asc':\n # Check that NaN's were removed\n \n assert l1 == 35\n assert l2 == 30\n \n \n # Assert that there are no NaN's \n assert not numpy.alltrue(numpy.isnan(A))\n \n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements/N\n \n # Count elements in each bin and check\n\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n \n \n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no more than 1\n assert abs(count - refcount) <= 1 \n assert abs(count - average_elements_per_bin) <= 3\n \n \n else:\n # The last bin is allowed vary by more\n pass\n \n i0 = i1",
"def bin_matrix(x, binning):\n\n N = len(x)\n B = []\n for i in range(len(binning) - 1):\n\n line = np.zeros(N)\n for j in range(N):\n\n if x[j] >= binning[i] and x[j] < binning[i+1]:\n\n size = binning[i + 1] - binning[i]\n line[j] = 1 / size\n\n B.append(line)\n\n B = np.array(B)\n\n return(B)",
"def generate_binned_values( lower_lim, upper_lim, chr_length, snps_per_chr, indels_per_chr, resolution ):\n\t\n\tsnp_data = []\n\tindel_data = []\n\twhile True:\n\t\tif upper_lim >= chr_length:\n\t\t\tbreak\n\t\telse:\n\t\t\tsnp_tmp = []\n\t\t\tindel_tmp = []\n\t\t\tfor SNP in snps_per_chr:\n\t\t\t\tif SNP <= upper_lim and SNP > lower_lim:\n\t\t\t\t\tsnp_tmp.append( 'X' )\n\t\t\tfor indel in indels_per_chr:\n\t\t\t\tif indel <= upper_lim and indel > lower_lim:\n\t\t\t\t\tindel_tmp.append( 'X' )\n\t\t\tsnp_data.append( len( snp_tmp ) )\n\t\t\tindel_data.append( len( indel_tmp ) )\n\t\tupper_lim += resolution\n\t\tlower_lim += resolution\n\treturn max( snp_data ), max( indel_data ), snp_data, indel_data",
"def compute_bin_indices(X_part, bin_limits=None, n_bins=20):\n if bin_limits is None:\n bin_limits = []\n for variable_data in range(X_part.shape[1]):\n bin_limits.append(numpy.linspace(numpy.min(variable_data), numpy.max(variable_data), n_bins + 1)[1: -1])\n\n bin_indices = numpy.zeros(len(X_part), dtype=numpy.int)\n for axis, bin_limits_axis in enumerate(bin_limits):\n bin_indices *= (len(bin_limits_axis) + 1)\n bin_indices += numpy.searchsorted(bin_limits_axis, X_part[:, axis])\n\n return bin_indices",
"def find_bin_edges(bin_centres):\n\n if not isinstance(bin_centres, np.ndarray):\n bin_centres = np.asarray(bin_centres)\n\n edges = bin_centres[:-1] + 0.5 * (bin_centres[1:] - bin_centres[:-1])\n bins = np.concatenate(([2 * bin_centres[0] - edges[0]], edges,\n [2 * bin_centres[-1] - edges[-1]]))\n\n return bins",
"def getLogBins(nbins, low, high):\n\n x = float(low)\n dx = pow(high/low, 1.0/nbins);\n \n return np.array([x*pow(dx,i) for i in range(nbins+1)], dtype=float)",
"def eqf_binning(t, n_bins):\n t_bins= []\n t= sorted(t)\n n_items= int(len(t)/n_bins)\n\n for i in range(1, n_bins):\n t_bins.append(t[int(i*n_items)])\n t_bins.append(np.max(t) + 0.01)\n t_binning= np.digitize(t, t_bins)\n return t_binning",
"def bin_indices(self, coordinates, fractional=True):\n coords = numpy.asarray(coordinates).transpose()\n indices = [numpy.interp(coo, cen, range(n))\n for (coo, cen, n) in zip(coords, self.centers, self.shape)]\n index_arr = numpy.atleast_2d(numpy.array(indices).transpose())\n if fractional:\n return index_arr\n return numpy.floor(index_arr + 0.5).astype(numpy.int_)",
"def bin_binarise(self):\n pass",
"def setup_bins(self):\n width = int((self.max - self.min) / self.bin_size)\n bins = {\n i * width + self.min: (idx, idx + self.bin_size)\n for i, idx in enumerate(range(0, len(self.nums), self.bin_size))\n }\n return bins",
"def getLinBins(nbins, low, high):\n x = float(low)\n dx = float(high-low)/nbins\n\n return np.array([x+i*dx for i in range(nbins+1)], dtype=float)",
"def __call__(self, n_bins, segment, elements):\n\n # n_bins\n assert type(n_bins) is int\n assert n_bins > 0\n\n # segment\n assert type(segment) is list or type(segment) is tuple\n assert len(segment) == 2\n assert np.isscalar(segment[0]) and np.isscalar(segment[1])\n assert segment[0] < segment[1]\n\n # elements\n assert type(elements) is np.ndarray, f\"elements should be an np.ndarray, instead of {type(elements)}\"\n assert elements.dtype == np.number\n\n sorted_elements = np.sort(elements)\n\n bin_card = int(floor(elements.shape[0]/n_bins))\n\n bin_boundaries = [segment[0]]\n\n for i in range(1, n_bins):\n boundary_l = sorted_elements[i*bin_card - 1]\n boundary_r = sorted_elements[i * bin_card]\n boundary = (boundary_l+boundary_r)/2\n\n bin_boundaries.append(boundary)\n\n bin_boundaries.append(segment[1])\n\n return np.array(bin_boundaries)",
"def bin_data(data, lat, lon, binsize=1, uv_data=False, pressure=None):\n\n # Create lats and lons based on binsize\n lonlen = 360\n latlen = 180\n\n lon_lowerlim = 0\n lon_upperlim = 360\n\n lat_lowerlim = -90\n lat_upperlim = 90\n\n if latlen % binsize == 0 and lonlen % binsize == 0:\n latbin = int(latlen/binsize)\n lonbin = int(lonlen/binsize)\n n_deg = binsize/2\n\n ll_lats = np.linspace(lat_lowerlim+(n_deg),\n lat_upperlim-(n_deg),\n latbin)\n\n ll_lons = np.linspace(lon_lowerlim+(n_deg),\n lon_upperlim-(n_deg),\n lonbin)\n\n else:\n print('ERROR: Binsize does not work for grid shape (180,360). Please use different binsize.')\n return\n\n paramlist = list(itertools.product(ll_lats, ll_lons))\n\n # Bin Data\n if uv_data == True:\n binned_u_data = np.full((latbin, lonbin), np.nan, dtype=object)\n binned_v_data = np.full((latbin, lonbin), np.nan, dtype=object)\n\n if pressure is not None:\n binned_pressure = np.full((latbin, lonbin), np.nan, dtype=object)\n\n for val in paramlist:\n # Get index of 1x1 grid lat and lon\n latidx = np.where(ll_lats == val[0])\n lonidx = np.where(ll_lons == val[1])\n # values of the 1x1 grid lat and lon\n binnedlons = val[1]\n binnedlats = val[0]\n\n # find instances where data is within 1x1 grid point of orginal data\n data_idx = np.where((lon >= binnedlons - n_deg) & (lon <= binnedlons + n_deg) &\n (lat >= binnedlats - n_deg) & (lat <= binnedlats + n_deg))\n\n latlon_idx = [latidx[0][0], lonidx[0][0]]\n\n # calculate stats if there is data at this grid point, else append np.nan\n if len(data_idx[0]) > 0:\n u = data['u'][data_idx]\n v = data['v'][data_idx]\n\n binned_u_data[latlon_idx[0], latlon_idx[1]] = u\n binned_v_data[latlon_idx[0], latlon_idx[1]] = v\n\n if pressure is not None:\n p = pressure[data_idx]\n binned_pressure[latlon_idx[0], latlon_idx[1]] = p\n\n if pressure is not None:\n return binned_u_data, binned_v_data, binned_pressure\n\n else:\n return binned_u_data, binned_v_data\n\n else:\n binned_data = np.full((latbin, lonbin), np.nan, dtype=object)\n if pressure is not None:\n binned_pressure = np.full((latbin, lonbin), np.nan, dtype=object)\n\n for val in paramlist:\n # Get index of grid lat and lon\n latidx = np.where(ll_lats == val[0])\n lonidx = np.where(ll_lons == val[1])\n # values of the 1x1 grid lat and lon\n binnedlons = val[1]\n binnedlats = val[0]\n\n # find instances where data is within 1x1 grid point of orginal data\n data_idx = np.where((lon >= binnedlons - n_deg) & (lon <= binnedlons + n_deg) &\n (lat >= binnedlats - n_deg) & (lat <= binnedlats + n_deg))\n\n latlon_idx = [latidx[0][0], lonidx[0][0]]\n\n # calculate stats if there is data at this grid point\n if len(data_idx[0]) > 0:\n d = data[data_idx]\n binned_data[latlon_idx[0], latlon_idx[1]] = d\n\n if pressure is not None:\n p = pressure[data_idx]\n binned_pressure[latlon_idx[0], latlon_idx[1]] = p\n\n if pressure is not None:\n return binned_data, binned_pressure\n\n else:\n return binned_data",
"def _bin(self, X):\n H = np.linspace(0, 1, self.Nbin)\n return np.maximum(1 - (abs(X[..., None] - H)) / (H[1] - H[0]) , 0)",
"def _make_bins(start, stop, step):\n bin_edges = np.arange(start, stop + step, step)\n\n return bin_edges",
"def get2DBins(x, y, binSizeX, binSizeY):\n\n result = []\n xlength = len(x)\n ylength = len(y)\n\n i = 0\n xcount = 0\n for i1 in range(0, xlength, binSizeX):\n i2 = i1 + binSizeX\n if i2 >= xlength:\n i2 = xlength - 1\n xcount += 1\n ycount = 0\n for j1 in range(0, ylength, binSizeY):\n j2 = j1 + binSizeY\n if j2 >= ylength:\n j2 = ylength - 1\n result.append((i1, i2, j1, j2))\n ycount += 1\n return result, xcount, ycount",
"def logbin(data, scale = 1.3, zeros = False):\n if scale < 1:\n raise ValueError('Function requires scale >= 1.')\n count = np.bincount(data)\n tot = np.sum(count)\n smax = np.max(data)\n if scale > 1:\n jmax = np.ceil(np.log(smax)/np.log(scale))\n if zeros:\n binedges = scale ** np.arange(jmax + 1)\n binedges[0] = 0\n else:\n binedges = scale ** np.arange(1,jmax + 1)\n # count = count[1:]\n binedges = np.unique(binedges.astype('uint64'))\n x = (binedges[:-1] * (binedges[1:]-1)) ** 0.5\n y = np.zeros_like(x)\n count = count.astype('float')\n for i in range(len(y)):\n y[i] = np.sum(count[binedges[i]:binedges[i+1]]/(binedges[i+1] - binedges[i]))\n # print(binedges[i],binedges[i+1])\n # print(smax,jmax,binedges,x)\n # print(x,y)\n else:\n x = np.nonzero(count)[0]\n y = count[count != 0].astype('float')\n if zeros != True and x[0] == 0:\n x = x[1:]\n y = y[1:]\n y /= tot\n x = x[y!=0]\n y = y[y!=0]\n return x,y",
"def griddata(x, y, z, binsize=0.01, retbin=True, retloc=True, xlim = None, ylim = None, agg = None):\n # get extrema values.\n if xlim:\n xmin, xmax = xlim[0], xlim[1]\n else:\n xmin, xmax = x.min(), x.max()\n \n if ylim:\n ymin, ymax = ylim[0], ylim[1]\n else:\n ymin, ymax = y.min(), y.max()\n\n # make coordinate arrays.\n xi = np.arange(xmin, xmax+binsize, binsize)\n yi = np.arange(ymin, ymax+binsize, binsize)\n xi, yi = np.meshgrid(xi,yi)\n\n # make the grid.\n grid = np.empty((len(agg),xi.shape[0],xi.shape[1]), dtype=x.dtype)*np.nan\n #grid = np.zeros((len(agg),xi.shape[0],xi.shape[1]), dtype=x.dtype)\n nrow, ncol = grid[0].shape\n \n if retbin:\n bins = np.copy(grid[0])\n\n # create list in same shape as grid to store indices\n if retloc:\n wherebin = np.copy(grid[0])\n wherebin = wherebin.tolist()\n\n # Create aggregation functions\n agg_func = []\n if agg:\n for func in agg:\n agg_func.append(eval(\".\".join(['np',func])))\n else:\n agg_func.append(eval(\".\".join(['np','nanmedian'])))\n\n # fill in the grid.\n for row in range(nrow):\n for col in range(ncol):\n xc = xi[row, col] # x coordinate.\n yc = yi[row, col] # y coordinate.\n\n # find the position that xc and yc correspond to.\n posx = np.abs(x - xc)\n posy = np.abs(y - yc)\n ibin = np.logical_and(posx < binsize/2., posy < binsize/2.)\n ind = np.where(ibin == True)[0]\n\n # fill the bin.\n bin = z[ibin]\n if retloc: wherebin[row][col] = ind\n if retbin: bins[row, col] = bin.size\n if bin.size != 0:\n #binval = np.median(bin)\n #grid[row, col] = np.median(bin)\n grid[:,row, col] = [ fun(bin) for fun in agg_func ]\n# else:\n# grid[:, row, col] = np.nan # fill empty bins with nans.\n # return the grid\n if retbin:\n if retloc:\n return grid, bins, wherebin\n else:\n return grid, bins\n else:\n if retloc:\n return grid, wherebin\n else:\n return grid",
"def eqw_binning(t, n_bins):\n \n t_diff= (np.max(t) - np.min(t))/n_bins\n t_bins= np.hstack([np.array([np.min(t) + t_diff*i for i in range(1, n_bins)]), [np.max(t) + 0.01]])\n t_binning= np.digitize(t, t_bins)\n return t_binning",
"def get_bins(size, n, max_value):\n bin_lims = get_bin_lims(n, max_value)\n return sort_by_rows(np.array(list(itertools.product(bin_lims, repeat=size))))",
"def rebin(flux, ivar, w_grid):\n new_grid, w = regrid(w_grid)\n\n fl_iv = flux * ivar\n\n # len(flux) will give number of spectra,\n # len(new_grid) will give number of output bins\n flux_out = np.zeros((len(flux), nbins))\n ivar_out = np.zeros_like(flux_out)\n\n # These lines are necessary for SDSS spectra. For DESI\n # spectra nothing will change here, since the entire DESI grid is contained\n # within the QuasarNET one, but for BOSS/eBOSS the grid can extend out\n # past the QuasarNET grid and give negative bin values. I have tests that\n # confirm this still works on DESI data, don't worry.\n fl_iv = fl_iv[:, w]\n new_grid = new_grid[w]\n ivar_temp = ivar[:, w]\n\n for i in range(len(flux)):\n c = np.bincount(new_grid, weights=fl_iv[i, :])\n flux_out[i, :len(c)] += c\n c = np.bincount(new_grid, weights=ivar_temp[i, :])\n ivar_out[i, :len(c)] += c\n\n return flux_out, ivar_out",
"def create_bin_boundaries(config, epoch_df, data_type, obs_per_bin, verbose=False):\n \n edges = create_edges_set(config, epoch_df, data_type)\n \n boundaries = []\n for edge in edges:\n start, end, freq = edge\n bin_size = freq * obs_per_bin\n boundaries.append(np.arange(start, end, bin_size))\n boundaries = np.concatenate(boundaries)\n \n return boundaries",
"def binning(data, low, high):\n if len(data) == 0: return 1\n\n mask1 = (data >= low)\n mask2 = (data < high)\n mask3 = numpy.logical_and(mask1, mask2)\n data = data[mask3]\n\n if len(data) == 0: return 10\n\n data.sort()\n q1 = data[int(math.floor(0.25*len(data)))]\n q3 = data[int(math.floor(0.75*len(data)))]\n binwidth = 2. * (q3 - q1) / len(data)**(1./3.)\n if binwidth > 0.:\n return max(10, int(math.ceil((high - low)/binwidth)))\n else:\n return 10",
"def _get_bin_edges(a, bins, range):\n # parse the overloaded bins argument\n n_equal_bins = None\n bin_edges = None\n\n if isinstance(bins, str):\n raise NotImplementedError(\n 'only integer and array bins are implemented')\n elif isinstance(bins, cupy.ndarray) or numpy.ndim(bins) == 1:\n # TODO(okuta): After #3060 is merged, `if cupy.ndim(bins) == 1:`.\n if isinstance(bins, cupy.ndarray):\n bin_edges = bins\n else:\n bin_edges = numpy.asarray(bins)\n\n if (bin_edges[:-1] > bin_edges[1:]).any(): # synchronize! when CuPy\n raise ValueError(\n '`bins` must increase monotonically, when an array')\n if isinstance(bin_edges, numpy.ndarray):\n bin_edges = cupy.asarray(bin_edges)\n elif numpy.ndim(bins) == 0:\n try:\n n_equal_bins = operator.index(bins)\n except TypeError:\n raise TypeError(\n '`bins` must be an integer, a string, or an array')\n if n_equal_bins < 1:\n raise ValueError('`bins` must be positive, when an integer')\n\n first_edge, last_edge = _get_outer_edges(a, range)\n else:\n raise ValueError('`bins` must be 1d, when an array')\n\n if n_equal_bins is not None:\n # numpy's gh-10322 means that type resolution rules are dependent on\n # array shapes. To avoid this causing problems, we pick a type now and\n # stick with it throughout.\n bin_type = cupy.result_type(first_edge, last_edge, a)\n if cupy.issubdtype(bin_type, cupy.integer):\n bin_type = cupy.result_type(bin_type, float)\n\n # bin edges must be computed\n bin_edges = cupy.linspace(\n first_edge, last_edge, n_equal_bins + 1,\n endpoint=True, dtype=bin_type)\n return bin_edges",
"def unique_binning(t):\n diff= np.unique(t)\n diff= diff[1:] - diff[:-1]\n diff = np.min(diff)/2\n return np.digitize(t, np.hstack([np.unique(t) + diff]))"
] | [
"0.6988975",
"0.6984424",
"0.67759913",
"0.6577237",
"0.65490645",
"0.6458017",
"0.6443781",
"0.6413207",
"0.63787806",
"0.63331187",
"0.6303565",
"0.62823945",
"0.6276484",
"0.6272296",
"0.6242215",
"0.62306744",
"0.62077826",
"0.6177793",
"0.61308944",
"0.61144274",
"0.60780835",
"0.60677844",
"0.6060504",
"0.6035808",
"0.5983633",
"0.5972553",
"0.59678286",
"0.5953443",
"0.5941458",
"0.59413743"
] | 0.756557 | 0 |
show whole ProducedMsg table on the web | def prod():
query = "SELECT * FROM ProducedMsg;"
tablestr = dbwrapper._query_pretty(query)
result = string.replace(str(tablestr),'\n','<br>')
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_received_table(self, num_disp, old):\n caller = self.caller\n msgtable = PrettyTable(\n [\"{wMsg #\", \"{wSender\", \"{wIC Date\", \"{wOOC Date\", \"{wSave\"]\n )\n mess_num = 1\n old = old[:num_disp]\n for mess in old:\n try:\n name = caller.messages.get_sender_name(mess)\n except AttributeError:\n mess = reload_model_as_proxy(mess)\n print(\n \"Error: Had to reload Msg ID %s as Messenger when displaying received table.\"\n % mess.id\n )\n name = caller.messages.get_sender_name(mess)\n date = caller.messages.get_date_from_header(mess) or \"Unknown\"\n ooc_date = mess.db_date_created.strftime(\"%x\")\n saved = \"{w*{n\" if mess.preserved else \"\"\n msgtable.add_row([mess_num, name, date, ooc_date, saved])\n mess_num += 1\n self.msg(msgtable)",
"def display_sent_table(self, num_disp, old):\n msgtable = PrettyTable([\"{wMsg #\", \"{wReceiver\", \"{wDate\"])\n mess_num = 1\n old = old[:num_disp]\n for mess in old:\n receiver = mess.receivers\n if receiver:\n receiver = receiver[0]\n name = receiver.key\n else:\n name = \"Unknown\"\n try:\n date = self.caller.messages.get_date_from_header(mess) or \"Unknown\"\n except AttributeError:\n mess = reload_model_as_proxy(mess)\n print(\n \"Error: Had to reload Msg ID %s as Messenger when displaying sent table.\"\n % mess.id\n )\n date = self.caller.messages.get_date_from_header(mess) or \"Unknown\"\n msgtable.add_row([mess_num, name, date])\n mess_num += 1\n self.msg(msgtable)\n return",
"def show_messages(self):\n for msg in self.messages:\n print msg['text']",
"def message_table(message):\r\n table = Table(['property', 'value'])\r\n table.align['property'] = 'r'\r\n table.align['value'] = 'l'\r\n\r\n table.add_row(['id', message['id']])\r\n table.add_row(['initial_entry_time', message['initial_entry_time']])\r\n table.add_row(['visibility_delay', message['visibility_delay']])\r\n table.add_row(['visibility_interval', message['visibility_interval']])\r\n table.add_row(['fields', message['fields']])\r\n return [table, message['body']]",
"def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r",
"def print_data_list(self):\n print('\\n{0}'.format(self.webDataFrame))",
"def generate(self):\n \n privateproc = False\n if self.rowcount > 0:\n try:\n table = spss.BasePivotTable(self.tabletitle, self.omssubtype)\n except:\n StartProcedure(_(\"Messages\"), self.procname)\n privateproc = True\n table = spss.BasePivotTable(self.tabletitle, self.omssubtype)\n if self.caption:\n table.Caption(self.caption)\n # Note: Unicode strings do not work as cell values in 18.0.1 and probably back to 16\n if self.columnlabels != []:\n table.SimplePivotTable(self.rowdim, self.rowlabels, self.coldim, self.columnlabels, self.columnvalues)\n else:\n table.Append(spss.Dimension.Place.row,\"rowdim\",hideName=True,hideLabels=True)\n table.Append(spss.Dimension.Place.column,\"coldim\",hideName=True,hideLabels=True)\n colcat = spss.CellText.String(\"Message\")\n for r in self.rowlabels:\n cellr = spss.CellText.String(r)\n table[(cellr, colcat)] = cellr\n if privateproc:\n spss.EndProcedure()",
"def con():\n query = \"SELECT * FROM ConsumedMsg;\"\n tablestr = dbwrapper._query_pretty(query)\n result = string.replace(str(tablestr),'\\n','<br>')\n return result",
"def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list",
"def RenderAsHtml(self):\n html = '<table>'\n\n for p in FrontendJob._properties:\n if p == 'log' or p == 'clovis_task':\n continue\n value = getattr(self, p)\n if value:\n html += '<tr><td>' + p + '</td><td>' + str(value) + '</td></tr>'\n\n html += '</table>'\n return html",
"def getTableHead():\n return [\"Reporter\", \"Reportee\", \"aln. DKIM\", \"aln. SPF\", \"Disposition\",\n \"DKIM result\", \"SPF result\", \"msg#\", \"IP\", \"Country\",\n \"Report Begin\", \"Report End\", \"Report ID\"]",
"def show_data():",
"def list_messages(self):",
"def info(self) -> str:\n return tabulate(self.model_log_msg, self.head, tablefmt=\"presto\")",
"def print_table(emojis):\n if len(emojis) > 0:\n table = []\n for i in emojis:\n table.append([i.get('id'), i.get('title'), i.get('emoji')])\n print(tabulate(table, headers=[\"ID\", \"Title\", \"Emoji\"]))\n else:\n print(\"¯\\_(ツ)_/¯ Nothing to see here...\")",
"def show_data(self, msg):\n\n message = msg\n # self.ECGWin.append(message)\n self.getter.get(message)\n # self.ECGWin.append(msg2)\n # self.ECGWin.append(msg3)",
"def show_table(table):\n # id: string\n # Unique and random generated (at least 2 special char()expect: ';'),\n # 2 number, 2 lower and 2 upper case letter)\n # title: string\n # manufacturer: string\n # price: number (dollars)\n # in_stock: number\n title_list = [\"ID\", \"Title\", \"Manufacturer\",\n \"Price\", \"Number in stock\"]\n ui.print_table(table, title_list)",
"def get_onlineformincoming_html(dbo, collationid):\n h = []\n h.append('<table width=\"100%\">')\n for f in get_onlineformincoming_detail(dbo, collationid):\n label = f[\"LABEL\"]\n if label is None or label == \"\": label = f[\"FIELDNAME\"]\n h.append('<tr>')\n h.append('<td>%s</td>' % label )\n h.append('<td>%s</td>' % f[\"VALUE\"])\n h.append('</tr>')\n h.append('</table>')\n return \"\\n\".join(h)",
"def display_message():",
"def show_message(id):\n session = Session()\n message = session.query(Message).filter_by(id=id).one()\n msg_dct = create_message(message)\n msg_dct[\"size\"] = message.size\n msg_dct[\"recipients\"] = create_recipients(message.recipients)\n return msg_dct",
"def html_data_table(self):\n return \"XXX\"",
"def view_requests(self):\n requests = self.caller.db.scene_requests or {}\n table = EvTable(\"{wName{n\", \"{wSummary{n\", width=78, border=\"cells\")\n for tup in requests.values():\n table.add_row(tup[0], tup[1])\n self.msg(str(table))",
"def print_details(self):\n self.view.print_details()",
"def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) {1}\".format(cnt, x[0]))",
"def _printable(self):\n toPrint = \"Qubit ID: \" + str(self.qubit_id) + \" \"\n toPrint = toPrint + \"Outcome: \" + str(self.outcome) + \" \"\n toPrint = toPrint + \"Remote App ID: \" + str(self.remote_app_id) + \" \"\n toPrint = toPrint + \"Remote Node: \" + str(self.remote_node) + \" \"\n toPrint = toPrint + \"Remote Port: \" + str(self.remote_port) + \" \"\n toPrint = toPrint + \"Datetime: \" + str(self.datetime)\n return toPrint",
"def to_html(self):\n return self.serializer.render(self.formatter.formatMessage(self.oldmsg))",
"def view_all_students():\n message = ''\n global conn\n with conn:\n rows = select_all_students(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Student Table', message)",
"def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))",
"def index(self):\n return \"\"\"\n<html><body>\n<pre>\n Hello World!\n\n To send a message, please point to:\n http://server:port/msg?msgType=YOURTYPE&payload=YOURPAYLOAD\n</pre>\n</body></html>\n \"\"\"",
"def showMessage(self):"
] | [
"0.6597587",
"0.6498538",
"0.62005866",
"0.6154198",
"0.59600717",
"0.59149694",
"0.5879777",
"0.58272785",
"0.58070505",
"0.57914764",
"0.5776034",
"0.5752107",
"0.5751061",
"0.5714788",
"0.57005656",
"0.56983024",
"0.5696108",
"0.56184375",
"0.5599567",
"0.5598464",
"0.55942166",
"0.5572155",
"0.556282",
"0.5542458",
"0.55386627",
"0.5535441",
"0.5530374",
"0.55228084",
"0.55173236",
"0.55111235"
] | 0.72538835 | 0 |
show whole ConsumedMsg table on the web | def con():
query = "SELECT * FROM ConsumedMsg;"
tablestr = dbwrapper._query_pretty(query)
result = string.replace(str(tablestr),'\n','<br>')
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display_received_table(self, num_disp, old):\n caller = self.caller\n msgtable = PrettyTable(\n [\"{wMsg #\", \"{wSender\", \"{wIC Date\", \"{wOOC Date\", \"{wSave\"]\n )\n mess_num = 1\n old = old[:num_disp]\n for mess in old:\n try:\n name = caller.messages.get_sender_name(mess)\n except AttributeError:\n mess = reload_model_as_proxy(mess)\n print(\n \"Error: Had to reload Msg ID %s as Messenger when displaying received table.\"\n % mess.id\n )\n name = caller.messages.get_sender_name(mess)\n date = caller.messages.get_date_from_header(mess) or \"Unknown\"\n ooc_date = mess.db_date_created.strftime(\"%x\")\n saved = \"{w*{n\" if mess.preserved else \"\"\n msgtable.add_row([mess_num, name, date, ooc_date, saved])\n mess_num += 1\n self.msg(msgtable)",
"def show():\n configdb = ConfigDBConnector()\n configdb.connect()\n queue_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE')\n port_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PORT')\n port_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PORT_BUFFER_DROP)\n rif_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'RIF')\n queue_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE_WATERMARK')\n pg_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PG_WATERMARK')\n pg_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PG_DROP)\n buffer_pool_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', BUFFER_POOL_WATERMARK)\n acl_info = configdb.get_entry('FLEX_COUNTER_TABLE', ACL)\n tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL')\n trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP')\n route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE')\n\n header = (\"Type\", \"Interval (in ms)\", \"Status\")\n data = []\n if queue_info:\n data.append([\"QUEUE_STAT\", queue_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), queue_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if port_info:\n data.append([\"PORT_STAT\", port_info.get(\"POLL_INTERVAL\", DEFLT_1_SEC), port_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if port_drop_info:\n data.append([PORT_BUFFER_DROP, port_drop_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), port_drop_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if rif_info:\n data.append([\"RIF_STAT\", rif_info.get(\"POLL_INTERVAL\", DEFLT_1_SEC), rif_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if queue_wm_info:\n data.append([\"QUEUE_WATERMARK_STAT\", queue_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), queue_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if pg_wm_info:\n data.append([\"PG_WATERMARK_STAT\", pg_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), pg_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if pg_drop_info:\n data.append(['PG_DROP_STAT', pg_drop_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), pg_drop_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if buffer_pool_wm_info:\n data.append([\"BUFFER_POOL_WATERMARK_STAT\", buffer_pool_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), buffer_pool_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if acl_info:\n data.append([ACL, pg_drop_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), acl_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if tunnel_info:\n data.append([\"TUNNEL_STAT\", rif_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), rif_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if trap_info:\n data.append([\"FLOW_CNT_TRAP_STAT\", trap_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), trap_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if route_info:\n data.append([\"FLOW_CNT_ROUTE_STAT\", route_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC),\n route_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n\n click.echo(tabulate(data, headers=header, tablefmt=\"simple\", missingval=\"\"))",
"def prod(): \n query = \"SELECT * FROM ProducedMsg;\"\n tablestr = dbwrapper._query_pretty(query)\n result = string.replace(str(tablestr),'\\n','<br>')\n return result",
"def display_sent_table(self, num_disp, old):\n msgtable = PrettyTable([\"{wMsg #\", \"{wReceiver\", \"{wDate\"])\n mess_num = 1\n old = old[:num_disp]\n for mess in old:\n receiver = mess.receivers\n if receiver:\n receiver = receiver[0]\n name = receiver.key\n else:\n name = \"Unknown\"\n try:\n date = self.caller.messages.get_date_from_header(mess) or \"Unknown\"\n except AttributeError:\n mess = reload_model_as_proxy(mess)\n print(\n \"Error: Had to reload Msg ID %s as Messenger when displaying sent table.\"\n % mess.id\n )\n date = self.caller.messages.get_date_from_header(mess) or \"Unknown\"\n msgtable.add_row([mess_num, name, date])\n mess_num += 1\n self.msg(msgtable)\n return",
"def show_messages(self):\n for msg in self.messages:\n print msg['text']",
"def list_messages(self):",
"def view_all_students():\n message = ''\n global conn\n with conn:\n rows = select_all_students(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Student Table', message)",
"def show_messages(self):\n if not self.messages:\n u_print(\" Queue.show_messages() ERR - There is no messages or malformed messages on queue. \")\n u_print(json.dumps(self.messages, indent=4))\n sys.exit(1)\n\n try:\n for m in self.messages:\n self.show_message(m.body)\n except:\n raise",
"async def statusinfo(self, astable):\n cmd = subprocess.check_output([\"birdc\", \"show\", \"proto\", \"all\", str(astable)])\n for page in chat_formatting.pagify(cmd.decode(), ['\\n', ' '], shorten_by=12):\n await self.bot.say(chat_formatting.box(page))",
"def show_messages(self):\n self.masterlog.revealme()",
"def get_messages(self):\n res = self.conn.cursor().execute(\"SELECT * FROM messages\")\n return res.fetchall()",
"def get_status_messages(self):\n\n try:\n subContext = conf.EHST_MESSAGES\n connHandler = self._tap._TapPlus__getconnhandler()\n response = connHandler.execute_tapget(subContext, verbose=False)\n if response.status == 200:\n for line in response:\n string_message = line.decode(\"utf-8\")\n print(string_message[string_message.index('=') + 1:])\n except OSError:\n print(\"Status messages could not be retrieved\")",
"def select_all_messages(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM LED_MESSAGE\")\n\n rows = cur.fetchall()\n\n all_m = []\n for row in rows:\n l_m = { \n \"ts\": row[0],\n \"elapsed\": float(row[1]), \n \"font\": row[2], \n \"type\": row[3], \n \"body\": row[4], \n \"color\": row[5], \n \"behavior\": row[6]\n }\n all_m.append({ \"message\":{ \"payload\": l_m }})\n\n return all_m",
"def get_all_msgs(self):\n data = self.database.select(self.tname)\n msgs = []\n for item in data:\n msgs.append((item[0], self.data_to_msg(item)))\n return msgs",
"def get_messages():\n dynamodb = boto3.client('dynamodb')\n messages = []\n _messages = []\n paginator = dynamodb.get_paginator('scan')\n for page in paginator.paginate(TableName=os.environ.get('MESSAGE_TABLE_NAME')):\n _messages.extend(page['Items'])\n\n if not _messages:\n return _messages\n\n for message in _messages:\n m = {\n message['timestamp']['N']: message['data']['S']\n }\n messages.append(m)\n\n # sort list of dict by timestamp\n messages = list(map(dict, sorted(list(i.items()) for i in messages)))\n\n _messages = []\n for message in messages:\n _, v = list(message.items())[0]\n _messages.append(v)\n\n return _messages",
"def message_table(message):\r\n table = Table(['property', 'value'])\r\n table.align['property'] = 'r'\r\n table.align['value'] = 'l'\r\n\r\n table.add_row(['id', message['id']])\r\n table.add_row(['initial_entry_time', message['initial_entry_time']])\r\n table.add_row(['visibility_delay', message['visibility_delay']])\r\n table.add_row(['visibility_interval', message['visibility_interval']])\r\n table.add_row(['fields', message['fields']])\r\n return [table, message['body']]",
"async def status(self):\n cmd = subprocess.check_output([\"birdc\", \"show\", \"proto\"])\n for page in chat_formatting.pagify(cmd.decode(), ['\\n', ' '], shorten_by=12):\n await self.bot.say(chat_formatting.box(page))",
"def show_tables(self, timeout):\n _abstract()",
"def show_tables(self, timeout):\n _abstract()",
"def show_logs():\n nodes=hl.getAllNodes();\n\n return render_template('logs.html',nodes = nodes)",
"def view_requests(self):\n requests = self.caller.db.scene_requests or {}\n table = EvTable(\"{wName{n\", \"{wSummary{n\", width=78, border=\"cells\")\n for tup in requests.values():\n table.add_row(tup[0], tup[1])\n self.msg(str(table))",
"def display_messages(self, layout):",
"def info(self) -> str:\n return tabulate(self.model_log_msg, self.head, tablefmt=\"presto\")",
"def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r",
"def show_message(id):\n session = Session()\n message = session.query(Message).filter_by(id=id).one()\n msg_dct = create_message(message)\n msg_dct[\"size\"] = message.size\n msg_dct[\"recipients\"] = create_recipients(message.recipients)\n return msg_dct",
"async def report(self, ctx):\n try:\n members = self.bot.get_all_members()\n online, offline, other = 0,0,0\n for member in members:\n if member.status.online:\n online += 1\n elif member.status.offline:\n offline += 1\n else:\n other += 1\n message = discord.Embed(title='Server report',type='rich', colour=discord.Color(0xffb6c1))\n message.add_field(name='Online',value='**{}** online members'.format(online))\n message.add_field(name='Offline',value='**{}** offline members'.format(offline))\n message.add_field(name='Other',value='**{}** other members'.format(other))\n await self.bot.say(embed=message)\n\n except Exception as error:\n await self.bot.say('The report has failed !')\n self.logger.error(error)",
"def getTableHead():\n return [\"Reporter\", \"Reportee\", \"aln. DKIM\", \"aln. SPF\", \"Disposition\",\n \"DKIM result\", \"SPF result\", \"msg#\", \"IP\", \"Country\",\n \"Report Begin\", \"Report End\", \"Report ID\"]",
"def show_data(self, msg):\n\n message = msg\n # self.ECGWin.append(message)\n self.getter.get(message)\n # self.ECGWin.append(msg2)\n # self.ECGWin.append(msg3)",
"def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) {1}\".format(cnt, x[0]))",
"def displayDiscarded(self):\n print(\"Discarded :\")\n if len(self.discarded) == 0:\n print(\"*no discard yet*\")\n else:\n for card in self.discarded:\n print(card.toString(), end=\" \")\n print()"
] | [
"0.63120323",
"0.6159939",
"0.61489886",
"0.6135532",
"0.6122139",
"0.59666145",
"0.58795524",
"0.56598026",
"0.56134933",
"0.55946493",
"0.558144",
"0.55531895",
"0.55498475",
"0.5525264",
"0.54190505",
"0.5415315",
"0.5398365",
"0.53646106",
"0.53646106",
"0.534758",
"0.53410304",
"0.5335471",
"0.52984345",
"0.5287166",
"0.52771634",
"0.5247632",
"0.5245507",
"0.5236971",
"0.5213126",
"0.52129513"
] | 0.6740566 | 0 |
Initialize the variable module. This adds ``pgfkeys`` to the list of LaTeX packages and adds the setup code to the preamble | def init_vars():
da_vinci.base.usepackage("pgfkeys")
da_vinci.base.add_preamble(setup_script) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_latex_preamble():\n from sage.misc.latex import latex\n latex.add_package_to_preamble_if_available('tikz')\n latex.add_to_mathjax_avoid_list(\"tikz\")\n if latex.has_file(\"tikz.sty\"):\n latex.add_to_preamble(r'\\usetikzlibrary{automata}')",
"def _init_tkvars(self,PO):\n for name,param in PO.params().items():\n self._create_tkvar(PO,name,param)",
"def _initialize_variables(self):\n\n self.font = Font()\n self.BibTerm = ''\n\n self.illegalChars = [chr(i) for i in range(1, 0x20)]\n self.illegalChars.extend([chr(0x7F), '\"', '*', '/', ':', '<', '>', \\\n '?', '\\\\', '|'])\n\n #define all StringVar(), BooleanVar(), etc… needed to hold info\n self.current_project = StringVar()\n self.dict_in = StringVar()\n self.terms_in = StringVar()\n self.old_dict = StringVar()\n self.dict_in_changed = IntVar()\n self.terms_in_changed = IntVar()\n self.old_dict_changed = IntVar()\n self.add_cldr_fields = IntVar()\n self.accept_regional_digits = IntVar()\n self.selected_lang = StringVar()\n self.int_var = IntVar()\n self.preferred = StringVar()\n self.PrefChar = StringVar()",
"def init_tkvars(self):\n\n for key in self.defaultprefs:\n value = self.defaultprefs[key]\n if type(value) is types.IntType:\n var = self.__dict__[key] = IntVar()\n elif type(value) is types.StringType:\n var = self.__dict__[key] = StringVar()\n var.set(value)\n\n self.resnum = IntVar()\n self.resnum.set(1)\n # Method for calculating Tm of primers\n self.Tm_method = StringVar()\n self.Tm_method.set('Stratagene')\n if 'Darwin' in self.currplatform:\n self.seqfontsize.set(16)\n else:\n self.seqfontsize.set(14)\n return",
"def main():\n init_latex()",
"def init_vars(self):\n # type: () -> None\n raise NotImplementedError",
"def initialize():\n #carga las fuente del usuario\n for family in USER_FONTS:\n for font in USER_FONTS[family]:\n name, path = USER_FONTS[family][font]\n pdfmetrics.registerFont(TTFont(name, path))",
"def kv_esx_init():\n disk_lib_init()",
"def init_extensions(self, package, module):\n\n pass",
"def init():",
"def init():\n defaults = _project_defaults()\n\n if Project.prompt:\n defaults['name'] = prompt(\"Enter the project's name:\", defaults['name'])\n defaults['package'] = prompt(\"Enter the project's package:\", defaults['package'])\n defaults['author'] = prompt(\"Enter the project's author:\", defaults['author'])\n defaults['author_email'] = prompt(\"Enter the project's author's email:\", defaults['author_email'])\n defaults['description'] = prompt(\"Enter the project's description:\", defaults['description'])\n\n # print(\"defaults:\\n{defaults}\".format(defaults=pformat(defaults)))\n\n if Project.use_templates:\n\n template = Template()\n\n for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))\n for herringlib in HerringFile.herringlib_paths]:\n\n info(\"template directory: %s\" % template_dir)\n # noinspection PyArgumentEqualDefault\n template.generate(template_dir, defaults, overwrite=False)",
"def init_package(self):\n package_name = self.args.name\n if package_name is None:\n msg = 'an package name must provide for enzi init'\n logging.error(msg)\n raise SystemExit(BASE_ESTRING + msg)\n\n initializer = ProjectInitialor(package_name)\n initializer.init()",
"def init_hotkeys(self):\n\n\t\tself._interface.init_hotkeys()",
"def _set_templates(spm_dir=SPM_DIR):\n global EPI_TEMPLATE, T1_TEMPLATE, GM_TEMPLATE, WM_TEMPLATE, CSF_TEMPLATE\n\n spm_version = _get_version_spm(SPM_DIR)\n\n # Set the tpm and template paths according to SPM version\n if spm_version == 'spm12':\n template_path = 'toolbox/OldNorm'\n tpm_path = 'toolbox/OldSeg'\n else:\n template_path = 'templates'\n tpm_path = 'tpm'\n\n # configure template images\n EPI_TEMPLATE = os.path.join(SPM_DIR, template_path, 'EPI.nii')\n SPM_T1_TEMPLATE = os.path.join(SPM_DIR, template_path, 'T1.nii')\n T1_TEMPLATE = \"/usr/share/data/fsl-mni152-templates/avg152T1.nii\"\n if not os.path.isfile(T1_TEMPLATE):\n T1_TEMPLATE += '.gz'\n if not os.path.exists(T1_TEMPLATE):\n T1_TEMPLATE = SPM_T1_TEMPLATE\n GM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'grey.nii')\n WM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'white.nii')\n CSF_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'csf.nii')",
"def setup(self):\n self.build_serverkeyhash()\n self.build_agent_pubkey()\n self.load_registration_key()",
"def initialization(self):\n super().initialization()\n self.register_uniq_ids([self.id_modal, self.id_modal_close, self.id_wip_button])\n\n # Register modules\n self.modules = [self.mod_table, self.mod_cache, self.mod_upload]",
"def _init_vars(self):\n if not self._has(\"vars\"):\n if self._has(\"p\"):\n self._.vars = self._.p.variables()\n elif self._has(\"q\"):\n self._.vars = self._.q.variables()\n elif self._has(\"P\"):\n self._.vars = variables(self._.P)\n elif self._has(\"Q\"):\n self._.vars = variables(self._.Q)\n self._.vars_ordered = len(self._.vars) <= 1",
"def setup_templates(self):\n self.libs[\"template\"] = (\"#libs/templates/include\", None, \"\")\n self[\"CPPPATH\"].append(\"#libs/templates/include\")",
"def __init__(self, \n root_dir: Path = Path('.'),\n variable_files: List[Union[Path,str]] = list(), \n inline_variables = dict(),\n yasha_extensions_files: List[Union[Path,str]] = list(), \n template_lookup_paths: List[Union[Path,str]] = list(), \n mode: Union[Literal['pedantic'], Literal['debug'], None] = None,\n encoding: str = ENCODING, \n **jinja_configs):\n self.root = root_dir\n self.parsers = PARSERS.copy()\n self.template_lookup_paths = [Path(p) for p in template_lookup_paths]\n self.yasha_extensions_files = [Path(p) for p in yasha_extensions_files]\n self.variable_files = [Path(f) for f in variable_files]\n self.encoding = encoding\n self.env = Environment()\n if mode == 'pedantic': self.env.undefined = StrictUndefined\n if mode == 'debug': self.env.undefined = DebugUndefined\n self.env.filters.update(FILTERS)\n self.env.tests.update(TESTS)\n for jinja_extension in CLASSES:\n self.env.add_extension(jinja_extension)\n if jinja_configs:\n for config, value in jinja_configs.items():\n setattr(self.env, config, value)\n for ext in self.yasha_extensions_files:\n self._load_extensions_file(ext)\n self.env.loader = FileSystemLoader(self.template_lookup_paths)\n self._load_data_files(self.variable_files) # data from the data files becomes the baseline for jinja global vars\n self.env.globals.update(inline_variables) # data from inline variables / directly-specified global variables overrides data from the data files",
"def init():\n pass",
"def __init__(self):\n self.ext_folder = ckan_config.get('ckanext.needupdate.ext_folder', '/usr/lib/ckan/default/src')\n self.ext_prefix = ckan_config.get('ckanext.needupdate.ext_folder', 'ckanext-')\n self.ext_sufix = ckan_config.get('ckanext.needupdate.ext_folder', '')",
"def x_init(self):\n pass",
"def initialise(self):\n self.set_up()",
"def initialise(self):",
"def _setup(self):",
"def _setup(self):",
"def init_locals(self):\n pass",
"def __init__(self):\n self._init_key_settings()\n self._init_misc_extensions()\n self.minVersion = (3, 1)\n self.maxVersion = (3, 4)\n self.versions = [(3, 4), (3, 3), (3, 2), (3, 1)]\n self.cipherNames = list(CIPHER_NAMES)\n self.macNames = list(MAC_NAMES)\n self.keyExchangeNames = list(KEY_EXCHANGE_NAMES)\n self.cipherImplementations = list(CIPHER_IMPLEMENTATIONS)",
"def preload_defs(self):\n for d in (self.module.search(\"grouping\") +\n self.module.search(\"typedef\")):\n self.install_def(self.unique_def_name(d))",
"def __init__(self):\n super(sppasSymbolSettings, self).__init__()\n\n self.__dict__ = dict(\n unk=\"<UNK>\",\n phone=sppasSymbolSettings.__phone_symbols(),\n ortho=sppasSymbolSettings.__ortho_symbols(),\n all=sppasSymbolSettings.__all_symbols()\n )"
] | [
"0.57580215",
"0.5727887",
"0.5618655",
"0.55456275",
"0.55201256",
"0.54513633",
"0.5410531",
"0.5400405",
"0.536209",
"0.5352983",
"0.5259593",
"0.52424645",
"0.52378637",
"0.5234449",
"0.52188027",
"0.5215052",
"0.5214246",
"0.5212781",
"0.5194066",
"0.5182458",
"0.51729494",
"0.5165755",
"0.5151608",
"0.5137626",
"0.51276326",
"0.51276326",
"0.5122942",
"0.51228863",
"0.5110015",
"0.5107818"
] | 0.8417244 | 0 |
Declare a variable namespace. | def declare_namespace(namespace):
return "\\declare{%s}" % namespace | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def define_variable(self, var, value):\n self.namespace[var] = value",
"def define_vars(vars, namespace=None):\n\t# TODO: support namespacing via nested dictionaries\n\tif namespace is None:\n\t\tprefix = \"\"\n\telse:\n\t\tprefix = namespace + \"/\"\n\treturn \"\\\\setvalue{%s}\" % \", \".join([\n\t\t\"%s = %s\" % (prefix + key, value)\n\t\tfor (key, value) in vars.items()\n\t])",
"def register_variable_namespace(self, name, namespace, t=None):\n if name in self.variable_namespace:\n old = self.variable_namespace[name]\n if old != namespace:\n msg = (\"overwriting namespace for variable {0}:\\n\"\n \" old: {1}\\n\"\n \" new: {2}\")\n warn(msg.format(name, old, namespace), RuntimeWarning)\n self.variable_namespace[name] = namespace\n if self.isenum(t):\n t = self.canon(t)\n for n, _ in t[1][2][2]:\n self.register_variable_namespace(n, namespace)",
"def new_declaration (var_names) :\r\n\r\n\ttokens = [\"::\"]\r\n\tfor n in var_names :\r\n\t\ttokens += tokenizer.tokenize(n) + [\",\"]\r\n\tdel tokens[-1]\r\n\r\n\tresult = declaration (tokens)\r\n\r\n\treturn result",
"def mk_var(self, name, type_):\n # type: (str, ty.Type) -> expr.Var\n\n var = expr.Var(name, type_)\n self.var_scopes[0].appendleft((name, var))\n return var",
"def declare_variable(var, bound_variables):\n varname = var.name\n vartype = var.vartype\n\n # check if it is bound and has already been seen\n if bound_variables is not None and varname in bound_variables:\n yvar = bound_variables[varname].yices_term\n var.bound = True\n return yvar\n\n # check if it has already been seen\n yvar = Terms.get_by_name(varname)\n if yvar is not None:\n #now we need to see if it is free or bound\n tag = Terms.constructor(yvar)\n if tag == Constructor.VARIABLE:\n var.bound = True\n bound_variables[varname] = var\n return yvar\n\n type_term = vartype.yices_term\n type_name = vartype.name\n\n var_term = None\n\n if bound_variables is not None:\n # we need to make a yices variable not an uninterpreted term\n var_term = Terms.new_variable(type_term, varname)\n if var_term is None:\n sys.stderr.write(f'declare_variable: Term.new_variable failed {Yices.error_string()}\\n')\n return None\n bound_variables[varname] = var\n var.bound = True\n else:\n var_term = Terms.new_uninterpreted_term(type_term, varname)\n if var_term is None:\n sys.stderr.write(f'declare_variable: Term.new_uninterpreted_term failed {Yices.error_string()}\\n')\n return None\n\n YicesSignature.types_to_variables[type_name].add(var)\n\n return var_term",
"def declare_variable(self, v):\n scope = self.get_default_declaration_scope()\n scope.set(v.args[0])\n self.resolve_to(v, RESOLUTION_NAKED)",
"def define_var(self, var, value):\n self.binding[var] = value",
"def var(name, indices=None, namespace=None):\n return ExprVariable(name, indices, namespace)",
"def declare_variable(\n self,\n identifier: str,\n typ: types.Type,\n *,\n is_transient: t.Optional[bool] = None,\n initial_value: t.Optional[expressions.ValueOrExpression] = None,\n comment: t.Optional[str] = None,\n ) -> None:\n value = None\n if initial_value is not None:\n value = expressions.ensure_expr(initial_value)\n self.add_declaration(\n VariableDeclaration(\n identifier,\n typ,\n is_transient=is_transient,\n initial_value=value,\n comment=comment,\n )\n )",
"def init_namespace():\n global namespace\n with open('/run/secrets/kubernetes.io/serviceaccount/namespace') as f:\n namespace = f.read()",
"def make_variable(self, name = None):\r\n return self.Variable(self, name = name)",
"def _var(self,\n name,\n shape,\n collections=None,\n initializer=None,\n trainable=True,\n device='/cpu:0',\n dtype=tf.float32):\n collections = set(collections)\n collections.add(GKeys.GLOBAL_VARIABLES)\n var = tf.contrib.framework.variable(\n name=name,\n shape=shape,\n collections=list(collections),\n initializer=initializer,\n trainable=trainable,\n device=device,\n dtype=dtype\n )\n if GKeys.TRAIN_OP not in collections:\n tf.contrib.framework.add_model_variable(var)\n return var",
"def make_variable(self, name=None):\r\n return self.Variable(self, name=name)",
"def make_variable(self, name=None):\r\n return self.Variable(self, name=name)",
"def set_namespace(key, dic):\n\tnew_namespace(key)\n\tREGISTRY[key] = Namespace(dic)",
"def _init_temporary_namespace(cls, nsObj):\n pass",
"def var(*args, **kwargs):\n return Variable(*args, **kwargs)",
"def __setattr__(self, name, value):\n if not isinstance(name, str):\n raise ValueError('Namespace label must be a string')\n if name.startswith('_'):\n raise ValueError('Namespace cannot start with an underscore')\n\n if name in self._namespaces:\n raise ValueError('Namespaces cannot be redefined')\n\n self._namespaces[name] = Namespace(name, label=value)",
"def declare_variables(self):\n\n\t\tvar_prefixes = ['W_in', 'W_rnn', 'b_rnn', 'W_out', 'b_out']\n\t\tself.var_dict = {}\n\n\t\twith tf.variable_scope('network'):\n\t\t\tfor p in var_prefixes:\n\t\t\t\tself.var_dict[p] = tf.get_variable(p, initializer=par[p+'_init'])",
"def __init__(self, name: str, namespace: str):\n self.name = name\n self.namespace = namespace",
"def Variable(name):\n placeholder_node = placeholder_op()\n placeholder_node.name = name\n return placeholder_node",
"def __init__(self, variables, values, parent):\n \n self.namespace = dict(zip(variables, values))\n self.parent = parent",
"def push_local_ns(self, name, value):\n self.interpreter.locals[name] = value",
"def _declaration_variable(self, node: ET.Element):\n # variable names\n variables_and_values = self.transform_all_subnodes(\n self.get_one(node, './variables'), skip_empty=True,\n ignored={'entity-decl-list__begin', 'entity-decl-list','attr-spec' })\n if not variables_and_values:\n _LOG.error('%s', ET.tostring(node).decode().rstrip())\n raise SyntaxError('at least one variable expected in variables list')\n variables = [var for var, _ in variables_and_values]\n # base type of variables\n base_type = self.transform_one(self.get_one(node, './type'))\n\n # dimensionality information (only for array types)\n dimensions_node = node.find('./dimensions')\n variable_dimensions = [getattr(var, 'fortran_metadata', {}).get('dimensions', None)\n for var in variables]\n has_variable_dimensions = any([_ is not None for _ in variable_dimensions])\n if has_variable_dimensions and not self._split_declarations:\n raise NotImplementedError('inline dimensions not implemented yet')\n if dimensions_node is not None and has_variable_dimensions:\n raise SyntaxError(\n 'declaration dimension data as well as per-variable dimension data present')\n if dimensions_node is not None:\n dimensions = self.transform_one(dimensions_node)\n assert len(dimensions) >= 1\n self.ensure_import('static_typing', 'st')\n annotation = make_st_ndarray(base_type, dimensions)\n annotations = [annotation for _ in variables]\n elif has_variable_dimensions:\n self.ensure_import('static_typing', 'st')\n annotations = [base_type if _ is None else make_st_ndarray(base_type, _)\n for _ in variable_dimensions]\n else:\n annotations = [base_type for _ in variables]\n\n # initial values\n if dimensions_node is not None:\n values = [None if val is None else make_numpy_constructor('array', val, base_type)\n for _, val in variables_and_values]\n elif has_variable_dimensions:\n assert len(variables_and_values) == len(variable_dimensions)\n values = [None if val is None\n else (val if dim is None else make_numpy_constructor('array', val, base_type))\n for (_, val), dim in zip(variables_and_values, variable_dimensions)]\n else:\n values = [val for _, val in variables_and_values]\n\n metadata = {'is_declaration': True}\n intent_node = node.find('./intent')\n if intent_node is not None:\n metadata['intent'] = intent_node.attrib['type']\n\n attributes = ('allocatable', 'asynchronous', 'external', 'intrinsic', 'optional',\n 'parameter', 'pointer', 'protected', 'save', 'target', 'value', 'volatile')\n for attribute in attributes:\n if node.find('./attribute-{}'.format(attribute)) is not None:\n metadata['is_{}'.format(attribute)] = True\n\n if metadata:\n metadata_node = horast_nodes.Comment(\n value=ast.Str(' Fortran metadata: {}'.format(repr(metadata))), eol=False)\n\n _handled = {'variables', 'type', 'dimensions', 'intent'}\n extra_results = self.transform_all_subnodes(node, ignored={\n 'type-declaration-stmt'} | _handled | {'attribute-{}'.format(_) for _ in attributes})\n if extra_results:\n _LOG.warning('ignoring additional information in the declaration:\\n%s', extra_results)\n\n if not self._split_declarations:\n raise NotImplementedError()\n assignments = [{\"name\":var, \"type\":ann, \"value\":val}\n for var, ann, val in zip(variables, annotations, values)]\n if metadata:\n new_assignments = []\n for assignment in assignments:\n assignment.update({\"metadata\":metadata})\n new_assignments.append(assignment)\n new_assignments.append(metadata_node)\n assignments = new_assignments\n\n return assignments",
"def __init__(self, default_ns, namespaces=[]):\n self.document = prov.ProvDocument ()\n self.default_ns = default_ns\n self.document.set_default_namespace (self.default_ns)\n self.namespaces = namespaces\n self.subspaces = {}\n for namespace in self.namespaces:\n self.subspaces[namespace] = self.add_namespace (self.default_ns, namespace)",
"def register_var(tiling_var, val):\n globals()[tiling_var] = val",
"def create_simplenamespace():\n obj1 = _(foo=1)\n obj1.random = \"Whoa\"\n print(obj1)\n obj2 = _(foo=2, bar=\"Yipee!\")\n print(obj2)\n obj3 = _(foo=5, bar=4.0, boo=[\"list\", \"with\", \"strings\"])\n print(obj3)",
"def define_variable(var, val, env):\n frame = first_frame(env)\n def scan(vars, vals):\n if isNull(vars):\n return addBindingToFrame(var, val, frame)\n elif var == car(vars):\n return set_car(vals, val)\n else:\n return scan(cdr(vars), cdr(vals))\n return scan(frame_variables(frame), frame_values(frame))",
"def set_test_namespace_value(namespace_name=None):\r\n global namespace_value\r\n namespace_value = namespace_name"
] | [
"0.7665097",
"0.65671283",
"0.6502325",
"0.6340259",
"0.6307674",
"0.6245326",
"0.6205705",
"0.62035084",
"0.5977103",
"0.597564",
"0.58726317",
"0.5862272",
"0.5831435",
"0.58100283",
"0.58100283",
"0.58069617",
"0.5781008",
"0.57711345",
"0.57605255",
"0.5754003",
"0.5741313",
"0.5733034",
"0.57290334",
"0.57241845",
"0.5714161",
"0.5709721",
"0.5703338",
"0.5688943",
"0.5678536",
"0.567497"
] | 0.7276759 | 1 |
Set prompt to active task | def set_prompt(self, ps1=''):
if not ps1:
task = self.db.get_active_task()
if task:
ps1 = ('%s#%s' % (task['tname'], task['pname'])).encode('utf8')
else:
ps1 = self.bloody_prompt
self.prompt = ('(%s)> ' % ps1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prompt(self):\n self.prompt_flag = True",
"def prompt(self, task, text='', print_=False):\n template = self.prompts[task]['prompt']\n res = self.format_prompt(task, template, text)\n if print_:\n print(res)\n else:\n return res",
"def do_prompt(self, line):\n self.prompt = line + ': '",
"def do_prompt(self, line):\n if line:\n self.prompt = \"(%s) \" %line\n\n else:\n print 'Please specify a prompt text'",
"def prompt(self, upstream_name):\n self.prompt_events[upstream_name].set()",
"def showPrompt(self):\r\n self.terminal.nextLine()\r\n self.terminal.write(self.ps[self.pn])",
"def local_task_entry(text=\"> \"):\n task_completer = localTaskCompleter()\n while True:\n result = prompt(text, completer=task_completer)\n if result not in task_completer.tasks:\n print(\"Invalid task, try again.\")\n else:\n return result",
"def main_task_entry(text=\"> \"):\n task_completer = mainTaskCompleter()\n while True:\n result = prompt(text, completer=task_completer)\n if result not in task_completer.tasks:\n print(\"Invalid task, try again.\")\n else:\n return result",
"def passPrompt(title, prompt):\n answer = tkSimpleDialog.askstring(title, prompt, show=\"*\")\n print answer",
"def prompt():\n sys.stdout.write('>> ')\n sys.stdout.flush()",
"def Prompt(self,message):\n\t\tself.acad.ActiveDocument.Utility.Prompt(message)",
"def prompt(self, question):\n self.output(' ')\n self.output(question)\n self.output(self.parse_response(str(self.ui())))",
"def on_start(self, session):\n self.put_prompt(session)",
"def __window_prompt(self, text):\n return True",
"def show_prompt(self, prompt=None):\n\n if prompt is None:\n prompt = self.prompt\n\n # Only insert the prompt if we don't have one already:\n #\n if self.find_prompt(prompt) == sublime.Region(-1, -1):\n self._write(0, prompt)",
"def put_prompt(self, session):\n self.reply_text(session, self._prompt, False)",
"def ask(self, prompt: str) -> str:\n raise NotImplementedError",
"def _getPrompt(self):\n return Prompt(\n task=AnsibleTask(),\n connection=None,\n play_context=AnsiblePlayContext(),\n loader=None,\n templar=None,\n shared_loader_obj=None\n )",
"def ask(prompt):\n\n return renpy.exports.invoke_in_new_context(renpy.store.layout.yesno_prompt, None, prompt)",
"def prompt(self):\r\n super().prompt_number()\r\n self.email = str(input(\"Email: \"))",
"def set_unique_prompt (self, optional_prompt=None):\r\n if optional_prompt is not None:\r\n self.prompt = optional_prompt\r\n self.sendline (self.PROMPT_SET_SH) # sh-style\r\n i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)\r\n if i == 0: # csh-style\r\n self.sendline (self.PROMPT_SET_CSH)\r\n i = self.expect ([TIMEOUT, self.PROMPT], timeout=10)\r\n if i == 0:\r\n return 0\r\n return 1",
"def set_ssh_prompt(self, new_prompt):\n old_prompt = self._current._prompt\n self._current._prompt = new_prompt\n #self._current.set_prompt(new_prompt)\n #print \"current prompt:\", repr(self._current._prompt)\n return old_prompt",
"def prompt(self, value):\n if \"++\" not in value:\n m = re.match(r\"^(.*\\w)(\\s*\\W\\s*)?$\", value)\n if m:\n value = \"{}++{}\".format(*m.groups(\"\"))\n self._prompt = value",
"def _prompt_main_menu(self, update, context, message='Please choose an option:'):\n id = context.user_data['id']\n email = context.user_data['email']\n email = 'Not supplied' if email == '' else email\n self._reply_message(update,\n f'ID: {id}\\n'\n f'Email: {email}\\n'\n f'{message}',\n keyboard=self.MAIN_STATE_OPTIONS,\n inline_keyboard=True)",
"def add_prompt(self, prompt, echo=True):\n self.prompts.append((prompt, echo))",
"def waitprompt(c):\n c.expect('\\n> ')\n time.sleep(0.1)",
"def change_task(self):\n sel_task = self.find_task()\n if sel_task is False:\n return\n\n # We have a valid task, let's change it.\n self.clear_screen()\n self.display_task(sel_task)\n print \"\\n'd': Mark this task done\"\n print \"'t': Change tags of this task\"\n print \"'x': Remove this task permanently (cannot be undone)\"\n print \"'c': Cancel and return to main menu.\"\n selection = None\n\n # Continue until user cancels\n while selection != 'c':\n selection = raw_input(\n \"Enter command for selected task > \").strip().lower()\n\n if selection == 'd':\n sel_task.mark_done(self.user)\n self.current_collection.archive()\n break\n\n if selection == 't':\n user_input = raw_input(\n \"Overwrite existing tags? y/n > \"\n ).strip().lower()\n if user_input in ('y', 'yes'):\n del sel_task.tags\n user_tags = raw_input(\n \"Enter new tags (comma separated) (optional). > \")\n sel_task.tags = [\n tag.strip() for tag in user_tags.split(',')]\n break\n\n if selection == 'x':\n if raw_input(\"Delete this task? y/n > \") in ('y', 'Y'):\n delete = self.current_collection.delete(sel_task)\n if delete:\n raw_input(\"Task deleted. Press Enter\")\n break\n else:\n raw_input(\"Task not deleted. Try again.\")\n continue\n else:\n print \"Please enter valid command.\"\n return",
"def __init__(self, bot: commands.Bot):\n\n super().__init__(bot)\n self.current_prompt = ''",
"def prompt() -> None:\n\n username = click.prompt(\n text=\"Please enter a username\",\n type=click.STRING\n )\n password = click.prompt(\n text=\"Please enter a new password\",\n hide_input=True,\n confirmation_prompt=True\n )\n newsletter_subscription = click.prompt(\n text=\"Would you like to subscribe to our newsletter?\",\n default=False,\n type=click.BOOL\n )\n favorite_color=click.prompt(\n text=\"What is your favorite color?\",\n type=click.Choice([\"blue\", \"green\", \"yellow\"], case_sensitive=False)\n )\n\n click.echo(\n f\"Username: {username} | Password: {'*' * len(password)} | \"\n + f\"Newsletter: {newsletter_subscription} | Favorite color: \"\n + click.style(favorite_color, fg=favorite_color)\n )",
"def Reset(self):\n self.prompt_str = self.prompt_ev.FirstPromptEvaluator()"
] | [
"0.74394417",
"0.70223904",
"0.6675487",
"0.6625416",
"0.66070217",
"0.65539664",
"0.6529011",
"0.6466097",
"0.63974833",
"0.6393468",
"0.6381298",
"0.63400286",
"0.63384306",
"0.6331718",
"0.6326696",
"0.6280308",
"0.6241637",
"0.61691356",
"0.6162123",
"0.6134465",
"0.6131904",
"0.61165524",
"0.6053357",
"0.60220546",
"0.5984902",
"0.5958391",
"0.5958055",
"0.59514856",
"0.59193087",
"0.5890549"
] | 0.79408383 | 0 |
Command project project's related commands Usage project | || Description The command displays project information, creates a new project, updates and deletes one. | def do_project(self, arg):
def _usage():
self.do_help('project')
args = shlex.split(arg)
if not args:
_usage()
return
commands = ['create', 'delete', 'update']
first_arg = args[0].lower()
is_project_info = first_arg not in commands
if is_project_info:
# Get the project info
project_name = args[0].decode('utf8')
self.display_project_info(project_name)
return
if first_arg == 'create':
# Create a new project
self.create_project()
return
if len(args) == 1:
print(self.error_wrong_parameters)
_usage()
return
project_name = args[1].decode('utf8')
if first_arg == 'update':
# Update a project
self.update_project(project_name)
elif first_arg == 'delete':
# Delete a project
self.delete_project(project_name)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def project():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n\n menu = M(c=\"project\")(\n M(\"Projects\", f=\"project\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Locations\", f=\"location\")(\n M(\"Map\", m=\"map\"),\n M(\"Contacts\", f=\"location_contact\"),\n ),\n M(\"Reports\", f=\"location\", m=\"report\")(\n M(\"3W\", f=\"location\", m=\"report\"),\n M(\"Beneficiaries\", f=\"beneficiary\", m=\"report\"),\n #M(\"Indicators\", f=\"indicator\", m=\"report\",\n # check=indicators,\n # ),\n #M(\"Indicators over Time\", f=\"indicator\", m=\"timeplot\",\n # check=indicators,\n # ),\n M(\"Funding\", f=\"organisation\", m=\"report\"),\n ),\n M(\"Import\", f=\"project\", m=\"import\", p=\"create\", restrict=[ADMIN])(\n M(\"Import Projects\", m=\"import\", p=\"create\"),\n M(\"Import Project Organizations\", f=\"organisation\",\n m=\"import\", p=\"create\"),\n M(\"Import Project Communities\", f=\"location\",\n m=\"import\", p=\"create\"),\n ),\n M(\"Activity Types\", f=\"activity_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Beneficiary Types\", f=\"beneficiary_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Sectors\", f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Themes\", f=\"theme\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )\n\n return menu",
"def project():",
"def project():",
"def project():",
"def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def main(type, project, author, email):\n while os.path.exists(project):\n click.echo('The project has been exists. Would you want to rebuild the project?\\n')\n click.echo('> {:<12}\\tfor\\tcontinue'.format('YES'))\n click.echo('> {:<12}\\tfor\\tbreak'.format('NO'))\n click.echo('> {:<12}\\tfor\\tbuilding another project\\n'.format('PROJECT NAME'))\n confirm_info = input('> ').strip().lower()\n if confirm_info == 'yes':\n shutil.rmtree(project)\n elif confirm_info == 'no':\n return\n else:\n project = confirm_info\n my_project = CreateNewProject(type, project, author, email)\n my_project.run()",
"def project():\n\n return M(c=\"project\", f=\"task\")(\n M(\"Tasks\", f=\"task\")(\n M(\"Create\", m=\"create\"),\n M(\"My Open Tasks\", vars={\"mine\":1}),\n ),\n )",
"def help():\n\ttext = \"\"\"\n\n\t\t DELETE_PROJECT\n\t\t\t USAGE:\n\n\t%s project [project [project]]...\n\n\t\n\t\\n\"\"\" % (os.path.basename(sys.argv[0]))\n\tprint text",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def projects(args):\n _projects = lib.get_projects(\n args.target, username=args.username, password=args.password\n )\n if _projects:\n print(\"\\n\".join(_projects))",
"def delete_project(\n name\n):\n\n cmd = dict()\n cmd[\"type_\"] = \"delete_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)",
"def add_project(project, taglist):\n if anonymize:\n import random\n project['name'] = 'Anonimized Project ' + str(project['id'])[-3:]\n project['client'] = 'Anonimized Client'\n\n wf.add_item(title=project['name'],\n subtitle='Client: ' +\n project['client'] +\n ' Hit ENTER to show menu, press ALT for more info.',\n modifier_subtitles={\n 'alt': 'Tags: ' + ', '.join(taglist),\n },\n arg=str(project['id']),\n valid=True,\n icon='icons/project_{0}.png'.format(\n project['project_state']).lower(),\n copytext=project['name'])",
"def project_show(ctx, args):\n for project_id in args:\n data = ctx.obj.get_project_by_project_id(project_id)\n output_json_data(data)",
"def newproject():\n log('Criando novo projeto', yellow)\n log('Cria a conta no bitbucket com o nome do projeto vázio que o script se encarregará do resto', red)\n\n conta = raw_input('Digite o nome do projeto: ')\n\n local('echo \"clonando projeto %s\"' % bitbucket_repository)\n local('git clone {0} {1}{2}'.format(bitbucket_repository, folder_project_local, conta))\n local('cd {0}{1}'.format(folder_project_local, conta))\n local('mkvirtualenv {0}'.format(conta))\n local('setvirtualenvproject')\n local('pip install -r requirements.txt')\n local('rm -rf {0}{1}/.git'.format(folder_project_local, conta))\n local('rm -rf README.md')\n local('git init')\n local('git remote add origin [email protected]:{0}/{1}.git'.format(bitbucket_user, conta))",
"def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. \"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)",
"def do_project_show(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n _, project = cs.projects.get(id)\n utils.print_dict(project)",
"def update_project(self, project_id, project):\n\n with self._transaction.cursor() as cur:\n # ensure this project exists\n cur.execute(\n \"SELECT project_id \"\n \"FROM barcodes.project \"\n \"WHERE project_id=%s;\",\n (project_id,))\n\n row = cur.fetchone()\n if row is None:\n raise NotFound(\"No project with ID %s\" % project_id)\n\n query = f\"\"\"\n UPDATE barcodes.project\n SET {p.DB_PROJ_NAME_KEY}=%s,\n {p.SUBPROJECT_NAME_KEY}=%s,\n {p.ALIAS_KEY}=%s,\n {p.IS_MICROSETTA_KEY}=%s,\n {p.SPONSOR_KEY}=%s,\n {p.COORDINATION_KEY}=%s,\n {p.CONTACT_NAME_KEY}=%s,\n {p.ADDTL_CONTACT_NAME_KEY}=%s,\n {p.CONTACT_EMAIL_KEY}=%s,\n {p.DEADLINES_KEY}=%s,\n {p.NUM_SUBJECTS_KEY}=%s,\n {p.NUM_TIMEPOINTS_KEY}=%s,\n {p.START_DATE_KEY}=%s,\n {p.BANK_SAMPLES_KEY}=%s,\n {p.PLATING_START_DATE_KEY}=%s,\n {p.DISPOSITION_COMMENTS_KEY}=%s,\n {p.COLLECTION_KEY}=%s,\n {p.IS_FECAL_KEY}=%s,\n {p.IS_SALIVA_KEY}=%s,\n {p.IS_SKIN_KEY}=%s,\n {p.IS_BLOOD_KEY}=%s,\n {p.IS_OTHER_KEY}=%s,\n {p.DO_16S_KEY}=%s,\n {p.DO_SHALLOW_SHOTGUN_KEY}=%s,\n {p.DO_SHOTGUN_KEY}=%s,\n {p.DO_RT_QPCR_KEY}=%s,\n {p.DO_SEROLOGY_KEY}=%s,\n {p.DO_METATRANSCRIPTOMICS_KEY}=%s,\n {p.DO_MASS_SPEC_KEY}=%s,\n {p.MASS_SPEC_COMMENTS_KEY}=%s,\n {p.MASS_SPEC_CONTACT_NAME_KEY}=%s,\n {p.MASS_SPEC_CONTACT_EMAIL_KEY}=%s,\n {p.DO_OTHER_KEY}=%s,\n {p.BRANDING_ASSOC_INSTRUCTIONS_KEY}=%s,\n {p.BRANDING_STATUS_KEY}=%s\n WHERE project_id=%s;\"\"\"\n\n cur.execute(query,\n (\n project.project_name,\n project.subproject_name,\n project.alias,\n project.is_microsetta,\n project.sponsor,\n project.coordination,\n project.contact_name,\n project.additional_contact_name,\n project.contact_email,\n project.deadlines,\n project.num_subjects,\n project.num_timepoints,\n project.start_date,\n project.bank_samples,\n project.plating_start_date,\n project.disposition_comments,\n project.collection,\n project.is_fecal,\n project.is_saliva,\n project.is_skin,\n project.is_blood,\n project.is_other,\n project.do_16s,\n project.do_shallow_shotgun,\n project.do_shotgun,\n project.do_rt_qpcr,\n project.do_serology,\n project.do_metatranscriptomics,\n project.do_mass_spec,\n project.mass_spec_comments,\n project.mass_spec_contact_name,\n project.mass_spec_contact_email,\n project.do_other,\n project.branding_associated_instructions,\n project.branding_status,\n project_id\n ))\n return cur.rowcount == 1",
"def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html",
"def prompt_project(arguments):\r\n projects = Project.all()\r\n\r\n # Do not prompt -- and auto select the one project if a account only has one project\r\n if len(projects) == 1:\r\n return projects[0]\r\n\r\n if arguments['--project-index'] is not None:\r\n try:\r\n idx = int(arguments['--project-index']) - 1\r\n project = projects[idx]\r\n return project\r\n except:\r\n print 'Yikes, that did not work -- try again?'\r\n exit()\r\n\r\n while True:\r\n print \"Select a Project:\"\r\n for idx, project in enumerate(projects):\r\n print \"[{}] {}\".format(idx+1, project.name)\r\n s = raw_input('>> ')\r\n\r\n try:\r\n project = projects[int(s) - 1]\r\n except:\r\n print 'Hmmm, that did not work -- try again?'\r\n continue\r\n\r\n break\r\n\r\n return project",
"def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))",
"def delete_project(self, project_name):\n # type(project_name) == unicode\n project = self.db.get_project_by_name(project_name)\n if not project:\n print(u\"*** Error: The project '{}' was not found.\"\n \"\".format(project_name))\n return\n print('Caution! The related tracking will be deleted as well.{eol}'\n 'Do you really want to delete the project? [y/N] '\n ''.format(eol=os.linesep), end='')\n if not helpers.get_yes_no(default='n'):\n return\n self.db.delete_project_by_name(project_name)\n print(u\"The project '%s' has been deleted.\" % project_name)\n self.set_prompt()",
"def project_create(project):\n client.project.create(project)",
"def add_project(project):\n print('add_project: ' + str(project))\n try_insert_or_update(models.projects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n name=project['name'], path=project['name'], active=True, user_id=current_user.id)])\n return",
"def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)",
"def delete_project(request, project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n project = get_object_or_404(GameProject, pk=project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(GameProject, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('all_projects'))",
"def projectdetails(http_request, project_id=0):\n\tp = get_object_or_404(Project, pk=project_id)\n\treturn render_to_response('project_detail.html', {'project': p})",
"def list_projects():\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_projects()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Provision Network\"])\n projects = ret[constants.RETURN_VALUE_KEY]\n for project in projects:\n table.add_row(project)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def project(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"project\")",
"def delete_project(arn=None):\n pass",
"def project_command(\n self,\n id: Optional[ProjectCommandID] = None,\n ) -> ProjectCommand:\n _args = [\n Arg(\"id\", id, None),\n ]\n _ctx = self._select(\"projectCommand\", _args)\n return ProjectCommand(_ctx)"
] | [
"0.70812523",
"0.67117447",
"0.67117447",
"0.67117447",
"0.65857023",
"0.6532705",
"0.65131515",
"0.6459868",
"0.64476234",
"0.63825434",
"0.63591444",
"0.63481617",
"0.63410133",
"0.6323259",
"0.6273819",
"0.6262763",
"0.6189054",
"0.61842257",
"0.6142908",
"0.6128155",
"0.6077349",
"0.6031112",
"0.6027237",
"0.6019211",
"0.59988934",
"0.59939903",
"0.59832585",
"0.59747773",
"0.59576267",
"0.594457"
] | 0.7934942 | 0 |
Command task task's related commands. Usage task | | Description The command displays general info, updates or deletes a task. | def do_task(self, arg):
def _usage():
self.do_help('task')
args = arg.split()
if not len(args):
print(self.error_wrong_parameters)
return
commands = ['delete', 'update']
first_arg = args[0].lower()
if first_arg not in commands:
# Display the task info
self.display_task_info(first_arg.decode('utf-8'))
return
if len(args) == 1:
print("*** Error: The task is not specified.")
return
if first_arg == 'update':
self.update_task(args[1].decode('utf-8'))
self.set_prompt()
elif first_arg == 'delete':
self.delete_task(args[1].decode('utf-8')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def command(task_id, tail, wip, limit):\n if task_id:\n task = storage.get_by_id(task_id)\n\n if not task:\n click.echo(f\"Task {task_id} not found.\")\n sys.exit(1)\n\n tasks = [task]\n else:\n tasks = storage.all(limit=limit, reverse=tail, wip=wip)\n\n print_header()\n for task in tasks:\n show_task(task)",
"def usage(self, task, verbose=False):\n print(\"GBTIDL> this command is deprecated, just use the ipython methods\")\n # if task is a string, find the function name\n _task = task\n help(_task)",
"def help_command(self, sender, *args):\n helptext = '\\n'.join([cmd.__doc__ or \"%s - no docstring\" % cmd.__name__ for _, cmd in self.patterns])\n return \"\"\"`text` - new task \\n%s\n \"\"\" % helptext",
"def help(ctx):\n print(\"\"\" Usage invoke <task>\nTasks:\n build:\n build an artifact. NOTE - do not put spaces in the author name\n release:\n mark a Release Candidate or a Hotfix as released to production\n usage: invoke release <release name>\n \"\"\")",
"def command(task_id, message, time, project, category, links):\n task = storage.get_by_id(task_id)\n\n if not task:\n click.echo(f\"Task {task_id} not found.\")\n sys.exit(1)\n\n new_values = {\n 'message': message,\n 'time': time,\n 'project': project,\n 'category': category,\n 'links': links,\n }\n\n fields_changed = task.edit(new_values)\n\n if not fields_changed:\n click.echo(f\"No changes made to the task {task.id}.\")\n sys.exit(1)\n\n storage.save(task)\n\n fields_name = [field_name for field_name, *_ in fields_changed]\n click.echo(\n f\"The task {task_id} was edited with success. \"\n f\"Fields changed: {fields_name}\"\n )",
"def __call__(self, argv, help):\n parser = argparse.ArgumentParser(\n prog=\"%s do\" % self.ctrl.progname,\n description=help)\n parser.add_argument(\"instance\", nargs=1,\n metavar=\"instance\",\n help=\"Name of the instance from the config.\",\n type=str,\n choices=self.get_completion())\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"task\", nargs='?',\n help=\"The task to run.\")\n group.add_argument(\"-l\", \"--list\",\n action='store_true',\n help=\"List available tasks.\")\n parser.add_argument(\"task_args\",\n metavar=\"arg|key=value\", nargs=\"*\",\n help=\"Arguments for the task.\")\n args = parser.parse_args(argv)\n\n instance = self.ctrl.instances[args.instance[0]]\n if args.list:\n print(\"Available commands:\")\n print()\n with callables(instance) as tasks:\n for name in sorted(tasks):\n print(\" %s\" % name)\n return\n task_args = []\n task_kwargs = {}\n for arg in args.task_args:\n parts = arg.split('=', 1)\n if len(parts) == 1:\n task_args.append(arg)\n else:\n task_kwargs[parts[0]] = parts[1]\n instance.do(args.task, *task_args, **task_kwargs)",
"def task():\n\n\tprint('Example task executed.')",
"def help(cls, entry: \"TaskEntry\"):\n executor = entry.executor\n\n if cls.__doc__:\n out = fmt.FormatList(executor)\n out.add(fmt.Header(f\"Help: {cls.name}\"))\n out.add(fmt.Line(cls.__doc__))\n out.add(fmt.Footer())\n executor.send(out)\n else:\n executor.msg(text=\"Help is not implemented for this command.\")",
"def change_task(self):\n sel_task = self.find_task()\n if sel_task is False:\n return\n\n # We have a valid task, let's change it.\n self.clear_screen()\n self.display_task(sel_task)\n print \"\\n'd': Mark this task done\"\n print \"'t': Change tags of this task\"\n print \"'x': Remove this task permanently (cannot be undone)\"\n print \"'c': Cancel and return to main menu.\"\n selection = None\n\n # Continue until user cancels\n while selection != 'c':\n selection = raw_input(\n \"Enter command for selected task > \").strip().lower()\n\n if selection == 'd':\n sel_task.mark_done(self.user)\n self.current_collection.archive()\n break\n\n if selection == 't':\n user_input = raw_input(\n \"Overwrite existing tags? y/n > \"\n ).strip().lower()\n if user_input in ('y', 'yes'):\n del sel_task.tags\n user_tags = raw_input(\n \"Enter new tags (comma separated) (optional). > \")\n sel_task.tags = [\n tag.strip() for tag in user_tags.split(',')]\n break\n\n if selection == 'x':\n if raw_input(\"Delete this task? y/n > \") in ('y', 'Y'):\n delete = self.current_collection.delete(sel_task)\n if delete:\n raw_input(\"Task deleted. Press Enter\")\n break\n else:\n raw_input(\"Task not deleted. Try again.\")\n continue\n else:\n print \"Please enter valid command.\"\n return",
"def run_task(self, cmd):\n # Not initialized here...must be overloaded from outside\n raise NotImplementedError()",
"def run_task(self, cmd):\n # Not initialized here...must be overloaded from outside\n raise NotImplementedError()",
"def edit_task():\n # get task label from user\n responses = accept_inputs([\"Task label\"])\n label = responses[\"Task label\"]\n # check for existence of task\n results = query_with_results(\"select * from task where label = ?\", [label])\n if len(results) == 0:\n print(\"No task found with label '%s'.\" % label)\n return\n # the task exists, so ask the user for the new description\n responses = accept_inputs([\"New description\"])\n # update db\n query_no_results(\"update task set description = ? where label = ?\", [responses[\"New description\"], label])\n print(\"Task with label '%s' updated.\" % label)",
"def display_task(self, task):\n # Visual check for completed tasks\n checked = \" \"\n if task.done is True:\n checked = \"X\"\n # print a formated task\n print \"[{0}] {1}\\n*Tags* | {2} |\\n\".format(\n checked, task._entry, ' | '.join(task.tags))",
"def _run_system(task):\n\n cmd = task.task.format(*task.get_args(), **task.get_kwargs())\n\n print(\"Running: {}\".format(cmd))\n os.system(cmd)",
"def task(self, name):\n pass",
"def delete(self, task=None):\n if task is None:\n print(\"\\n*** Delete Task ***\\n\\nSelect a task index to delete:\")\n self.show()\n while 1:\n try:\n i = int(input(\"\\nIndex? (0 to cancel): \")) - 1\n if i >= 0:\n print(\"Deleted task \\\"\" + self.tasks.pop(i).name + \"\\\".\")\n self.save()\n elif i == -1:\n print(\"Deletion canceled. \")\n else:\n raise IndexError\n break\n except (ValueError, IndexError) as e:\n print(\"\\n\\\"\" + str(i+1) + \"\\\" is not a valid task index.\", type(e))\n print(\"*\"*19)\n else:\n pass",
"def help(self):\n return {\n 'text': 'Available Commands: \\n `/ranti my-task e.g /ranti my-task` \\n To get task assigned to you.\\n'\n ' \\n `/ranti show-task [date]{dth-month-year} e.g /ranti show-task 5th-june-2018` \\n Show all tasks for a particular date \\n'\n '\\n `/ranti show-task [today] e.g /ranti show-task today` \\n Show all tasks for today \\n'\n '\\n `/ranti show-task [tomorrow] e.g /ranti show-task tomorrow` \\n Show all tasks for tomorrow \\n'\n '\\n `/ranti help` \\n This help information \\n \\n Ranti ver: 1.0'\n }",
"def command():\n pass",
"def doTask(self, *args):\n taskId = self.task.get()\n document = self.document_uuid.get()\n visitor = self.visitor_uuid.get()\n self.output.set(str(self.taskEx.executeTask(visitor, document, taskId)))",
"def view_tasks(self):\n if self.db_link.get_num_tasks() > 0:\n self.print_tasks()\n else:\n self.display.print_error('You don\\'t have any tasks! Add a task by calling `python-todo -a`.')",
"def command_short():\n pass",
"def list_tasks(ctx):\n ctx.run(\"invoke --list\")",
"def CommandHelp(paser):\n\n\tprint \"\\n===============Commands List===============\\n\"\n\t\t\n\tprint \"NewProject - {}\".format(NewProject.__doc__)\n\tprint \"DelProject - {}\".format(DelProject.__doc__)\n\tprint \"ShareProject - {}\".format(ShareProject.__doc__)\n\tprint \"StopProject - {}\".format(StopProject.__doc__)\n\tprint \"Help - {}\".format(CommandHelp.__doc__)\n\tprint \"Exit - Finaliza la sesion en la terminal.\"",
"def airflow_commands():\n pass",
"def octopus_task(self, msg, args):\r\n self.tasks.send_task_by_id(msg, args)",
"def add_task(name, func, help, is_default=False):\n cmd = click.Command(name=name, callback=func, help=help)\n cli.add_command(cmd)\n\n if is_default:\n # Store all functions here without name.\n DEFAULT_TASKS_KEY.append(func)\n\n return cli",
"def delete_task(self, args):\n try:\n task = self.validate_alias(args)\n except ValueError, msg:\n print(msg)\n return\n print('Caution! The related tracking will be deleted as well.{eol}'\n 'Do you really want to delete the task? [y/N] '\n ''.format(eol=os.linesep))\n if not helpers.get_yes_no(default='n'):\n return\n self.db.delete_task(task['tid'])\n print(\"The task '%s' has been deleted .\" % args)\n self.set_prompt()",
"def view_task(self, task):\n self.layout.clear_widgets()\n self.add_AdDescription(task.url, task.description)\n self.add_CheckBox(task.checkbox_rating)\n self.add_Slider(task.slider_rating)\n self.add_Toggle_Button(task.toggle_button_rating)\n self.layout.add_widget(TextInput(hint_text = 'Add a comment...', multiline = True))\n self.add_NextButton()\n self.add_Exit_Button()",
"def show_task(self, task_id):\n\n\t\ttask_id = self._validate_task_id(task_id)\n\t\tif task_id:\n\t\t\ttask = self.tasklist.find_task(task_id)\n\t\t\tif task:\n\t\t\t\tif task.priority == 'L':\n\t\t\t\t\tpriority = Fore.YELLOW + Style.BRIGHT + ' ' + task.priority + ' ' + Fore.RESET + Style.NORMAL\n\t\t\t\telif task.priority == 'M':\n\t\t\t\t\tpriority = Fore.BLUE + Style.BRIGHT + ' ' + task.priority + ' ' + Fore.RESET + Style.NORMAL\n\t\t\t\telif task.priority == 'H':\n\t\t\t\t\tpriority = Fore.RED + Style.BRIGHT + ' ' + task.priority + ' ' + Fore.RESET + Style.NORMAL\n\t\t\t\telse:\n\t\t\t\t\tpriority = ''\n\t\t\t\ttemplate = '{0:^3} {1:^3} {2:20} {3:40}'\n\t\t\t\tprint template.format('\\nID', ' Pri', 'Description', 'Note')\n\t\t\t\tprint template.format('---', '---', '--------------------',\n\t\t\t\t '----------------------------------------')\n\t\t\t\tprint template.format(task.id, priority, task.task, task.note)",
"def generate_tasks(self, task):"
] | [
"0.7051473",
"0.66475296",
"0.66164476",
"0.65424675",
"0.64694047",
"0.6421388",
"0.6408608",
"0.6323527",
"0.6208247",
"0.6188428",
"0.6188428",
"0.61856204",
"0.6156182",
"0.6142588",
"0.6068784",
"0.5998007",
"0.5959546",
"0.59573334",
"0.59241444",
"0.5922902",
"0.5920165",
"0.5901189",
"0.5888109",
"0.5881045",
"0.587005",
"0.5868731",
"0.5860865",
"0.5860801",
"0.586052",
"0.5849267"
] | 0.7619803 | 0 |
Command projects display a list of projects. Usage projects [] Description Displays a list of projects for a date period. The command displays last 10 projects unless the period is specified. Period parameter | def do_projects(self, arg):
args = shlex.split(arg)
limit = 10
from_date = to_date = ''
if args:
limit = 0
try:
from_date, to_date = helpers.parse_date_parameters(args)
except ValueError, msg:
print(msg)
return
projects = self.db.get_projects_with_activity_field(
from_date, to_date, limit=limit)
refined = map(lambda x: [
x['pid'], x['name'],
'[Active]' if x['active'] else '[closed]',
datetime.datetime.strftime(x['created'], '%c').decode('utf8'),
x['description']], projects)
print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',
'Description'])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_projects():\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_projects()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Provision Network\"])\n projects = ret[constants.RETURN_VALUE_KEY]\n for project in projects:\n table.add_row(project)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects",
"def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)",
"def projects(args):\n _projects = lib.get_projects(\n args.target, username=args.username, password=args.password\n )\n if _projects:\n print(\"\\n\".join(_projects))",
"def list_namespaced_project(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_namespaced_project\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/projects'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ProjectList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def list_projects(self):\n data = self._run(\n url_path=\"projects/list\"\n )\n projects = data['result'].get('projects', [])\n return [self._project_formatter(item) for item in projects]",
"def list_projects(arn=None, nextToken=None):\n pass",
"def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")",
"def project_show(ctx, args):\n for project_id in args:\n data = ctx.obj.get_project_by_project_id(project_id)\n output_json_data(data)",
"def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )",
"def list_projects(ctx):\n pprint(ctx.obj.groups.get().data)",
"def project_list(ctx, parent_project_id, output_format, columns):\n data = ctx.obj.get_projects(parent_project_id=parent_project_id)\n if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['project'])\n elif output_format == 'json':\n output_json_data(data)",
"def list_projects():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_projects\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)[\"projects\"]",
"def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()",
"def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)",
"def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]",
"def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()",
"def get_project_list():\n return parse_list_output(Popen(\n 'openstack project list'.split(), stdout=STDOUT, stderr=STDERR\n ).communicate()[0])",
"def project():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n\n menu = M(c=\"project\")(\n M(\"Projects\", f=\"project\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Locations\", f=\"location\")(\n M(\"Map\", m=\"map\"),\n M(\"Contacts\", f=\"location_contact\"),\n ),\n M(\"Reports\", f=\"location\", m=\"report\")(\n M(\"3W\", f=\"location\", m=\"report\"),\n M(\"Beneficiaries\", f=\"beneficiary\", m=\"report\"),\n #M(\"Indicators\", f=\"indicator\", m=\"report\",\n # check=indicators,\n # ),\n #M(\"Indicators over Time\", f=\"indicator\", m=\"timeplot\",\n # check=indicators,\n # ),\n M(\"Funding\", f=\"organisation\", m=\"report\"),\n ),\n M(\"Import\", f=\"project\", m=\"import\", p=\"create\", restrict=[ADMIN])(\n M(\"Import Projects\", m=\"import\", p=\"create\"),\n M(\"Import Project Organizations\", f=\"organisation\",\n m=\"import\", p=\"create\"),\n M(\"Import Project Communities\", f=\"location\",\n m=\"import\", p=\"create\"),\n ),\n M(\"Activity Types\", f=\"activity_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Beneficiary Types\", f=\"beneficiary_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Sectors\", f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Themes\", f=\"theme\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )\n\n return menu",
"def list(self, *args, **kwargs):\n projects = Project.objects.all()\n return self.list_by(projects, self.serializer_class)",
"def projects(self):\r\n return p.Projects(self)",
"def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li",
"def project_list(cursor):\n query = \"SELECT * FROM projects\"\n try:\n cursor.execute(query, {},)\n except Exception as e:\n on_error(e)\n else:\n projects = cursor.fetchall()\n raise Return((projects, None))",
"def _page_projects(self):\n return self._open(self.app.page_projects)",
"def get_all_projects(engine): \n # Query db\n# sql = (\"SELECT a.project_id, \"\n# \" b.o_number, \"\n# \" a.project_name, \"\n# \" a.project_description \"\n# \"FROM nivadatabase.projects a, \"\n# \" nivadatabase.projects_o_numbers b \"\n# \"WHERE a.project_id = b.project_id \"\n# \"ORDER BY a.project_id\")\n sql = (\"SELECT project_id, \"\n \" project_name, \"\n \" project_description \"\n \"FROM nivadatabase.projects \"\n \"ORDER BY project_id\")\n df = pd.read_sql(sql, engine)\n\n return df",
"def get_projects():\n return Project.query.all()",
"def getprojects(self):\n resp = self.conn.request('GET', self.URLS['allprojects'], dict(api_key=self.api_key))\n data = resp.data.decode('utf-8')\n jdata = json.loads(data)['projects']\n # Convert nested JSON documents\n for project_index in range(len(jdata)):\n for field in ('options_json', 'templates_json'):\n jdata[project_index][field] = json.loads(jdata[project_index][field])\n # Pass project details dictionaries to constructors, return array\n return [PhProject(self, project) for project in jdata]",
"def query_project(self, project_query_options):\n\n query = \"select * from project where \"\n row_names = [\"Proj_ID\", \"Cus_ID\", \"Emp_ID\", \"Proj_Date\",\n \"Proj_Descrpt\", \"Proj_EstDateSt\", \"Proj_EstDateEnd\",\n \"Proj_EstBudget\", \"Proj_ActDateSt\",\n \"Proj_ActDateEnd\", \"Proj_ActCost\"]\n\n entries = project_query_options\n options_index = []\n arguments = []\n\n index = 0\n for item in entries:\n if item is not None:\n arguments.append(item)\n options_index.append(index)\n index += 1\n\n count = 0\n for arg in arguments:\n if count == 0:\n query = query + \"{}='{}' \".format(\n row_names[options_index[count]],\n arg)\n else:\n query = query + \"and {}='{}' \".format(\n row_names[options_index[count]],\n arg)\n count += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)",
"def test_list_project(self):\n pass",
"def project():"
] | [
"0.6390584",
"0.6339536",
"0.63156116",
"0.625599",
"0.62274384",
"0.61688644",
"0.61150175",
"0.60160017",
"0.60131425",
"0.5933992",
"0.59230304",
"0.5894313",
"0.5874425",
"0.5789164",
"0.57819515",
"0.5771557",
"0.57586426",
"0.5741603",
"0.5733803",
"0.5732437",
"0.5715285",
"0.56815773",
"0.5677081",
"0.5663899",
"0.56562185",
"0.5642602",
"0.5597576",
"0.5571438",
"0.55640817",
"0.5558524"
] | 0.71809304 | 0 |
Command tasks display a list of tasks. Usage tasks [] Description Displays a list of tasks for a date period. The command displays last 10 tasks unless the period is specified. Period parameter | def do_tasks(self, arg):
args = shlex.split(arg)
if not args:
# TODAY
started = datetime.date.fromtimestamp(0)
finished = datetime.date.today()
limit = 10
else:
limit = 0
try:
started, finished = helpers.parse_date_parameters(args)
except ValueError, err:
print(err)
return
tasks = self.db.get_profiled_tasks(started, finished, limit)
def _display_fields(task):
return [
task['tid'],
u'{task}#{project}'.format(
task=task['tname'], project=task['pname']),
u'{delta} / {started}'.format(
delta=helpers.timedelta_to_human(datetime.datetime.now() -
task['started']),
started=datetime.datetime.strftime(
task['started'], '%c').decode('utf8')
) if not task['finished'] else '[closed]',
task['description'].decode('utf8')
]
refined = map(_display_fields, tasks)
print(tabulate(refined, ['ID', 'Task', 'Activity', 'Description'])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_tasks(self, tasks=None, date_format=None):\n\n\t\tif not tasks:\n\t\t\ttasks = self.tasklist.tasks\n\n\t\tif len(tasks) > 0:\n\n\t\t\ttemplate = '{0:^3} {1:20} {2:^3} {3:20} {4:15} {5:20}'\n\t\t\tprint template.format('\\nID', 'Description', ' Pri', 'Due', 'Created', 'Tags')\n\t\t\tprint template.format('---', '--------------------', '---', '--------------------', '---------------',\n\t\t\t '--------------------')\n\t\t\tfor task in tasks:\n\t\t\t\tif task.priority == 'L':\n\t\t\t\t\tpriority = Fore.YELLOW + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telif task.priority == 'M':\n\t\t\t\t\tpriority = Fore.BLUE + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telif task.priority == 'H':\n\t\t\t\t\tpriority = Fore.RED + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telse:\n\t\t\t\t\tpriority = ''\n\n\t\t\t\tif task.due_date is None:\n\t\t\t\t\tdue_date = ''\n\t\t\t\telse:\n\t\t\t\t\tif date_format:\n\t\t\t\t\t\tdue_date = task.due_date.rsplit(' ', 1)[0].ljust(20)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdue_date = (arrow.get(task.due_date, task.due_date_format).humanize()).ljust(20)\n\n\t\t\t\t\tif not task.completed:\n\t\t\t\t\t\ttoday = arrow.now()\n\t\t\t\t\t\tdiff = arrow.get(task.due_date, task.due_date_format) - today\n\t\t\t\t\t\tif diff.days >= 1 and diff.seconds > 0:\n\t\t\t\t\t\t\tdue_date = Fore.CYAN + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL\n\t\t\t\t\t\telif diff.days >= 0:\n\t\t\t\t\t\t\tdue_date = Fore.BLUE + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL\n\t\t\t\t\t\telif diff.days <= 0:\n\t\t\t\t\t\t\tdue_date = Fore.RED + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL\n\n\t\t\t\tif date_format:\n\t\t\t\t\tage = (str(task.creation_date).split()[0]).ljust(15) # drop the time zone\n\t\t\t\telse:\n\t\t\t\t\tage = (arrow.get(task.creation_date, 'MM/DD/YYYY h:mm:ss A ZZ').humanize()).ljust(15)\n\n\t\t\t\tif task.note:\n\t\t\t\t\tdesc = task.task + ' *'\n\t\t\t\telse:\n\t\t\t\t\tdesc = task.task\n\n\t\t\t\tif task.completed:\n\t\t\t\t\tif task.priority:\n\t\t\t\t\t\tpriority = task.priority\n\t\t\t\t\telse:\n\t\t\t\t\t\tpriority = ''\n\t\t\t\t\ttask_id = Fore.WHITE + Style.BRIGHT + Back.WHITE + str(task.id).center(3)\n\t\t\t\t\ttags = str(task.tags) + Fore.RESET + Style.NORMAL + Back.RESET\n\t\t\t\t\tprint template.format(task_id, desc, priority, due_date, age, tags)\n\t\t\t\telse:\n\t\t\t\t\tprint template.format(task.id, desc, priority, due_date, age, task.tags)\n\n\t\t\tprint self.legend\n\t\telse:\n\t\t\tprint('\\nThere are no tasks to display!\\n')",
"def command(task_id, tail, wip, limit):\n if task_id:\n task = storage.get_by_id(task_id)\n\n if not task:\n click.echo(f\"Task {task_id} not found.\")\n sys.exit(1)\n\n tasks = [task]\n else:\n tasks = storage.all(limit=limit, reverse=tail, wip=wip)\n\n print_header()\n for task in tasks:\n show_task(task)",
"def show_all_tasks(self):\n tasks = self.session.query(self.Table).order_by(self.Table.deadline).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}. {task.deadline.strftime(\"%d %b\")}')\n else:\n print('Nothing to do!')\n print()",
"def show_tasks():\n\n task = Task(connection=connection, cursor=cursor)\n\n all_tasks = task.get_all_tasks()\n\n context = {\n 'all_tasks': all_tasks\n }\n\n return render_template('pages/tables/tasks.html', **context)",
"def print_tasks(self):\n unformatted_rows = self.db_link.get_tasks()\n formatted_rows = self.display.format_row(unformatted_rows)\n self.display.print_task_list_formatted(formatted_rows)",
"def print_tasks(self, tasks):\n\n index = 0\n\n while True:\n self.print_task(index, tasks)\n\n self.paging_options(index, tasks)\n\n user_choice = input(\"\\nPlease select one of the paging options: \").lower().strip()\n\n if index == 0 and user_choice == 'n':\n index += 1\n elif 0 < index < len(tasks) - 1 and user_choice == 'n':\n index += 1\n elif 0 < index <= len(tasks) - 1 and user_choice == 'p':\n index -= 1\n elif user_choice == 'e':\n self.edit_task(index, tasks)\n elif user_choice == 'd':\n self.task.delete_task(tasks[index])\n self.main_menu()\n elif user_choice == 'm':\n self.main_menu()\n else:\n input(\"\\nInvalid choice, please try again :\")",
"def list_tasks(ctx):\n ctx.run(\"invoke --list\")",
"def show_weeks_tasks(self):\n for day in [datetime.today() + timedelta(days=i) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.deadline == day.strftime('%Y-%m-%d')).\\\n order_by(self.Table.deadline).all()\n print(f'{day.strftime(\"%A\")} {day.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()",
"def list_tasks(self, toplevel=True, keys=None):\n\n message = {}\n # show all tasks by default\n if keys == None:\n keys = self.list_task_keys()\n\n for key in keys:\n try:\n last_run_instance = TaskInstance.objects.filter(task_key=key).exclude(completed=None).order_by('-completed').values_list('completed','task_key')[0]\n last_run = time.mktime(last_run_instance[0].timetuple())\n #no instances\n except (KeyError, IndexError):\n last_run = None\n\n # render the form if the task has one\n task = self.registry[key, None].tasks[key]\n if task.form:\n t = loader.get_template('task_parameter_form.html')\n c = Context ({'form':task.form()})\n rendered_form = t.render(c)\n else:\n rendered_form = None\n\n message[key] = {'description':task.description ,\n 'last_run':last_run,\n 'form':rendered_form}\n\n return message",
"def show(self):\n i = 0\n print()\n for task in self.tasks:\n print(\"\\t\", i + 1, \". \", task.name, \"(\", task.priority, \")\")\n i += 1",
"def getTasks():\n\ttasks = open(\"todo.txt\").readlines()\n\tif len(tasks):\n\t for num in range(len(tasks) - 1, -1, -1):\n\t print(\"[%d] %s\" % (num + 1, tasks[num]), end=\"\")\n\telse:\n\t print(\"There are no pending todos!\")",
"def view_tasks(self):\n if self.db_link.get_num_tasks() > 0:\n self.print_tasks()\n else:\n self.display.print_error('You don\\'t have any tasks! Add a task by calling `python-todo -a`.')",
"def show_tasks():\n top_level_tasks = query_with_results(\"select label, description from task where parent = ''\", [])\n for task in top_level_tasks:\n _show_task(task)",
"def main(to_be_scheduled):\n\n tasks = order_by_ftime(to_be_scheduled)\n print select_activity(tasks)",
"def show_today_tasks(self):\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline == today.strftime('%Y-%m-%d')).all()\n print(f'Today {today.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()",
"def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)",
"def view_tasks():\n task_list = []\n incomplete_task_list = Tasks.objects.filter(is_complete=False)\n for task in incomplete_task_list:\n tasks = [] #create data structure\n tasks.append(task.id) #add ID \n tasks.append(task.task_text) #add text\n task_list.append(tasks) #append data structure\n\n return task_list",
"async def list_tasks():",
"def list_tasks(q = None):\n to = {\"p\":{}, \"v\":{}}\n for k, v in to.items():\n pin = HeaterController.pin_ids[k]\n state = subprocess.check_output([\"gpio\", 'read', pin]).strip()\n to[k][\"state\"] = \"on\" if state==\"0\" else \"off\"\n to[k][\"on_id\"] = \"\"\n to[k][\"on_time\"] = \"\"\n to[k][\"off_id\"] = \"\"\n to[k][\"off_time\"] = \"\"\n\n tasks = []\n if q is None:\n output = subprocess.check_output([\"atq\"])\n else:\n output = subprocess.check_output([\"atq\", \"-q\", q])\n for t in output.split(\"\\n\"):\n m = HeaterController.task_parse.match(t.strip())\n if m is not None:\n task_id = m.group(1)\n task_time = datetime.strptime(m.group(2), r'%a %b %d %H:%M:%S %Y').strftime(r'%y%m%d%H%M')\n q_name = m.group(3)\n tasks.append((task_id, task_time, q_name))\n tasks = sorted(tasks, key=lambda x: x[2] + x[1])\n while len(tasks):\n task_id, task_time, q_name = tasks.pop(0)\n output = subprocess.check_output([\"at\", \"-c\", task_id])\n # get last line of the output\n lines = output.strip().split(\"\\n\")\n # find value of -o parameter that specifies operation\n m = HeaterController.cmd_parse.match(lines[-1].strip())\n if m is not None:\n cmd = m.group(1)\n if cmd == r'on':\n to[q_name][\"on_id\"] = task_id\n to[q_name][\"on_time\"] = task_time\n elif cmd == r'off':\n to[q_name][\"off_id\"] = task_id\n to[q_name][\"off_time\"] = task_time\n else:\n assert False, \"Unexpected value of -o parameter: {}\".format(cmd)\n\n return {\"tasks\":to}",
"def show_tasks(self):\n task_ids = [\n t and t['id'] for t in self.controller.selected_tasks\n ]\n\n if self._check_cluster():\n self.print_list(\n ('id', 'status'), self.controller.get_tasks(),\n lambda x: task_ids.index(x['id'])\n )",
"def show_tasks_by_priority(self, tasks=None, date_format=None):\n\n\t\tlow_dict_o = OrderedDict()\n\t\tmed_dict_o = OrderedDict()\n\t\thigh_dict_o = OrderedDict()\n\t\tno_dict_o = OrderedDict()\n\t\tcompleted_dict_o = OrderedDict()\n\n\t\tlow_dict = {}\n\t\tmed_dict = {}\n\t\thigh_dict = {}\n\t\tno_dict = {}\n\t\tcompleted_dict = {}\n\n\t\ttemp_dict = {}\n\n\t\tif not tasks:\n\t\t\ttasks = self.tasklist.tasks\n\n\t\tif len(tasks) > 0:\n\t\t\tfor task in tasks:\n\t\t\t\tif task.due_date is None:\n\t\t\t\t\tdue_date = ''\n\t\t\t\telse:\n\t\t\t\t\tif date_format:\n\t\t\t\t\t\tdue_date = task.due_date.rsplit(' ', 1)[0].ljust(20)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdue_date = (arrow.get(task.due_date, task.due_date_format).humanize()).ljust(20)\n\n\t\t\t\tage = (str(task.creation_date).split()[0]).ljust(15) # drop the time zone\n\n\t\t\t\tif task.note:\n\t\t\t\t\tdesc = task.task + ' *'\n\t\t\t\telse:\n\t\t\t\t\tdesc = task.task\n\n\t\t\t\tif task.completed:\n\t\t\t\t\tcompleted_dict[task.id] = task.priority, due_date, age, desc, task.tags\n\t\t\t\telif task.priority == 'L':\n\t\t\t\t\tlow_dict[task.id] = [task.priority, due_date, age, desc, task.tags]\n\t\t\t\telif task.priority == 'M':\n\t\t\t\t\tmed_dict[task.id] = [task.priority, due_date, age, desc, task.tags]\n\t\t\t\telif task.priority == 'H':\n\t\t\t\t\thigh_dict[task.id] = [task.priority, due_date, age, desc, task.tags]\n\t\t\t\telse:\n\t\t\t\t\tno_dict[task.id] = [task.priority, due_date, age, desc, task.tags]\n\n\t\telse:\n\t\t\tprint('\\nThere are no tasks to display!\\n')\n\t\t\treturn\n\n\t\tfor key, value in sorted(no_dict.items(), key=lambda e: e[1][1]):\n\t\t\tif value[1] is not '':\n\t\t\t\tno_dict_o[key] = value\n\t\t\telse:\n\t\t\t\ttemp_dict[key] = value\n\n\t\tfor key in temp_dict:\n\t\t\tno_dict_o[key] = temp_dict[key]\n\n\t\ttemp_dict.clear()\n\n\t\tfor key, value in sorted(low_dict.items(), key=lambda e: e[1][1]):\n\t\t\tif value[1] is not '':\n\t\t\t\tlow_dict_o[key] = value\n\t\t\telse:\n\t\t\t\ttemp_dict[key] = value\n\n\t\tfor key, value in temp_dict.items():\n\t\t\tlow_dict_o[key] = value\n\n\t\ttemp_dict.clear()\n\n\t\tfor key, value in sorted(med_dict.items(), key=lambda e: e[1][1]):\n\t\t\tif value[1] is not '':\n\t\t\t\tmed_dict_o[key] = value\n\t\t\telse:\n\t\t\t\ttemp_dict[key] = value\n\n\t\tfor key, value in temp_dict.items():\n\t\t\tmed_dict_o[key] = value\n\n\t\ttemp_dict.clear()\n\n\t\tfor key, value in sorted(high_dict.items(), key=lambda e: e[1][1]):\n\t\t\tif value[1] is not '':\n\t\t\t\thigh_dict_o[key] = value\n\t\t\telse:\n\t\t\t\ttemp_dict[key] = value\n\n\t\tfor key, value in sorted(temp_dict.items(), key=lambda e: e[1][1]):\n\t\t\thigh_dict_o[key] = value\n\n\t\ttemp_dict.clear()\n\n\t\tfor key, value in sorted(completed_dict.items(), key=lambda e: e[1][1]):\n\t\t\tif value[1] is not '':\n\t\t\t\tcompleted_dict_o[key] = value\n\t\t\telse:\n\t\t\t\ttemp_dict[key] = value\n\n\t\tfor key, value in temp_dict.items():\n\t\t\tcompleted_dict_o[key] = value\n\n\t\ttemp_dict.clear()\n\n\t\tdel low_dict\n\t\tdel med_dict\n\t\tdel high_dict\n\t\tdel no_dict\n\t\tdel completed_dict\n\n\t\ttoday = arrow.now()\n\n# TODO: Figure out why the key is a tuple instead of a list\n\n\t\tfor dict in [low_dict_o, med_dict_o, high_dict_o, no_dict_o]:\n\t\t\tfor key, value in dict.items():\n\t\t\t\tdict[key] = list(dict[key]) # hack - how is this key a tuple!?!\n\t\t\t\tif value[0] == 'L':\n\t\t\t\t\tdict[key][0] = Fore.YELLOW + Style.BRIGHT + value[0].center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telif value[0] == 
'M':\n\t\t\t\t\tdict[key][0] = Fore.BLUE + Style.BRIGHT + value[0].center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telif value[0] == 'H':\n\t\t\t\t\tdict[key][0] = Fore.RED + Style.BRIGHT + value[0].center(3) + Fore.RESET + Style.NORMAL\n\t\t\t\telse:\n\t\t\t\t\tdict[key][0] = ''\n\n\t\t\t\ttask = self.tasklist.find_task(key)\n\t\t\t\tif task.due_date:\n\t\t\t\t\tdiff = arrow.get(task.due_date, task.due_date_format) - today\n\t\t\t\t\tif diff.days >= 1 and diff.seconds > 0:\n\t\t\t\t\t\tdict[key][1] = Fore.CYAN + Style.BRIGHT + value[1] + Fore.RESET + Style.NORMAL\n\t\t\t\t\telif diff.days >= 0:\n\t\t\t\t\t\tdict[key][1] = Fore.BLUE + Style.BRIGHT + value[1] + Fore.RESET + Style.NORMAL\n\t\t\t\t\telif diff.days <= 0:\n\t\t\t\t\t\tdict[key][1] = Fore.RED + Style.BRIGHT + value[1] + Fore.RESET + Style.NORMAL\n\n\t\ttemplate = '{0:^3} {1:20} {2:^3} {3:20} {4:15} {5:20}'\n\t\tprint template.format('\\nPri', 'Description', 'ID', 'Due', 'Created', 'Tags')\n\t\tprint template.format('---', '--------------------', '---', '--------------------', '---------------',\n\t\t '--------------------')\n\n\t\tif len(high_dict_o) > 0:\n\t\t\tfor key in high_dict_o:\n\t\t\t\tprint template.format(high_dict_o[key][0], high_dict_o[key][3], key, high_dict_o[key][1],\n\t\t\t\t high_dict_o[key][2], high_dict_o[key][4])\n\t\tif len(med_dict_o) > 0:\n\t\t\tfor key in med_dict_o:\n\t\t\t\tprint template.format(med_dict_o[key][0], med_dict_o[key][3], key, med_dict_o[key][1],\n\t\t\t\t med_dict_o[key][2], med_dict_o[key][4])\n\t\tif len(low_dict_o) > 0:\n\t\t\tfor key in low_dict_o:\n\t\t\t\tprint template.format(low_dict_o[key][0], low_dict_o[key][3], key, low_dict_o[key][1],\n\t\t\t\t low_dict_o[key][2], low_dict_o[key][4])\n\t\tif len(no_dict_o) > 0:\n\t\t\tfor key in no_dict_o:\n\t\t\t\tprint template.format(no_dict_o[key][0], no_dict_o[key][3], key, no_dict_o[key][1],\n\t\t\t\t no_dict_o[key][2], no_dict_o[key][4])\n\n\t\tcompleted_template = Fore.WHITE + Style.BRIGHT + Back.WHITE + '{0:^3} {1:20} {2:^3} {3:20} {4:15} {5:20}' + \\\n\t\t Fore.RESET + Style.NORMAL + Back.RESET\n\t\tif len(completed_dict_o) > 0:\n\t\t\tfor key in completed_dict_o:\n\t\t\t\tif completed_dict_o[key][0]:\n\t\t\t\t\tpriority = completed_dict_o[key][0]\n\t\t\t\telse:\n\t\t\t\t\tpriority = ''\n\t\t\t\tprint completed_template.format(priority, completed_dict_o[key][3], key, completed_dict_o[key][1],\n\t\t\t\t completed_dict_o[key][2], completed_dict_o[key][4])\n\t\tprint self.legend",
"def task_list(request):\n ip = get_ip(request)\n tasks = Task.objects.filter(ip=ip).order_by(\"-start_time\")\n # pager\n paginator = Paginator(tasks, 15)\n num_pages = paginator.num_pages\n page_list = paginator.page_range\n page_number = request.GET.get(\"page\", 1)\n page_obj = paginator.get_page(page_number)\n\n current_page = page_obj.number\n display_page_list = []\n if len(page_list) <= MAX_PAGE_NUM:\n for i in page_list:\n display_page_list.append((i, f\"?page={i}\"))\n else:\n if current_page <= num_pages - MAX_PAGE_NUM:\n for i in range(current_page, current_page + 4):\n display_page_list.append((i, f\"?page={i}\"))\n display_page_list.append((\"...\", \"#\"))\n for i in range(1, 0 - 1, -1):\n t = num_pages - i\n display_page_list.append((t, f\"?page={t}\"))\n pass\n else:\n for i in range(num_pages - MAX_PAGE_NUM, num_pages + 1):\n display_page_list.append((i, f\"?page={i}\"))\n\n return render(request, \"ponsol2web/task_list.html\",\n {\"count\": num_pages, \"page_obj\": page_obj, \"page_list\": display_page_list})",
"def list(ctx, id, json):\n\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n if id != None:\n return ctx.invoke(show, id=id, json=json)\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.list()\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n click.echo(\"Fail: error response\")\n sys.exit(1)\n\n if json:\n print(jsn.dumps(dict_resp, sort_keys=True, indent=4))\n return\n try:\n task.print_list(dict_resp)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))",
"def human_firendly_print_repository_scheduled_tasks(scheduled):\n name_pad = 5\n for name in scheduled:\n if len(name) > name_pad:\n name_pad = len(name)\n name_pad += 1\n\n header = f'{\"Name\":<{name_pad}}| Task type | Next run'\n print('Scheduled tasks:')\n print(header)\n print('-' * (len(header) + 5))\n\n for task in scheduled.values():\n print(f'{task[\"name\"]:<{name_pad}}| {task[\"task_type\"].title():<10}| {task[\"next_run\"]}')",
"def see_tasks(self, widget):\n my_task_list = tasklistwindow.TaskListWindow(self.task_list)",
"def getTasks(server, appId, maxNumberTasks, completedOnly, oper = 0, fileName = 'data/jsonTasksInfo.dat'):\n if oper == 0:\n if completedOnly == 1:\n JSONdata = urllib2.urlopen(url=server+\"/api/task?app_id=\"+ \\\n str(appId)+\"&state=completed&limit=\"+ \\\n str(maxNumberTasks)).read()\n else:\n JSONdata = urllib2.urlopen(url=server+\"/api/task?app_id=\"+ \\\n str(appId)+\"&limit=\"+str(maxNumberTasks)).read()\n data = json.loads(JSONdata)\n with open(fileName,'w') as outfile:\n json.dump(data, outfile)\n outfile.close()\n elif oper == 1:\n with open(fileName,'r') as outfile:\n data = json.load(outfile)\n outfile.close()\n numberTasks = len(data)\n tasksInfo = []\n for item in range(numberTasks):\n tasksInfo.append({'taskId':data[item]['id'], \\\n 'area':data[item]['info']['tile']['restrictedExtent']})\n print 'number of total completed tasks: ', len(tasksInfo)\n return tasksInfo",
"def human_friendly_print_running_tasks(one_off, scheduled):\n all_vals = []\n name_pad = 5\n if one_off:\n for name in one_off:\n if len(name) > name_pad:\n name_pad = len(name)\n all_vals += one_off.values()\n\n if scheduled:\n for name in scheduled:\n if len(name) > name_pad:\n name_pad = len(name)\n all_vals += scheduled.values()\n\n name_pad += 1\n\n header = f'{\"Name\":<{name_pad}}| Task type | Status | Start'\n print(header)\n print('-' * (len(header) + 5))\n for task in all_vals:\n print(f'{task[\"name\"]:<{name_pad}}| {task[\"type\"].title():<10}| {task[\"status\"]:<8} | {task[\"start\"]}')",
"def get_completed_tasks_in_tod():\n try:\n tod_file_data = load_data(os.getenv('TOD_FP'))\n except FileNotFoundError:\n return []\n completed_tasks = []\n tod_file_data = tod_file_data.split('\\n')\n\n for line in tod_file_data:\n if line == '' or line[0] != '[' or line[1] != 'X':\n continue\n completed_task = (f\"{line[4:-7]} {line[-6:]}\"\n if line[-6:] != '(0:00)'\n else line[4:-7])\n completed_tasks.append(completed_task)\n\n return completed_tasks",
"async def list_tasks(fields: Set[str] = None):\n tasks = celery_app.describe_tasks()\n tasks = [TaskOut(**task).dict(include=fields) for task in tasks]\n return tasks",
"async def list_tasks_periodically() -> None:\n while True:\n if sys.version_info >= (3, 8): # The task introspection API we use is not available before Python 3.8\n print(\n \"\\nRunning tasks:\\n\"\n + \"\\n\".join(f\"{i:4}: {t.get_coro()}\" for i, t in enumerate(asyncio.all_tasks())),\n file=sys.stderr,\n )\n else:\n print(f\"\\nRunning {len(asyncio.all_tasks())} tasks\")\n await asyncio.sleep(10)"
] | [
"0.7118641",
"0.65293884",
"0.6405276",
"0.6308683",
"0.62239516",
"0.6132152",
"0.61177206",
"0.61094075",
"0.6094835",
"0.6088848",
"0.6059728",
"0.6005066",
"0.5996612",
"0.5913682",
"0.59072435",
"0.5889187",
"0.5888562",
"0.58667386",
"0.5772023",
"0.570772",
"0.5666861",
"0.56510377",
"0.5645847",
"0.5626777",
"0.5615121",
"0.56005275",
"0.55841905",
"0.55724716",
"0.5524925",
"0.5519414"
] | 0.6915537 | 1 |
Command active display an active task Usage active Description Display an active task if there is one. | def do_active(self, arg):
task = self.db.get_active_task()
if not task:
print("There is not any active task yet.")
return
refined = [[
task['tid'],
'#'.join([task['tname'], task['pname']]),
datetime.datetime.strftime(task['started'], '%c').decode('utf8'),
helpers.seconds_to_human(
(datetime.datetime.now() - task['started']).total_seconds()),
task['description']
]]
print(tabulate(refined, ['ID', 'Task', 'Started at', 'Spent', 'Description'])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def usage(self, task, verbose=False):\n print(\"GBTIDL> this command is deprecated, just use the ipython methods\")\n # if task is a string, find the function name\n _task = task\n help(_task)",
"def display_task(self, task):\n # Visual check for completed tasks\n checked = \" \"\n if task.done is True:\n checked = \"X\"\n # print a formated task\n print \"[{0}] {1}\\n*Tags* | {2} |\\n\".format(\n checked, task._entry, ' | '.join(task.tags))",
"def print_usage(self):\n print((\"@brief Usage is not defined for command \" + self.command))",
"def usage():",
"def usage():",
"def execute_usage_command() -> MarkdownString:\n return execute_help_command(\"usage\")",
"def print_usage_command(self):\n print self.get_usage_command()",
"def print_usage_command(self):\n print self.get_usage_command()",
"def usage():\n pass",
"def help(ctx):\n print(\"\"\" Usage invoke <task>\nTasks:\n build:\n build an artifact. NOTE - do not put spaces in the author name\n release:\n mark a Release Candidate or a Hotfix as released to production\n usage: invoke release <release name>\n \"\"\")",
"def get_usage_command(self):\n return textwrap.fill(self.expand_prog_name(\"Type '%prog help' for usage information.\"), 78)",
"def usage():\n return _usage",
"def get_usage_command(self):\n return textwrap.fill(self.sbtools.parser.expand_prog_name(\"Type '%prog help %s' for usage.\") % (self.tool.get_command()), 78)",
"def showUsage():\n None",
"def help_help(self):\n print(\"List commands or print details about a command\")",
"def usage(self, subcommand):\r\n if len(self.option_list) > 0:\r\n usage = '%%prog %s [options] %s' % (subcommand, self.args)\r\n else:\r\n usage = '%%prog %s %s' % (subcommand, self.args)\r\n if self.help:\r\n return '%s\\n\\n%s' % (usage, self.help)\r\n else:\r\n return usage",
"def usage() :\n\n print usage.__doc__",
"def help(self):\n return {\n 'text': 'Available Commands: \\n `/ranti my-task e.g /ranti my-task` \\n To get task assigned to you.\\n'\n ' \\n `/ranti show-task [date]{dth-month-year} e.g /ranti show-task 5th-june-2018` \\n Show all tasks for a particular date \\n'\n '\\n `/ranti show-task [today] e.g /ranti show-task today` \\n Show all tasks for today \\n'\n '\\n `/ranti show-task [tomorrow] e.g /ranti show-task tomorrow` \\n Show all tasks for tomorrow \\n'\n '\\n `/ranti help` \\n This help information \\n \\n Ranti ver: 1.0'\n }",
"def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)",
"def usage(self, subcommand):\n if subcommand == self.run_alias:\n return self.usage_alias(subcommand)\n return \"\"\"\n ./manage.py alias\n ./manage.py alias name\n ./manage.py alias name[=value]\n ./manage.py unalias name\n\n Save commands and parameters as shortcuts.\n Aliases are stored in '%s' in the same directory of 'manage.py'.\n\n Aliases Usage:\n ./manage.py name\n \"\"\" % CFG_FILE",
"def show_task1(self):\n self._show_task(self.controller.CURRENT)",
"def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()",
"def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"",
"def usage(self, subcommand):\n usage = '%%prog %s [options] %s' % (subcommand, self.args)\n if self.help:\n return '%s\\n\\n%s' % (usage, self.help)\n else:\n return usage",
"def get_inactive_command_text(self):\n command = data_model.COMMAND_INACTIVE_HIT\n text = None\n if self.status == self.STATUS_DISCONNECT:\n text = (\n 'You disconnected in the middle of this HIT and were '\n 'marked as inactive. As these HITs often require real-'\n 'time interaction, it is no longer available for '\n 'completion. Please return this HIT and accept a new one '\n 'if you would like to try again.'\n )\n elif self.status == self.STATUS_DONE:\n command = data_model.COMMAND_INACTIVE_DONE\n text = (\n 'You disconnected after completing this HIT without '\n 'marking it as completed. Please press the done button '\n 'below to finish the HIT.'\n )\n elif self.status == self.STATUS_EXPIRED:\n text = (\n 'You disconnected in the middle of this HIT and the '\n 'HIT expired before you reconnected. It is no longer '\n 'available for completion. Please return this HIT and '\n 'accept a new one if you would like to try again.'\n )\n elif self.status == self.STATUS_PARTNER_DISCONNECT:\n command = data_model.COMMAND_INACTIVE_DONE\n text = (\n 'One of your partners disconnected in the middle of the '\n 'HIT. We won\\'t penalize you for their disconnect, so '\n 'please use the button below to mark the HIT as complete.'\n )\n elif self.status == self.STATUS_PARTNER_DISCONNECT_EARLY:\n command = data_model.COMMAND_INACTIVE_HIT\n text = (\n 'One of your partners disconnected in the middle of the '\n 'HIT. We won\\'t penalize you for their disconnect, but you'\n ' did not complete enough of the task to submit the HIT. '\n 'Please return this HIT and accept a new one if you would '\n 'like to try again.'\n )\n elif self.status == self.STATUS_RETURNED:\n text = (\n 'You disconnected from this HIT and then returned '\n 'it. As we have marked the HIT as returned, it is no '\n 'longer available for completion. Please accept a new '\n 'HIT if you would like to try again'\n )\n else:\n # We shouldn't be getting an inactive command for the other\n # states so consider this a server error\n text = (\n 'Our server was unable to handle your reconnect properly '\n 'and thus this HIT no longer seems available for '\n 'completion. Please try to connect again or return this '\n 'HIT and accept a new one.'\n )\n\n return text, command",
"def help_command(self, sender, *args):\n helptext = '\\n'.join([cmd.__doc__ or \"%s - no docstring\" % cmd.__name__ for _, cmd in self.patterns])\n return \"\"\"`text` - new task \\n%s\n \"\"\" % helptext",
"def command_short():\n pass",
"def __is_active(self, command):\n return True",
"def usage():\n with open(USAGE, 'r') as f:\n for line in f:\n print(line)",
"def menu(task_name):\n task_name = Fore.GREEN + '==== ' + Fore.RESET + task_name + Fore.GREEN + ' ' \n return '{s:=<60}'.format(s=task_name)"
] | [
"0.62046105",
"0.6179543",
"0.6083674",
"0.60648614",
"0.60648614",
"0.6022669",
"0.59849316",
"0.59849316",
"0.5978521",
"0.5974868",
"0.59261936",
"0.58891577",
"0.58439004",
"0.5770843",
"0.5768293",
"0.5751",
"0.5726126",
"0.57212806",
"0.5719769",
"0.57116014",
"0.5666168",
"0.56540984",
"0.56533015",
"0.5600449",
"0.5584917",
"0.5578596",
"0.5558487",
"0.5512353",
"0.5487736",
"0.54558223"
] | 0.70545346 | 0 |
Create a text body with tracks | def create_tracks_contents(self, tracks):
rows = []
# Expose dates for an editor
for track in tracks:
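            # Unbilled tracks get a leading '# ' marker in the editor view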
rows.append([
'%s%s' % (u'# ' if not track['is_billed'] else ' ',
u'#'.join([track['tname'], track['pname']])
),
datetime.datetime.strftime(track['started'],
"'%x %X'").decode('utf8'),
datetime.datetime.strftime(track['finished'],
"'%x %X'").decode('utf8')
])
trows = tabulate(rows, ['Task', 'Started', 'Finished',
'Description'], tablefmt='simple')
return trows | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_transcript(speaker_label_transcript):\n with open('main_transcript.txt', 'a') as the_file:\n for t in speaker_label_transcript:\n the_file.write(f\"{t['speaker']}:\\n\")\n the_file.write(f\"{t['content']}\\n\\n\")",
"def news_speech():\n #Fetches data from API and creates global varibles.\n news_handle(news_fetch(config_fetcher('news_region'), config_fetcher('news_key')))\n #Creates a daily breifing using varibles\n news_daily_news = Markup((f\"The top headline for today is entitled: {title_1}, and was \\\nwritten by {author_1}. Here is a second headline, entitled: {title_2}, written by {author_2}.\"))\n return news_daily_news",
"def text(cfg, phase, high=6):\n short = cfg[\"fake\"].sentence(\n nb_words=high, variable_nb_words=True, ext_word_list=None\n )\n return \"{} {}\\n\\n{}\".format(\" \".join(cfg[phase]), short, blurb(cfg))",
"def make_silence_phones_txt(self):\n raise NotImplementedError",
"def create_cue_sheet(tracks):\n for track_index, (track_time, name, performer) in enumerate(tracks):\n minutes = int(track_time.total_seconds() / 60)\n seconds = int(track_time.total_seconds() % 60)\n\n cue_sheet_entry = ''' TRACK {:02} AUDIO\n TITLE {}\n PERFORMER {}\n INDEX 01 {:02d}:{:02d}:00'''.format(track_index, name, performer, minutes,\n seconds)\n yield cue_sheet_entry",
"def makeSong(text):\n song = []\n text = text.replace(\"\\n\", \";\")\n songData = text.split(\";\")\n lineNumber = 1\n for line in songData:\n _parseSongLine(song, line, lineNumber, \"text\")\n lineNumber += 1\n return song",
"def create_body(self, master_fr):\n pass",
"def create_artist_new_music_line(spotify_artist_music):\n body = ''\n for item in spotify_artist_music:\n if item['thumbnail']:\n artist_string = '<p><img src=\"{}\" width=\"{}\" height=\"{}\" /> {} released on {}--{}</p>\\n'\n body += artist_string.format(item['thumbnail'][0]['url'], item['thumbnail'][0]['width'],\n item['thumbnail'][0]['height'], item['name'], item['releaseDate'], item['url'])\n return body",
"def create_audiobook():\n\n f = open(\"static/files/book.txt\", \"r\", encoding=\"utf-8\")\n summary = f.read()\n print('total chars: ', len(summary))\n all_words = summary.split('.')\n aflr.api_key = \"b6b1434676d14bdfbf9f50ca2157ed5c\"\n VOICE=\"Matthew\"\n current, total_chars, chunk_num, TEXT = 0,0,0,[]\n while current < len(all_words) - 1:\n while total_chars <= 4999:\n TEXT.append(all_words[current])\n total_chars += len(all_words[current]) + 1\n current += 1\n if current == len(all_words):\n break\n \n if current < len(all_words):\n TEXT.pop()\n current -= 1\n total_chars = 0\n\n TEXT = \".\".join(TEXT)\n\n SPEED=80\n script = aflr.Script().create(\n scriptText=TEXT,\n projectName=\"may_the_4th\",\n moduleName=\"evil\",\n scriptName=f\"{chunk_num}_evil_{VOICE}\",\n )\n print(f\"Connect to the dev star: \\n {script} \\n\")\n\n scriptId = script[\"scriptId\"]\n\n response = aflr.Speech().create(\n scriptId=scriptId, voice=VOICE, speed=SPEED, #effect=EFFECT\n )\n # print(f\"Response from dev star: \\n {response} \\n\")\n # mastering current\n response = aflr.Mastering().create(\n scriptId=scriptId, #backgroundTrackId=BACKGROUNDTRACK\n )\n # print(f\"Using the force: \\n {response} \\n\")\n\n url = aflr.Mastering().retrieve(scriptId=scriptId)\n #print(f\"url to download the track: \\n {url} \\n\")\n\n # or download\n file = aflr.Mastering().download(\n scriptId=scriptId, destination=MINI_PATH\n )\n # print(f\"Listen to the results of the force: \\n {file} \\n\")\n\n print(\"finished\",chunk_num)\n\n TEXT = []\n chunk_num += 1\n\n play_audio()",
"def generate(self, text):\n self.__params['text']=text\n self._data = requests.get(self.TTS_URL, params=self.__params,\n stream=False).iter_content()",
"def createMessage(self, text):\n myMessage = fadingtext.FadingText(self.guiMediaPath, text, self.messagePositions)\n self.messagePositions.append(myMessage.getMyPosition())\n self.playSound('beep03')",
"def text_plot(self):\n if self.stext is not None:\n # Create text object :\n self.stextmesh = visu.Text(text=self.stext, color=self.stextcolor,\n font_size=self.stextsize, pos=self.xyz,\n bold=True, name='SourcesText')\n\n # Set text texture :\n self.stextmesh.set_gl_state('translucent', depth_test=True)\n\n # Apply a transformation to text elements to not cover sources :\n self.stextmesh.transform = vist.STTransform(\n translate=self.stextshift)\n else:\n self.stextmesh = visu.Text(name='NoneText')",
"def name_id_text(self):\n text = Marker()\n text.header = self._header\n text.type = Marker.TEXT_VIEW_FACING\n text.action = Marker.ADD\n text.scale.z = 0.05\n text.color = self.GREEN\n text.pose = deepcopy(self.POSE)\n text.pose.position.x = self._p1.x\n text.pose.position.y = (self._p1.y + self._p5.y) / 2\n text.pose.position.z = self._p1.z\n text.text = \"{} #{}\".format(self._object.object_name, self._track_id)\n return text",
"def generate_subtitles(\n source_path,\n output=None,\n dst_language=DEFAULT_DST_LANGUAGE,\n debug=False,\n cloud=False,\n disable_time=False,\n min_height=80,\n max_height=100,\n l_v=240\n ):\n # Opens the Video file\n print(f\"starting: using cloud {cloud}, source_path {source_path}\")\n if not cloud:\n ocr = PaddleOCR(lang='ch', use_gpu=False,\n rec_model_dir=r\"C:\\autosub_models\\rec\",\n cls_model_dir=r\"C:\\autosub_models\\cls\",\n det_model_dir=r\"C:\\autosub_models\\det\",\n use_angle_cls=True,\n rec_char_type='ch',\n drop_score=0.8,\n det_db_box_thresh=0.3,\n cls=True)\n\n cap = cv2.VideoCapture(source_path)\n # cap.set(3, 1280)\n # cap.set(4, 720)\n # cv2.createTrackbar(\"L - V\", \"Trackbars\", 0, 100, nothing)\n # cv2.createTrackbar(\"Min height\", \"Trackbars\", 80, 100, nothing)\n # cv2.createTrackbar(\"Max Height\", \"Trackbars\", 100, 100, nothing)\n fps = cap.get(cv2.CAP_PROP_FPS)\n print(f\"fps {fps}\")\n time_per_frame = 1 / fps\n i = 0\n div_frame = 6 # 5 frame /s\n sub_idx = 1\n list_srt = []\n old_des = \"\"\n prev_time = 0\n current_time = 0\n file_name = os.path.basename(source_path)\n extenstion = \".srt\" if not disable_time else \".txt\"\n filesub = f\"{os.path.splitext(file_name)[0]}{extenstion}\"\n if os.path.isfile(filesub):\n os.remove(filesub)\n while (cap.isOpened()):\n ret, frame = cap.read()\n if ret == False:\n break\n\n # min_height = cv2.getTrackbarPos(\"Min height\", \"Trackbars\")\n # max_height = cv2.getTrackbarPos(\"Max Height\", \"Trackbars\")\n # if max_height < min_height:\n # max_height = min_height + 10\n\n # l_v = cv2.getTrackbarPos(\"L - V\", \"Trackbars\")\n\n if i % div_frame == 0:\n prev_time_ts = datetime.utcfromtimestamp(prev_time).strftime('%H:%M:%S,%f')[:-4]\n current_time_ts = datetime.utcfromtimestamp(current_time).strftime('%H:%M:%S,%f')[:-4]\n h, w, c = frame.shape\n crop_img = frame[int(h * min_height/100):int(h * max_height/100), 0:w]\n hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)\n\n # define range of white color in HSV\n # change it according to your need !\n lower_white = np.array([0, 0, 246], dtype=np.uint8)\n upper_white = np.array([157, 21, 255], dtype=np.uint8)\n\n # Threshold the HSV image to get only white colors\n mask = cv2.inRange(hsv, lower_white, upper_white)\n # Bitwise-AND mask and original image\n crop_img = cv2.bitwise_and(crop_img, crop_img, mask=mask)\n # crop_img = cv2.cvtColor(crop_img, cv2.COLOR_HSV2RGB)\n # crop_img = cv2.cvtColor(crop_img, cv2.COLOR_RGB2GRAY)\n\n description = \"\"\n if cloud:\n success, encoded_image = cv2.imencode('.jpg', crop_img)\n description = detect_texts_google_cloud(encoded_image.tobytes())\n else:\n # dst = cv2.fastNlMeansDenoisingColored(crop_img,None,10,10,7,21)\n # stacked = np.hstack((dst, crop_img))\n if debug:\n # cv2.imshow('dst', dst)\n cv2.imshow('crop_img', crop_img)\n cv2.imshow('frame', frame)\n cv2.waitKey(1)\n\n result = ocr.ocr(crop_img, det=False, rec=True, cls=False)\n for line in result:\n # print(current_time_ts, line)\n if line[1] > 0.7:\n description = html.unescape(line[0].strip().replace(',', '').replace('、', '').replace('.', ''))\n break\n\n description = \"\" if len(description) < 6 else description\n prev_des = \"\"\n ratio = fuzz.ratio(description.lower(), old_des.lower())\n if len(list_srt) > 0:\n prev_des = list_srt[-1]['description']\n\n print(current_time_ts, description, ratio)\n if (old_des != \"\" or description == \"\") and (ratio < 70) and current_time - prev_time > 0.5:\n list_srt.append({\n \"description\": old_des,\n \"translate\": 
translate_text(dst_language, old_des),\n \"first_time\": prev_time_ts,\n \"last_time\": current_time_ts,\n \"sub_idx\": sub_idx\n })\n # with open(f\"{os.path.splitext(file_name)[0]}_raw.srt\", \"a\", encoding=\"utf-8\") as myfile:\n # myfile.write(f\"{list_srt[-1]['sub_idx']}\\n\")\n # myfile.write(f\"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\\n\")\n # myfile.write(f\"{list_srt[-1]['description']}\\n\")\n # myfile.write('\\n')\n # myfile.close()\n\n with open(filesub, \"a\", encoding=\"utf-8\") as myfile_vi:\n if not disable_time:\n myfile_vi.write(f\"{list_srt[-1]['sub_idx']}\\n\")\n myfile_vi.write(f\"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\\n\")\n myfile_vi.write(f\"{list_srt[-1]['translate']}\\n\")\n myfile_vi.write('\\n')\n myfile_vi.close()\n\n print(f\"{list_srt[-1]['sub_idx']}\\n\")\n print(f\"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\\n\")\n print(f\"{list_srt[-1]['description']}\\n\")\n print(f\"{list_srt[-1]['translate']}\\n\")\n print(f\"Similarity{ratio}\\n\")\n print('\\n')\n\n sub_idx += 1\n prev_time = current_time\n\n if description == \"\":\n prev_time = current_time\n\n old_des = description\n current_time += time_per_frame * div_frame\n\n i += 1\n\n cap.release()\n return output",
"def convert_chn_text(detail=True):\n p = {\n \"data_path\": \"../data/data_literature\",\n \"output_dir\": \"../data/converted_data\"\n }\n if detail:\n gen_params_info(p)\n\n os.system(\"rm -rf %s\" % p[\"output_dir\"])\n os.system(\"mkdir -p %s\" % p[\"output_dir\"])\n files = os.listdir(p[\"data_path\"])\n for file_name in files:\n if detail:\n print(\"to process %s\" % file_name)\n file_path = \"%s/%s\" % (p[\"data_path\"], file_name)\n out_file_path = \"%s/%s\" % (p[\"output_dir\"], file_name)\n fh_in = codecs.open(filename=file_path, mode=\"r\", encoding='utf8')\n fh_out = codecs.open(filename=out_file_path, mode=\"w\", encoding='utf8')\n line_idx = 1\n verb = \"\"\n for line in fh_in:\n line = line.lstrip()\n if line.find(\"\\t\") < 0:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n items = line.split(\"\\t\")\n if len(items) != 4:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO 4 TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n frame_id = items[0]\n if frame_id.find(\".\") >= 0:\n frame_id = frame_id.split(\".\")[0]\n verb = items[2].strip()\n left_sent = items[1].strip()\n right_sent = items[3].strip()\n out_line = \"%s\\t%s\\t%s\\t%s\"\\\n % (frame_id, left_sent, verb, right_sent)\n print(out_line, file=fh_out)\n\n line_idx += 1\n\n fh_in.close()\n fh_out.close()",
"def create_yml(self):\n fid = open(os.path.join(RESOURCE_PATH,\n '11079419_SNA_SNA.txt'),\n MODE_ASCII_READ)\n\n stream_handle = fid\n\n self.create_parser(stream_handle, True)\n\n particles = self.parser.get_records(1000)\n\n self.particle_to_yml(particles, '11079419_SNA_SNA_telem.yml')\n fid.close()",
"def store_lyrics_text(target_path, track_id, text, extension=\".txt\"):\n file_path = os.path.join(target_path, track_id + extension)\n print(file_path)\n with open(file_path, 'w') as fp_out:\n fp_out.write(text)",
"def create_new_text(self, *args, **kw):\n shape_id = self._create('text', args, kw)\n self.variables.shape_ids.append(shape_id)\n canvas_coords = args[0]\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.TEXT, None)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, canvas_coords)\n self.variables.current_shape_id = shape_id\n return shape_id",
"def generateNotes():\r\n fs = 44100 # hertz\r\n seconds = 3 # Note duration of 3 seconds\r\n noteNames = [\"C4\", \"D4\", \"E4\", \"F4\", \"G4\", \"A4\", \"B4\"]\r\n for noteName in noteNames:\r\n myNote = music21.note.Note(noteName)\r\n noteFrequency = myNote.pitch.frequency\r\n # Generate array with seconds*sample_rate steps, ranging between 0 and seconds\r\n t = np.linspace(0, seconds, seconds * fs, False)\r\n\r\n # Generate a 440 Hz sine wave\r\n sound = np.sin(noteFrequency * t * 2 * np.pi)\r\n\r\n # Ensure that highest value is in 16-bit range\r\n audio = sound * (2**15 - 1) / np.max(np.abs(sound))\r\n # Convert to 16-bit data\r\n audio = audio.astype(np.int16)\r\n\r\n # Start playback\r\n play_obj = sa.play_buffer(audio, 1, 2, fs)\r\n\r\n # Wait for playback to finish before exiting\r\n play_obj.wait_done()\r\n\r\n #Write sound to file\r\n sf.write('assets/patterns/'+noteName+'.wav', audio, fs)",
"def create_textbox_with_text(self, lyrics_list: List, english_lyrics_list: List,\n song_numbers_str: str = 'DE123, E123, F123'):\n\n service = self.slides_service\n\n songNumbersBoxHeight = {\n 'magnitude': 50,\n 'unit': 'PT'\n }\n songNumbersBoxWidth = {\n 'magnitude': 680,\n 'unit': 'PT'\n }\n\n lyricsBoxSize = {\n 'magnitude': 300,\n 'unit': 'PT'\n }\n\n requests = [\n # create the right text box\n {\n 'createShape': {\n 'objectId': self.right_box_id,\n 'shapeType': 'TEXT_BOX',\n 'elementProperties': {\n 'pageObjectId': self.page_id,\n 'size': {\n 'height': lyricsBoxSize,\n 'width': lyricsBoxSize\n },\n 'transform': {\n 'scaleX': 1,\n 'scaleY': 1,\n 'translateX': 380,\n 'translateY': 50,\n 'unit': 'PT'\n }\n }\n }\n },\n # create left text box\n {\n 'createShape': {\n 'objectId': self.left_box_id,\n 'shapeType': 'TEXT_BOX',\n 'elementProperties': {\n 'pageObjectId': self.page_id,\n 'size': {\n 'height': lyricsBoxSize,\n 'width': lyricsBoxSize\n },\n 'transform': {\n 'scaleX': 1,\n 'scaleY': 1,\n 'translateX': 20,\n 'translateY': 50,\n 'unit': 'PT'\n }\n }\n }\n },\n # create the title text box with song numbers\n {\n 'createShape': {\n 'objectId': self.song_numbers_box_id,\n 'shapeType': 'TEXT_BOX',\n 'elementProperties': {\n 'pageObjectId': self.page_id,\n 'size': {\n 'height': songNumbersBoxHeight,\n 'width': songNumbersBoxWidth\n },\n 'transform': {\n 'scaleX': 1,\n 'scaleY': 1,\n 'translateX': 20,\n 'translateY': 5,\n 'unit': 'PT'\n }\n }\n }\n },\n # insert text into the box\n {\n 'insertText': {\n 'objectId': self.song_numbers_box_id,\n 'insertionIndex': 0,\n 'text': song_numbers_str\n }\n },\n {\n 'insertText': {\n 'objectId': self.left_box_id,\n 'insertionIndex': 0,\n 'text': lyrics_list,\n }\n },\n {\n 'insertText': {\n 'objectId': self.right_box_id,\n 'insertionIndex': 0,\n 'text': english_lyrics_list\n }\n }\n ]\n body = {\n 'requests': requests\n }\n response = service.presentations().batchUpdate(presentationId=self.presentation_id, body=body).execute()\n create_shape_response = response.get('replies')[0].get('createShape')\n obj_id = create_shape_response.get('objectId')\n print(f'Created textbox with ID: {obj_id}')",
"def _create_text(self):\n assert len(self.state) > 0\n tmp = \"\"\n for tag in self.state:\n if \"<span\" in tag or \"<div\" in tag:\n continue\n if len(tag) > self._max_len:\n tmp += self.__split_seq(tag) + \"\\n\" + \"\\n\"\n else:\n tmp += tag + \"\\n\" + \"\\n\"\n\n self.text = copy.copy(tmp)",
"def create_ph_text(self):\n text_list = [f\"Top {STORIES_NUMBER} from Product Hunt:\"]\n query = {\n \"query\": \"\"\"\n query todayPosts {\n posts {\n edges {\n node {\n name\n tagline\n votesCount\n website\n url\n }\n }\n }\n }\n \"\"\"\n }\n headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + PH_API_TOKEN,\n }\n response = self.run_graphql_query(query, headers)\n today_posts = [\n post[\"node\"] for post in response[\"data\"][\"posts\"][\"edges\"]]\n top_posts = sorted(\n today_posts, key=lambda k: k[\"votesCount\"], reverse=True)\n # Format slack text\n for post in top_posts[:STORIES_NUMBER]:\n text_list.append(\n \"*<{}|{}>* - <{}|{} - {}>\".format(\n post[\"url\"],\n post[\"votesCount\"],\n post[\"website\"],\n post[\"name\"],\n post[\"tagline\"],\n )\n )\n self.logger.debug(text_list)\n return \"\\n>\".join(text_list)",
"def write_segment(live_list, report_list):\n #assert len(live_list) == len(report_list)\n with open(os.path.join(trainDir, 'train_text_data'), 'a') as train_text_data: # a means append mode\n for r, l in zip(report_list, live_list):\n sample = \"abstract=<d> <p> <s> \" + r.strip(\"\\r\\n\") + \"</s> </p> </d>\"\t\\\n + \"\\t\" + \"article=<d> <p> <s> \" + l.strip(\"\\r\\n\") + \\\n \" . </s> </p> </d>\\tpublisher=AFP\"\n train_text_data.write(sample + \"\\n\")",
"def build_tweet():\n\n verb = conjugate(random.choice(verbs)['present'], tense=PARTICIPLE, parse=True).title()\n animal = random.choice(animals).title()\n food = random.choice(foods).title()\n noun = random.choice(nouns).title()\n\n band = food + \" \" + noun\n track = verb + \" \" + animal\n\n feature1 = clean_feature(random.choice(j['features']))\n feature2 = clean_feature(random.choice(j['features']))\n feature3 = random.choice(j['features'])\n\n dont = \"\"\n if random.randrange(100) <= 50:\n dont = \"don't \"\n\n s = \"I \" + dont + \"like the \" + feature1 + \" and \" + feature2 + \" with \" + \\\n feature3 + \" in \" + band + \"'s \" + '\"' + track + '\"'\n\n return s",
"def make_text(self):\n\n text_style = self.QR_TEXT_STYLE.format(size=3.443101883 * self.SCALE, units=self.UNITS)\n\n x_pos = str(self.outside_border * self.SCALE + self.line_size + 4 * self.SCALE)\n y_pos = str(self.outside_border * self.SCALE + 2 * self.line_size + 2 * self.inside_border * self.SCALE\n + self.width * self.SCALE + self.font_height)\n text_el = ET.Element(ET.QName(\"text\"), style=text_style, x=x_pos, y=y_pos, id=\"qrplatba-text\")\n text_el.text = 'QR platba'\n return text_el",
"def astext(self):\n self.elements.update({\n 'body': u''.join(self.body),\n 'indices': self.generate_indices()\n })\n return self.render('beamer.tex_t', self.elements)",
"def test_good_transcript(self):\r\n good_sjson = _create_file(content=textwrap.dedent(\"\"\"\\\r\n {\r\n \"start\": [\r\n 270,\r\n 2720\r\n ],\r\n \"end\": [\r\n 2720,\r\n 5430\r\n ],\r\n \"text\": [\r\n \"Hi, welcome to Edx.\",\r\n \"Let's start with what is on your screen right now.\"\r\n ]\r\n }\r\n \"\"\"))\r\n\r\n _upload_sjson_file(good_sjson, self.item.location)\r\n self.item.sub = _get_subs_id(good_sjson.name)\r\n\r\n text, filename, mime_type = self.item.get_transcript()\r\n\r\n expected_text = textwrap.dedent(\"\"\"\\\r\n 0\r\n 00:00:00,270 --> 00:00:02,720\r\n Hi, welcome to Edx.\r\n\r\n 1\r\n 00:00:02,720 --> 00:00:05,430\r\n Let's start with what is on your screen right now.\r\n\r\n \"\"\")\r\n\r\n self.assertEqual(text, expected_text)\r\n self.assertEqual(filename[:-4], self.item.sub)\r\n self.assertEqual(mime_type, 'application/x-subrip; charset=utf-8')",
"def make_final_text(text: str) -> str:\n baseline_text_template = \"\"\"Use this thread to discuss anything (within the rules of the subreddit):\n\n* What you didn't think was worthy of its own post\n* What club game you're most excited for\n* Where you're staying to watch a friendly\n* Which players should be called in\n{}\n* What the mods told you to re-post here\n* Etc\n\n### Schedules\n{}\n\"\"\"\n\n with open('random_dumb_questions.txt', 'r') as files:\n list_of_questions = files.readlines()\n\n question = list_of_questions[randint(0, len(list_of_questions))].replace('\\n', '')\n\n return baseline_text_template.format(question, text)",
"def format_body(self):\n mt = deque(str(self.movetext).split(' ') + [])\n out = mt.popleft()\n ll = len(out)\n while True:\n if len(mt) is 0:\n break\n\n n = mt.popleft()\n # If the current line length + space + character is less than\n # 80 chars long\n if ll + len(n) + 1 < 80:\n to_add = \" \" + n\n out += \" \" + n\n ll += len(to_add)\n else:\n out += \"\\n\" + n\n ll = len(n)\n return out + str(self.score)",
"def makeDocument(fontPath):\n\n f = Font(fontPath) # Get PageBot Font instance of Variable font.\n \n W = H = PageSize\n\n # Create a new document, default to the defined page size. \n doc = Document(w=W, h=H, originTop=False, title='Text Flow', autoPages=1)\n \n view = doc.getView()\n view.padding = 0 # Aboid showing of crop marks, etc.\n view.showPageCropMarks = True\n view.showPageRegistrationMarks = True\n view.showPageFrame = True\n view.showPagePadding = True\n view.showElementOrigin = False\n view.showElementDimensions = False\n \n # Get list of pages with equal y, then equal x. \n #page = doc[0][0] # Get the single page from te document.\n page = doc.getPage(0) # Get page on pageNumber, first in row (this is only one now).\n page.name = 'Page 1'\n page.padding = PagePadding\n \n fs = newFS(f.info.familyName + ' ' + f.info.styleName, \n style=dict(font=f.name, fontSize=18, textFill=0))\n _, th = textSize(fs)\n title = newTextBox(fs, conditions=[Top2Top(), Fit2Width()],\n parent=page, h=th*1.2)\n \n circle = VariableCircle(f, s=GLYPH_NAME, name='VariableCircleSpeciment',\n parent=page, padding=4, x=100, fontSize=64,\n maxW=W-2*PagePadding, minW=100, showAxisName=True, \n # Conditions make the element move to top-left of the page.\n # And the condition that there should be no overflow, otherwise the text box\n # will try to solve it. \n conditions=[Float2Top(), Fit2Bottom(), Center2Center()],\n # Position of the origin of the element. Just to show where it is.\n # Has no effect on the position conditions. \n yAlign=BOTTOM, xAlign=LEFT, fill=CIRCLE_ELEMENT_FILL, borders=0,\n )\n \n score = doc.solve() # Try to solve all pages.\n if score.fails:\n print score.fails\n\n # To avoid circular dependent conditions, we correct the position of the title\n # on left to that the position of the circle has become.\n title.pl = circle.x - page.pl\n \n return doc # Answer the doc for further doing."
] | [
"0.5848218",
"0.58332545",
"0.5605702",
"0.5583834",
"0.55760723",
"0.54602444",
"0.5450515",
"0.5432554",
"0.5432439",
"0.5423649",
"0.5364082",
"0.5279505",
"0.5249628",
"0.5213371",
"0.5209746",
"0.5196646",
"0.51803553",
"0.51700944",
"0.51385075",
"0.5134436",
"0.51308984",
"0.51307184",
"0.5128868",
"0.5121764",
"0.5120093",
"0.5089748",
"0.50564367",
"0.5054934",
"0.5051253",
"0.50402176"
] | 0.59032327 | 0 |
Update timesheet with an external editor | def update_timesheet(self, args):
if len(args) == 1:
print(self.error_wrong_parameters)
return
try:
started, finished = helpers.parse_date_parameters(args[1:])
except ValueError as error:
print(error)
return
if started == datetime.date.fromtimestamp(0):
track = self.db.get_minimal_started_track()
if track:
started = track['started']
else:
started = finished
# Get timesheet records
tracks = self.db.get_tracks_by_date(started, finished,
also_unfinished=False)
        # Render the tracks into the editable table
tracks_contents = self.create_tracks_contents(tracks)
lnum = 0
header = self.get_timesheet_header(started, finished)
header_length = len(header.split(os.linesep))
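        # Keep re-opening the editor until the edited timesheet parses cleanly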
while(True):
try:
# Create the editor's contents
contents = self.create_timesheet_contents(header, tracks_contents)
timesheet = self.open_external_editor(contents, lnum)
# we must get the table header here due to the length of the columns
table_header = timesheet[header_length-1:header_length+1]
tracks = timesheet[header_length+1:]
except OSError, message:
print("*** Error: %s", message)
return
# Parse the input
try:
data = self.parse_timesheet(tracks, header_length)
except errors.ParsingError as error:
print(error.msg)
print("Would you like to update the timesheet again? [Y/n] ")
if not helpers.get_yes_no(default='y'):
return
table_header.extend(tracks)
tracks_contents = "".join(table_header)
lnum = error.lnum
continue
break
# Update the DB
        # TODO: get rid of this dangerous delete-and-recreate operation
self.db.delete_tracks_by_date(started=started, finished=finished)
data.sort(key=operator.itemgetter('started'))
for track in data:
self.db.create_track(track['tid'],
track['started'], track['finished'],
int(not bool(track['is_billed'])))
print('The timesheet has been updated.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_upt(self, arg):\n self.do_timesheet('update today')",
"def update_timesheet(item):\n\tj=json.loads(item)\n\tprint(\"-----------------------garffff---------------------\")\n\tnew_employee=None;\n\ttimesheet=frappe.get_doc(\"Time Sheet\",j[\"name\"])\n\tjarray=[]\n\tfor passed_employee in j['employees']:\n\t\tif 'new' in passed_employee.keys():\n\t\t\t#create employee\n\t\t\tnew_employee=frappe.get_doc({\n\t\t\t\t\"doctype\":\"employee_link_with_time\",\n\t\t\t\t\"employee\":passed_employee['employee']\n\t\t\t});\n\n\t\tjarray.append(passed_employee['employee']);\n\t\tfor employee in timesheet.employees:\n\t\t\tif passed_employee[\"employee\"]==employee.employee:\n\t\t\t\tif \"start\" in passed_employee:\n\t\t\t\t\temployee.start=passed_employee[\"start\"]\n\t\t\t\tif \"end\" in passed_employee:\n\t\t\t\t\temployee.end=passed_employee[\"end\"];\n\tforRemove=[]\n\tfor employee_container in timesheet.employees:\n\t\tif employee_container.employee not in jarray:\n\t\t\tforRemove.append(employee_container)\n\tprint(\"___________REMOVE______________\")\n\tprint(forRemove);\n\tif forRemove:\n\t\tfor remove in forRemove:\n\t\t\ttimesheet.employees.remove(remove)\n\n\tif new_employee is not None:\n\t\ttimesheet.append(\"employees\",new_employee)\n\n\t#handel status\n\ttimesheet.status=j[\"status\"]\n\ttimesheet.save()\n\treturn frappe.get_doc(\"Time Sheet\",j[\"name\"])",
"def timesheet_edit_form(request, type, id):\r\n if type == 'timesheet':\r\n timesheet = TimeSheet.objects.get(pk=int(id))\r\n editForm = TimeSheetForm(\r\n initial = {\r\n 'dueDate':timesheet.DueDate,\r\n 'hours':timesheet.Hours,\r\n 'partner':timesheet.Partner,\r\n 'project':timesheet.Project,\r\n 'phase':timesheet.Phase,\r\n 'activity':timesheet.Activity\r\n })\r\n else:\r\n timesheet = InternalTimeSheet.objects.get(pk=int(id))\r\n editForm = InternalForm(\r\n initial = {\r\n 'dueDate':timesheet.InternalDueDate,\r\n 'hours':timesheet.Hours,\r\n 'internal':timesheet.Internal,\r\n 'activity':timesheet.Activity\r\n })\r\n return render(\r\n request,\r\n 'timesheet/forms/edit.html',\r\n {\r\n 'editForm':editForm,\r\n 'type':type,\r\n 'timesheet':timesheet\r\n })",
"def do_timesheet(self, arg):\n\n def _usage():\n self.do_help('timesheet')\n commands = ['update', 'report']\n words = shlex.split(arg)\n words = [token.lower() for token in words]\n if not len(words) or words[0] not in commands:\n print(self.error_wrong_parameters)\n return\n if words[0] == 'update':\n self.update_timesheet(words)\n elif words[0] == 'report':\n self.report_timesheet(words)\n return",
"def edit():",
"def timesheet(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet.html'\r\n )",
"def edit(self):\n template = TaskInfo._generate_template(self.dict())\n tempf = tempfile.mkstemp()[1]\n try:\n with open(tempf, 'w') as outfile:\n outfile.write(template)\n\n editor_cmd = [\n TaskInfo._select_editor(),\n tempf,\n ]\n os.system(\" \".join(editor_cmd))\n\n # validate edited file\n while True:\n try:\n self._file_update(tempf)\n break\n except TaskSyntaxError as e:\n input(\n # pylint: disable=line-too-long\n \"Task syntax error (enter returns to editor): {}\".format( # nopep8\n str(e)))\n os.system(\" \".join(editor_cmd))\n continue\n finally:\n if os.path.exists(tempf):\n os.remove(tempf)\n\n # commit changes\n self.serialize()",
"def edit_date(entry):\n entry.date = get_date()\n entry.save()\n input(\"Edit successful. \")\n return entry",
"def edit():\n\n curitem = treeview.focus().strip(\"#\")\n select_values = series_dict[curitem]\n editent2var.set(\"thumbnails\\\\\")\n\n def raging_fire():\n \"\"\"call back function for edit button in the edit window\"\"\"\n\n if editspin1.get() != '0': # season\n select_values[0] = int(editspin1.get())\n select_values[3] = \"{}\".format(datetime.datetime.now()) # update the modify date\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n if editspin2.get() != '0': # episode\n select_values[1] = int(editspin2.get())\n select_values[3] = \"{}\".format(datetime.datetime.now()) # update the modify date\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n if editentvar.get() != curitem: # name\n series_dict[editentvar.get().title()] = series_dict.pop(curitem) # update the modify date\n select_values[3] = \"{}\".format(datetime.datetime.now())\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n if editent2var.get() != select_values[2]: # pic\n select_values[3] = \"{}\".format(datetime.datetime.now()) # update the modify date\n select_values[2] = editent2var.get()\n with open('series_table.json', 'w') as f:\n json.dump(series_dict, f, indent=2)\n\n edittop.destroy()\n\n if curitem != \"\": # test if an item is highlighted first\n \"\"\"the actual ecit window widgets\"\"\"\n edittop = Toplevel()\n\n editlab1 = Label(edittop, text=\"Current Tv-Series title : \")\n editlab1.grid(row=1, column=1, sticky=W, pady=4)\n\n editent = Entry(edittop, textvariable=editentvar, width=30)\n editentvar.set(curitem)\n editent.grid(row=1, column=2, sticky=W, pady=4)\n\n editlab2 = Label(edittop, text=\"Current Season {}, chance to : \".format(select_values[0]))\n editlab2.grid(row=2, column=1, sticky=W, pady=4)\n\n editspin1 = Spinbox(edittop, from_=0, to=1000, width=5)\n editspin1.grid(row=2, column=2, sticky=W, pady=4)\n\n editlab3 = Label(edittop, text=\"Current Episode {}, change to : \".format(select_values[1]))\n editlab3.grid(row=3, column=1, sticky=W, pady=4)\n\n editspin2 = Spinbox(edittop, from_=0, to=1000, width=5)\n editspin2.grid(row=3, column=2, sticky=W, pady=4)\n\n editlab4 = Label(edittop, text=\"Change image to : \")\n editlab4.grid(row=4, column=1, sticky=W, pady=4)\n\n editent2 = Entry(edittop, textvariable=editent2var, width=35)\n editent2var.set(select_values[2])\n editent2.grid(row=4, column=2, sticky=E, pady=4)\n\n editbut = Button(edittop, text='Edit', command=raging_fire)\n editbut.grid(row=5, column=1, sticky=W, pady=4, padx=20)\n\n download_thumbbut = Button(edittop, text=\"Download The thumbnail\", command=download_thumb)\n download_thumbbut.grid(row=5, column=2, sticky=W, pady=4, padx=20)\n\n edittop.geometry(\"400x200+200+300\")\n edittop.title(\"Edit properties of {} \".format(curitem).upper())",
"def refresh_calendar():\n manage.refresh_calendar()",
"def do_up(self, arg):\n self.do_timesheet('update %s' % arg)",
"def timeEditorPanel(*args, activeClipEditMode: Union[int, bool]=0, activeTabRootClipId:\n bool=True, activeTabTime: bool=True, activeTabView: Union[int, bool]=0,\n autoFit: Union[AnyStr, bool]=\"\", autoFitTime: Union[AnyStr, bool]=\"\",\n control: bool=True, defineTemplate: AnyStr=\"\", displayActiveKeyTangents:\n AnyStr=\"\", displayActiveKeys: AnyStr=\"\", displayInfinities: AnyStr=\"\",\n displayKeys: AnyStr=\"\", displayTangents: AnyStr=\"\", displayValues:\n AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", exists: bool=True, filter:\n Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr, bool]=\"\",\n groupIdForTabView: Union[int, bool]=0, highlightConnection: Union[AnyStr,\n bool]=\"\", keyingTarget: Union[int, bool]=0, layerId: int=0,\n lockMainConnection: bool=True, lookAt: AnyStr=\"\", mainListConnection:\n Union[AnyStr, bool]=\"\", menu: Script=None, minClipWidth: Union[int, bool]=0,\n panel: Union[AnyStr, bool]=\"\", parent: Union[AnyStr, bool]=\"\",\n selectionConnection: Union[AnyStr, bool]=\"\", setToPrevClipEditMode:\n bool=True, snapTime: Union[AnyStr, bool]=\"\", snapToClip: bool=True,\n snapToFrame: bool=True, snapTolerance: Union[int, bool]=0, snapValue:\n Union[AnyStr, bool]=\"\", stateString: bool=True, tabView: int=0, timeCursor:\n bool=True, unParent: bool=True, unlockMainConnection: bool=True,\n updateMainConnection: bool=True, useTemplate: AnyStr=\"\", q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def edit(filename):\r\n import os\r\n import __main__\r\n\r\n #check file exists\r\n filepath = os.path.abspath(filename)\r\n cwd = os.getcwd()\r\n #a full path given\r\n if os.path.exists(filepath) is False:\r\n raise Exception('File does not exist: '+filename)\r\n #send the editor message\r\n __main__._engine.send_msg('Editor','Open',(filepath,))",
"def change_event(date, time, event_title, room_code, length, type):\n table = load_table(date)\n if table is False:\n print(\"file not found\")\n else:\n position = int(time[:2])-8\n row = table[position]\n row[1] = event_title\n row[2] = room_code\n row[3] = length\n row[4] = type\n table[position] = row\n save_table(date, table)\n print_day(date)",
"def btnUpdateCalendar():\n\n page = getDataPage(uNameEntry.get(), pWordEntry.get())\n\n if page is not None:\n\n promptString.set(\"Updated!\")\n promptLabel.config(fg=\"green2\")\n updateCalendar(page, offsetString.get())\n\n # \"Updated!\"\n\n else:\n\n promptString.set(\"Login unsuccesful!\")\n promptLabel.config(fg=\"red2\")",
"def edit_event_task(self):\n self.edit_event()",
"def edit_time_spent(entry):\n entry.time_spent = get_minutes()\n entry.save()\n input(\"Edit successful. \")\n return entry",
"def edit_document():",
"def commit(self) -> None:\n changed = self.local_sheet.get_changed_rect()\n if changed is None: return\n cell_range = self.worksheet.range(changed[0][0] + 1, changed[1][0] + 1,\n changed[0][1] + 1, changed[1][1] + 1)\n for cell in cell_range:\n cell.value = self.local_sheet.get_cell(cell.row - 1, cell.col - 1, sheet_format=True)\n self.worksheet.update_cells(cell_range, value_input_option='USER_ENTERED')\n self.local_sheet.reset_changed()",
"def update_main_page():\n\n line = house_keeping + '/acis_gain.html'\n text = open(line, 'r').read()\n\n today = tcnv. currentTime('Display')\n\n text = text.replace('#DATE#', today)\n\n file = web_dir + '/acis_gain.html'\n fo = open(file, 'w')\n fo.write(text)\n fo.close()",
"def refresh_dlg(self):\n dd = mg.DATADETS_OBJ\n self.tabentry.any_editor_shown = False\n self.tabentry.new_editor_shown = False\n ## Delete all rows after the first one (sofa_id) and before the new one\n rows2del = self.tabentry.rows_n-2 ## less 1st and last\n self.tabentry.grid.DeleteRows(pos=1, numRows=rows2del)\n self.tabentry.grid.HideCellEditControl()\n self.tabentry.grid.ForceRefresh()\n self.tabentry.safe_layout_adjustment()\n ## get list of name/type tuples (including sofa_id)\n init_settings_data = getdata.get_init_settings_data(dd, self.tblname)\n self.setup_settings_data(init_settings_data)\n self.tabentry.rows_n = len(init_settings_data) + 1 ## + new row\n self.tabentry.rows_to_fill = self.tabentry.rows_n\n ## using default renderer and editor fine (text)\n for row_idx, nametype in enumerate(init_settings_data):\n if row_idx == 0:\n continue ## sofa_id already there (and blue, read-only etc)\n fldname, fldtype = nametype\n self.tabentry.grid.InsertRows(row_idx, 1)\n self.tabentry.grid.SetCellValue(row_idx, 0, fldname)\n self.tabentry.grid.SetCellValue(row_idx, 1, fldtype)\n self.tabentry.grid.SetRowLabelValue(row_idx, str(row_idx+1))\n self.tabentry.grid.ForceRefresh() ## deleteme\n ## extra config\n self.tabentry.grid.SetRowLabelValue(\n self.tabentry.rows_n-1, mg.NEW_IS_READY)\n ## set cell and record position\n self.tabentry.respond_to_select_cell = False\n row2sel = 0 if self.tabentry.rows_n == 1 else 1\n self.tabentry.current_row_idx = row2sel\n self.tabentry.current_col_idx = 0\n self.tabentry.grid.SetGridCursor(self.tabentry.current_row_idx, \n self.tabentry.current_col_idx)\n ## misc\n self.tabentry.grid.ForceRefresh()\n self.update_demo()",
"def edit_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jd.Page(self.session, self.source)",
"def update_timer_details(twitchid, id, *, title, delta, maxtime, styling):\n\twith postgres, postgres.cursor() as cur:\n\t\tcur.execute(\"update mustard.timers set title=%s, delta=%s, maxtime=%s, styling=%s where id=%s and twitchid=%s\",\n\t\t\t(title, delta, maxtime, styling, id, twitchid))\n\t\tif not cur.rowcount: raise ValueError(\"Timer not found, or not owned by that user\")",
"def OnRenameTimer(self):\r\n \r\n self.Edit(self._current)",
"def handleEdit(self, _): # pylint: disable=invalid-name\n\n session_key = self.getSessionKey()\n\n splunk_home = os.environ.get(\"SPLUNK_HOME\")\n default_file = os.path.join(\n splunk_home,\n \"etc\",\n \"apps\",\n \"broken_hosts\",\n \"default\",\n \"data\",\n \"expectedTime.csv.default\")\n with open(default_file, \"r\", encoding='utf-8') as opened_file:\n reader = csv.DictReader(opened_file)\n for line in reader:\n self.write_line(session_key, line)",
"def update(self, dt):",
"def update(self, dt):",
"def edit_file(path, editor=None):\n\n # Find the editor to use\n editor = find_editor(editor)\n\n # Create temporary directory and copy the file\n tmpdir = tempfile.mkdtemp()\n tmpfile = os.path.join(tmpdir, os.path.basename(path))\n shutil.copy2(path, tmpfile)\n\n # Execute the editor\n subprocess.call([editor, tmpfile])\n\n # Copy the temporary file back and cleanup\n shutil.copy2(tmpfile, path)\n shutil.rmtree(tmpdir)",
"def call_schedule(self, bot, update):\n bot.send_message(update.message.chat_id, '_1 пара_ 08:30 - 10:05\\n'\n '_2 пара_ 10:25 - 12:00\\n'\n '_3 пара_ 12:20 - 13:55\\n'\n '_4 пара_ 14:15 - 15:50\\n'\n '_5 пара_ 16:10 - 17:45',\n parse_mode='Markdown')",
"def refreshEditorTemplates(*args, **kwargs)->None:\n pass"
] | [
"0.61382276",
"0.6037365",
"0.5941405",
"0.594015",
"0.58128583",
"0.5658246",
"0.5627402",
"0.5599801",
"0.55778724",
"0.5517923",
"0.5475353",
"0.54705817",
"0.5338446",
"0.52902836",
"0.52834815",
"0.5277494",
"0.5273059",
"0.5257542",
"0.5244639",
"0.52007896",
"0.5169982",
"0.516",
"0.5143928",
"0.51416624",
"0.5139708",
"0.51370025",
"0.51370025",
"0.51167697",
"0.5100829",
"0.50984704"
] | 0.7120731 | 0 |
Get report command's parameters | def get_report_parameters(self, args, default_mask=0):
# Get the task|project filter keyword and an alias
pname = tname = ''
mask = 0
if args[0] in ('task', 'project'):
if not len(args) >= 2:
print("*** Error: Wrong format of the object parameter '%s'"
"" % args[0])
return
tname, pname = self.validate_object(
keyword=args[0], thing=args[1].decode('utf-8'))
args = args[2:]
# Get 'extend' parameter
if args and args[0] == 'extend':
if len(args) == 1:
print("*** Error: Wrong extend bitmask.")
return
            # Get the mask if the 'extend' parameter is present
mask = helpers.parse_extend_mask(args[1])
args = args[2:]
mask = default_mask if not mask else mask
# Get dates
started, finished = helpers.parse_date_parameters(args)
return tname, pname, started, finished, mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_report_parameters(self):\n self.setup_report()\n\n proxy_url, proxy_argument = get_proxy_args(self.cr, self.uid, self.prpt_content)\n proxy = xmlrpclib.ServerProxy(proxy_url)\n return proxy.report.getParameterInfo(proxy_argument)",
"def parameters(self) -> ReportParameters:\n return self.__parameters",
"def getArguments():\n dfPath = sys.argv[1]\n if(len(sys.argv) == 4):\n pop = int(sys.argv[2])\n gen = int(sys.argv[3])\n else:\n pop = 10\n gen = 2\n return dfPath, pop, gen",
"def get_args( self, ):\r\n for iarg in sys.argv[1:]:\r\n #print iarg\r\n argsplits = iarg.split(\"=\")\r\n parm_name = argsplits[0]\r\n parm_value = argsplits[1]\r\n\r\n # so far only one is captured\r\n if parm_name == \"parameters\":\r\n self.parmeters_x = parm_value #\r\n msg = \"command line arg >>{iarg}\" # log file not open but use alt\r\n AppGlobal.logger.info( msg )\r\n else:\r\n msg = \"no parmeter extensions\"\r\n AppGlobal.logger.info( msg )\r\n return",
"def get_cli_arguments(self):\n pass",
"def fetch_report_parameters(cr, uid, report_name, context=None):\n if not report_name.startswith(SERVICE_NAME_PREFIX):\n name = \"%s%s\" % (SERVICE_NAME_PREFIX, report_name)\n else:\n name = report_name\n\n return Report(name, cr, uid, [1], {}, context).fetch_report_parameters()",
"def parameters(self) -> Dict[str, Any]:\n return self.data[\"args\"].get(\"parameters\", {})",
"def get_command_arguments(self, format_vars):\n rval = {}\n for setting, value in self.settings.items():\n if setting in self.command_arguments:\n if value:\n rval[setting] = self.command_arguments[setting].format(**format_vars)\n else:\n rval[setting] = \"\"\n else:\n rval[setting] = value\n return rval",
"def get_cmd_args():\n\n\n\t#Creates the Argument Parser\n\tparser = ArgumentParser(description = \"ID Lab qPCR Analysis v\" + VERSION + \" \" + QUALITY)\n\n\t#Adds the input file argument\n\tparser.add_argument('-f', '--file',\n\t\t\t\tnargs = '+',\n\t\t\t\ttype = FileType('r'),\n\t\t\t\trequired = True)\n\n\t#Adds the output directory\n\tparser.add_argument('-o', '--output',\n\t\t\t\trequired = True)\n\n\t#Adds the model argument, to select between the three models\n\tparser.add_argument('-m', '--mod', '--model',\n\t\t\t\tnargs = '?',\n\t\t\t\tchoices = ['relative', 'absolute', 'stability'],\n\t\t\t\trequired = True)\n\n\t#Adds the control genes argument, taking a list of gene names\n\tparser.add_argument('-cg', '--cgenes', '--controlgenes',\n\t\t\t\tnargs = '+',\n\t\t\t\trequired = True)\n\n\t#Adds the optional control sample argument for the stability model, taking a list of sample names\n\tparser.add_argument('-cs', '--csample', '--controlsamples',\n\t\t\t\tnargs = '*')\n\n\t#Adds optional outlier cutoff\n\tparser.add_argument('-oc', '--ocutoff',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.3)\n\n\t#Adds optional max outliers\n\tparser.add_argument('-om', '--omax',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.5)\n\n\t#Adds optional encoding \n\tparser.add_argument('-e', '--encoding',\n\t\t\t\tdefault = 'ISO-8859-1')\n\n\t#Adds optional header size\n\tparser.add_argument('-hd', '--header',\n\t\t\t\tdefault = 47)\n\n\treturn vars(parser.parse_args())",
"def get_reports_params(args: Dict[str, Any]) -> Dict[str, Any]:\n params: Dict[str, Any] = {}\n arg_keys = args.keys()\n\n report_type = args.get('report_type', '')\n if report_type not in REPORT_TYPE_LABEL_NAME:\n raise ValueError(MESSAGES['INVALID_REPORT_TYPE'])\n params['report_type'] = REPORT_TYPE_LABEL_NAME[report_type]\n\n if 'type' in arg_keys:\n output_type = args.get('type', '')\n if output_type not in REPORT_TYPE_ALLOWED_FORMAT[report_type]:\n raise ValueError(\n MESSAGES['INVALID_REPORT_OUTPUT_TYPE'].format(\n ', '.join(REPORT_TYPE_ALLOWED_FORMAT[report_type])\n )\n )\n params['type'] = output_type\n\n params = validate_time_parameters(args, params)\n\n params = validate_ips_report_type_arguments(args, params)\n\n if report_type == ALERT_DETAILS_REPORT:\n params = validate_alert_report_type_arguments(args, params)\n\n return params",
"def getParameters(self): #$NON-NLS-1$\r",
"def get_arguments():\n parser = argparse.ArgumentParser(description=\"\"\"\n Yield a sorted frequency count of similar/dissimilar InChi DW/OpenBabel.\n\"\"\")\n\n parser.add_argument(\n \"file\",\n help=\"DataWarrior's list file exported after running the macro.\")\n args = parser.parse_args()\n\n data = args.file\n return data",
"def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params",
"def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out",
"def get_arguments():\n\tparser.add_argument('-i', '--interface', help='interface to affect')\n\tparser.add_argument('-m','--mac', help='mac to allocate')\n\n\targs = parser.parse_args()\n\tinterface = args.interface\n\tmac = args.mac\n\treturn (interface, mac)",
"def get_params(self):\n pass",
"def get_arguments(self):\n args = self.parser.parse_args()\n config = None\n with open(args.config_file, \"r\") as f:\n config = json.load(f)\n\n if \"collections\" in config:\n if len(config[\"collections\"]) > 0:\n collection = config[\"collections\"][0]\n if \"collection_name\" in collection:\n self.collection_name = collection[\"collection_name\"]\n else:\n raise AttributeError(\n \"'collection_name' not present in 'collections'!\"\n )\n self.slack_channel = collection[\"slack_channel\"]\n else:\n raise AttributeError(\n \"No 'collections' details found in config_file!\"\n )\n else:\n raise AttributeError(\"'collections' not present in config_file!\")\n\n if \"postman_api_key\" in config:\n self.postman_api_key = config[\"postman_api_key\"]\n else:\n raise AttributeError(\n \"'postman_api_key' not present in config_file!\"\n )\n\n if \"trigger_interval\" in config:\n self.trigger_interval = config[\"trigger_interval\"]\n\n if \"slack_token\" in config:\n self.slack_token = config[\"slack_token\"]\n else:\n raise AttributeError(\"'slack_token' not present in config_file!\")\n\n return (\n self.collection_name,\n self.postman_api_key,\n self.trigger_interval,\n self.slack_channel,\n self.slack_token,\n )",
"def report_args(args):\n\n print (\"SETTINGS:\\n\")\n print (\"-f : Output data file >> {:s}\".format(args.file))\n print (\"-l : Length of data series >> {:d}\".format(args.length))\n print (\"-p : Process >> {:s}\".format(args.process))\n print (\"-d : Ouput diretory >> {:s}\".format(args.directory))\n print (\"\\n\")",
"def reports_cli():",
"def args(self):\n return self.cmd_args",
"def get_params(self):",
"def get_diameters():\n return Global_Module.global_diameters",
"def get_resource_params():\n return Parameter.list()",
"def parameters(self):\n return []",
"def parameters(self):\n pass",
"def get_params():\n\n parser = get_params_parser()\n args = parser.parse_args()\n\n tasks = [args.raw, args.enrich, args.identities_load, args.identities_merge, args.panels]\n\n if not any(tasks):\n print(\"No tasks enabled\")\n sys.exit(1)\n\n return args",
"def get_params(self):\n params = {}\n for step in self.steps:\n params[step[0]] = step[1].get_params()\n return params",
"def extract_command_params(arguments):\n\n # There should be only one argument\n if len(arguments) != 2:\n raise Exception('Illegal number of arguments. '\n 'Usage: '\n 'python read_repair_utility.py conf/parameter.ini')\n\n app_config_file = arguments[1]\n return app_config_file",
"def arguments(self):\n return parse_arguments(self['data'])",
"def get_params(self):\n raise NotImplementedError"
] | [
"0.73213214",
"0.6491068",
"0.6297721",
"0.6168276",
"0.61573726",
"0.6150455",
"0.6062616",
"0.6031534",
"0.6029903",
"0.6005491",
"0.59798586",
"0.5973395",
"0.59154636",
"0.58459044",
"0.58375025",
"0.5804794",
"0.58014464",
"0.57914436",
"0.5789844",
"0.57788223",
"0.5762254",
"0.5739138",
"0.57171625",
"0.56778425",
"0.5626533",
"0.5626183",
"0.5620433",
"0.56129014",
"0.5598043",
"0.5577192"
] | 0.67556924 | 1 |
Update the timesheet for today. Look at 'help timesheet' for details. | def do_upt(self, arg):
self.do_timesheet('update today') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_timesheet(self, args):\n if len(args) == 1:\n print(self.error_wrong_parameters)\n return\n try:\n started, finished = helpers.parse_date_parameters(args[1:])\n except ValueError as error:\n print(error)\n return\n if started == datetime.date.fromtimestamp(0):\n track = self.db.get_minimal_started_track()\n if track:\n started = track['started']\n else:\n started = finished\n # Get timesheet records\n tracks = self.db.get_tracks_by_date(started, finished,\n also_unfinished=False)\n # Exposure tracks to the table\n tracks_contents = self.create_tracks_contents(tracks)\n lnum = 0\n header = self.get_timesheet_header(started, finished)\n header_length = len(header.split(os.linesep))\n while(True):\n try:\n # Create the editor's contents\n contents = self.create_timesheet_contents(header, tracks_contents)\n timesheet = self.open_external_editor(contents, lnum)\n # we must get the table header here due to the length of the columns\n table_header = timesheet[header_length-1:header_length+1]\n tracks = timesheet[header_length+1:]\n except OSError, message:\n print(\"*** Error: %s\", message)\n return\n # Parse the input\n try:\n data = self.parse_timesheet(tracks, header_length)\n except errors.ParsingError as error:\n print(error.msg)\n print(\"Would you like to update the timesheet again? [Y/n] \")\n if not helpers.get_yes_no(default='y'):\n return\n table_header.extend(tracks)\n tracks_contents = \"\".join(table_header)\n lnum = error.lnum\n continue\n break\n # Update the DB\n # TODO: get rid the danger operation\n self.db.delete_tracks_by_date(started=started, finished=finished)\n data.sort(key=operator.itemgetter('started'))\n for track in data:\n self.db.create_track(track['tid'],\n track['started'], track['finished'],\n int(not bool(track['is_billed'])))\n print('The timesheet has been updated.')",
"def do_rt(self, arg):\n self.do_timesheet('report today')",
"def refresh_calendar():\n manage.refresh_calendar()",
"def update_timesheet(item):\n\tj=json.loads(item)\n\tprint(\"-----------------------garffff---------------------\")\n\tnew_employee=None;\n\ttimesheet=frappe.get_doc(\"Time Sheet\",j[\"name\"])\n\tjarray=[]\n\tfor passed_employee in j['employees']:\n\t\tif 'new' in passed_employee.keys():\n\t\t\t#create employee\n\t\t\tnew_employee=frappe.get_doc({\n\t\t\t\t\"doctype\":\"employee_link_with_time\",\n\t\t\t\t\"employee\":passed_employee['employee']\n\t\t\t});\n\n\t\tjarray.append(passed_employee['employee']);\n\t\tfor employee in timesheet.employees:\n\t\t\tif passed_employee[\"employee\"]==employee.employee:\n\t\t\t\tif \"start\" in passed_employee:\n\t\t\t\t\temployee.start=passed_employee[\"start\"]\n\t\t\t\tif \"end\" in passed_employee:\n\t\t\t\t\temployee.end=passed_employee[\"end\"];\n\tforRemove=[]\n\tfor employee_container in timesheet.employees:\n\t\tif employee_container.employee not in jarray:\n\t\t\tforRemove.append(employee_container)\n\tprint(\"___________REMOVE______________\")\n\tprint(forRemove);\n\tif forRemove:\n\t\tfor remove in forRemove:\n\t\t\ttimesheet.employees.remove(remove)\n\n\tif new_employee is not None:\n\t\ttimesheet.append(\"employees\",new_employee)\n\n\t#handel status\n\ttimesheet.status=j[\"status\"]\n\ttimesheet.save()\n\treturn frappe.get_doc(\"Time Sheet\",j[\"name\"])",
"def set_datetime_today(self):\n self.datetime_today = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')",
"def update_schedule(self, time=None):\n self.get_queryset().update_schedule(time=time)",
"def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()",
"def update_schedule(self, time=None):\n _update_schedule(self, time=time)",
"def write_daily_hours(daily_hours, sheet_obj, wb_obj):\n # Load the workbook for the progress tracking\n if daily_hours > 0:\n workbook_path = get_workbook_path()\n sheet = sheet_obj\n wb = wb_obj\n # getting the date for today\n datetime_obj = datetime.datetime.now()\n today_formatted = datetime_obj.strftime('%d.%m.%Y')\n # Fill in the progress made, if date already exists, hours are added, else new line with date + hours created\n new_daily_hours_cell = sheet.cell(row=sheet.max_row, column=2)\n if sheet.cell(row=sheet.max_row, column=1).value == today_formatted:\n hours = new_daily_hours_cell.value\n new_daily_hours_cell.value = daily_hours + float(hours)\n else:\n sheet.cell(row=(sheet.max_row + 1), column=1).value = today_formatted\n new_daily_hours_cell = sheet.cell(row=sheet.max_row, column=2)\n new_daily_hours_cell.value = daily_hours\n wb.save(workbook_path)",
"def do_upw(self, arg):\n self.do_timesheet('update week')",
"def do_upm(self, arg):\n self.do_timesheet('update week')",
"def update(self, dt):\n pass",
"def main():\n with excel_app() as app:\n print(app)\n input(\"press enter to continue ...\")\n with excel_book(app, r'E:\\scratch\\Hello1.xls') as book:\n print(book.Worksheets[0].Range(\"A1\").Value)\n book.Worksheets[0].Range(\"A2\").Value = str(datetime.date.today())\n input(\"press enter to continue ...\")\n book.Save()",
"def reschedule():\n if not schedule.empty():\n purge_events() \n\n today_s = tuple_to_str(time.localtime()[:3])\n\n # first check if exception entry exist for today in datemap\n if today_s in datemap:\n \tschedule_day(datemap[today_s])\n else:\n # otherwise schedule it as normal weekday\n schedule_day(days[time.strftime(\"%A\")])",
"def do_up(self, arg):\n self.do_timesheet('update %s' % arg)",
"def today(self):\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._update_time()\n return self._today",
"def updateToday(tradingDay):\n if date.today() != tradingDay.today:\n tradingDay = TradingDay(tradingDay.contractDetails)\n\n if tradingDay.isMarketOpen():\n if not tradingDay.marketOpen:\n tradingDay.marketOpen = True\n console().info(\"The Market Has Opened\")\n else:\n if tradingDay.marketOpen:\n tradingDay.marketOpen = False\n console().info(\"The Market Has Closed\")\n return tradingDay",
"def do_timesheet(self, arg):\n\n def _usage():\n self.do_help('timesheet')\n commands = ['update', 'report']\n words = shlex.split(arg)\n words = [token.lower() for token in words]\n if not len(words) or words[0] not in commands:\n print(self.error_wrong_parameters)\n return\n if words[0] == 'update':\n self.update_timesheet(words)\n elif words[0] == 'report':\n self.report_timesheet(words)\n return",
"def update_time(self):\n pass # Do nothing",
"def test_reportperiod_updatetimesheet_self(self):\n date = self.reporting_period.start_date.strftime('%Y-%m-%d')\n response = self.app.get(\n reverse(\n 'reportingperiod:UpdateTimesheet',\n kwargs={'reporting_period': date}\n ),\n headers={'X_FORWARDED_EMAIL': self.regular_user.email},\n )\n self.assertEqual(response.status_code, 200)",
"def work_refresh(self):\n now = dt.now()\n self.eisenhower_priority()\n p_week = now.isocalendar()[1] - self.work_datetime.isocalendar()[1]\n\n if (1 <= p_week) and (self.priority not in [1, 2]):\n self.time_ntf = now\n else:\n pass",
"def test_update_dt(self):\n result = self.test_client.update_dt\n\n assert result == \"2020-02-18 01:54:13\"",
"def update(self, dt):\n\t\tpass",
"def setEvaluationDate(cell):\n global _qToday\n \n _qToday = toDate(cell.value)\n if not to_date:\n _qToday = Settings.instance().getEvaluationDate()\n else:\n Settings.instance().setEvaluationDate(_qToday)\n \n return _qToday.ISO()",
"def update(self) -> None:\n self._state = \"Working\"\n self.shabbat_start = None\n self._shabbat_end = None\n today = datetime.date.today()\n if today.weekday() == 5:\n friday = today + datetime.timedelta(-1)\n else:\n friday = today + datetime.timedelta((4 - today.weekday()) % 7)\n\n saturday = friday + datetime.timedelta(+1)\n\n year = str(friday.year)\n month = (\"0\" + str(friday.month))[-2:]\n\n hebcal_url = (\n \"http://www.hebcal.com/hebcal/?v=1&cfg=json&maj=off&min=off\"\n \"&mod=off&nx=off&year={}&month={}&ss=off&mf=off&c=on&geo=city\"\n \"&city={}&m={}&s=off&i=off&b={}\"\n ).format(\n year,\n month,\n self._city,\n str(self._havdalah),\n str(self._candle_light),\n )\n\n hebcal_response = requests.get(hebcal_url)\n hebcal_json_input = hebcal_response.text\n hebcal_decoded = json.loads(hebcal_json_input)\n\n if \"error\" in hebcal_decoded:\n self._state = hebcal_decoded[\"error\"]\n _LOGGER.error(hebcal_decoded[\"error\"])\n else:\n for item in hebcal_decoded[\"items\"]:\n if item[\"category\"] == \"candles\":\n ret_date = datetime.datetime.strptime(\n item[\"date\"][0:-6].replace(\"T\", \" \"),\n \"%Y-%m-%d %H:%M:%S\",\n )\n if ret_date.date() == friday:\n self._shabbat_start = ret_date\n elif item[\"category\"] == \"havdalah\":\n ret_date = datetime.datetime.strptime(\n item[\"date\"][0:-6].replace(\"T\", \" \"),\n \"%Y-%m-%d %H:%M:%S\",\n )\n if ret_date.date() == saturday:\n self._shabbat_end = ret_date\n\n self._state = \"Updated\"",
"def time_automation_listener(now):\n hass.async_add_job(action, {\n 'trigger': {\n 'platform': 'time',\n 'now': now,\n },\n })",
"def update(self) -> None:\n new_values = self.worksheet.get_values()\n self.local_sheet.rebuild(new_values)",
"def update_schedule(self, time=None, for_object=None):\n _update_schedule([self], time, for_object)",
"def _update_schedule(self, interval):\n while True:\n if Schedule().update_current_week():\n self._view_schedule()\n print 'Yes'\n time.sleep(interval)",
"def do_rrt(self, arg):\n self.do_timesheet('report extend track today')"
] | [
"0.62373966",
"0.5995113",
"0.5968316",
"0.5938137",
"0.5781066",
"0.57411987",
"0.56810856",
"0.5629616",
"0.5542387",
"0.54968566",
"0.54915357",
"0.54783255",
"0.547548",
"0.54681015",
"0.54673827",
"0.54571974",
"0.5454602",
"0.5431281",
"0.54092795",
"0.53934515",
"0.53598493",
"0.5342674",
"0.5336242",
"0.5324056",
"0.5323502",
"0.5318583",
"0.52980906",
"0.52972716",
"0.5274724",
"0.5263983"
] | 0.7854881 | 0 |
Update the timesheet for a week. Look at 'help timesheet' for details. | def do_upw(self, arg):
self.do_timesheet('update week') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_upm(self, arg):\n self.do_timesheet('update week')",
"def update_week(sched, year, stype, week):\n games = week_schedule(year, stype, week)\n if not games:\n return False\n\n for game in games:\n sched[game['eid']] = game\n\n return True",
"def do_rw(self, arg):\n self.do_timesheet('report week')",
"def do_upt(self, arg):\n self.do_timesheet('update today')",
"def next_week(self, table):\n if (\"week\" + str(self.week + 1)) not in self.t.timeline:\n self.t.add_week()\n self.week += 1\n self.clear_frame(table)\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table)",
"def work_refresh(self):\n now = dt.now()\n self.eisenhower_priority()\n p_week = now.isocalendar()[1] - self.work_datetime.isocalendar()[1]\n\n if (1 <= p_week) and (self.priority not in [1, 2]):\n self.time_ntf = now\n else:\n pass",
"def weekly():",
"def week(update: Update, _: CallbackContext) -> None:\n running_total, average_dose_per_day = return_weekly_figure()\n text = \\\n (\n \"\\n📅 *Rolling 7 Day Stats*\\n\" \n + \"\\n\\t\\t\\t📈 Rolling 7 Day Doses - \" + str('{:,}'.format(running_total))\n + \"\\n\\t\\t\\t💉 Average Daily Doses - \" + str('{:,}'.format(average_dose_per_day)) \n )\n update.message.reply_markdown(text)\n logger.info(\"Getting week update for \" + str(update.message.chat_id))",
"def week(self, week):\n\n self._week = week",
"def do_rrw(self, arg):\n self.do_timesheet('report extend track week')",
"def lessons_week(self, bot, update, group_name):\n week_number = self.week()\n bot.send_message(update.message.chat_id,\n text='`{}`\\n'.format(group_name) + self.timetable.lessons_week(group_name, week_number),\n parse_mode='Markdown')",
"def update_timesheet(item):\n\tj=json.loads(item)\n\tprint(\"-----------------------garffff---------------------\")\n\tnew_employee=None;\n\ttimesheet=frappe.get_doc(\"Time Sheet\",j[\"name\"])\n\tjarray=[]\n\tfor passed_employee in j['employees']:\n\t\tif 'new' in passed_employee.keys():\n\t\t\t#create employee\n\t\t\tnew_employee=frappe.get_doc({\n\t\t\t\t\"doctype\":\"employee_link_with_time\",\n\t\t\t\t\"employee\":passed_employee['employee']\n\t\t\t});\n\n\t\tjarray.append(passed_employee['employee']);\n\t\tfor employee in timesheet.employees:\n\t\t\tif passed_employee[\"employee\"]==employee.employee:\n\t\t\t\tif \"start\" in passed_employee:\n\t\t\t\t\temployee.start=passed_employee[\"start\"]\n\t\t\t\tif \"end\" in passed_employee:\n\t\t\t\t\temployee.end=passed_employee[\"end\"];\n\tforRemove=[]\n\tfor employee_container in timesheet.employees:\n\t\tif employee_container.employee not in jarray:\n\t\t\tforRemove.append(employee_container)\n\tprint(\"___________REMOVE______________\")\n\tprint(forRemove);\n\tif forRemove:\n\t\tfor remove in forRemove:\n\t\t\ttimesheet.employees.remove(remove)\n\n\tif new_employee is not None:\n\t\ttimesheet.append(\"employees\",new_employee)\n\n\t#handel status\n\ttimesheet.status=j[\"status\"]\n\ttimesheet.save()\n\treturn frappe.get_doc(\"Time Sheet\",j[\"name\"])",
"async def async_update(self):\n # Default is no workday\n self._state = False\n\n # Get ISO day of the week (1 = Monday, 7 = Sunday)\n date = get_date(dt.now()) + timedelta(days=self._days_offset)\n day = date.isoweekday() - 1\n day_of_week = day_to_string(day)\n\n if self.is_include(day_of_week, date):\n self._state = True\n\n if self.is_exclude(day_of_week, date):\n self._state = False",
"def reporting_week(self):\n\n print(\"Week Numbers:\")\n print(self.time_stamp)\n print(self.time_stamp_iso)\n print(\"Current = {}\".format(self.current_week()))\n print(\"Reporting = {}\".format(self.current_week() - 1))",
"def last_week(self, table):\n if self.week == 1:\n return\n self.week -= 1\n self.clear_frame(table)\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table)",
"def full_weeks(self, bot, update, group_name):\n week_number = self.week()\n bot.send_message(update.message.chat_id,\n text='`{}`\\n'.format(group_name) + self.timetable.lessons_week(group_name, week_number),\n parse_mode='Markdown')\n week_number.next()\n bot.send_message(update.message.chat_id,\n text=self.timetable.lessons_week(group_name, week_number),\n parse_mode='Markdown')",
"def _update_schedule(self, interval):\n while True:\n if Schedule().update_current_week():\n self._view_schedule()\n print 'Yes'\n time.sleep(interval)",
"def update_timesheet(self, args):\n if len(args) == 1:\n print(self.error_wrong_parameters)\n return\n try:\n started, finished = helpers.parse_date_parameters(args[1:])\n except ValueError as error:\n print(error)\n return\n if started == datetime.date.fromtimestamp(0):\n track = self.db.get_minimal_started_track()\n if track:\n started = track['started']\n else:\n started = finished\n # Get timesheet records\n tracks = self.db.get_tracks_by_date(started, finished,\n also_unfinished=False)\n # Exposure tracks to the table\n tracks_contents = self.create_tracks_contents(tracks)\n lnum = 0\n header = self.get_timesheet_header(started, finished)\n header_length = len(header.split(os.linesep))\n while(True):\n try:\n # Create the editor's contents\n contents = self.create_timesheet_contents(header, tracks_contents)\n timesheet = self.open_external_editor(contents, lnum)\n # we must get the table header here due to the length of the columns\n table_header = timesheet[header_length-1:header_length+1]\n tracks = timesheet[header_length+1:]\n except OSError, message:\n print(\"*** Error: %s\", message)\n return\n # Parse the input\n try:\n data = self.parse_timesheet(tracks, header_length)\n except errors.ParsingError as error:\n print(error.msg)\n print(\"Would you like to update the timesheet again? [Y/n] \")\n if not helpers.get_yes_no(default='y'):\n return\n table_header.extend(tracks)\n tracks_contents = \"\".join(table_header)\n lnum = error.lnum\n continue\n break\n # Update the DB\n # TODO: get rid the danger operation\n self.db.delete_tracks_by_date(started=started, finished=finished)\n data.sort(key=operator.itemgetter('started'))\n for track in data:\n self.db.create_track(track['tid'],\n track['started'], track['finished'],\n int(not bool(track['is_billed'])))\n print('The timesheet has been updated.')",
"def update_weekly_total(areacode=AREACODE,areaname=AREA):\n start,stop=model_calcs.RANGE_WEEK\n log.debug(f'Processing {areaname}')\n for week in range(start,stop+1):\n end_day=ons_week.week(week)\n \n week_total=weekly_total(end_day,areacode=areacode,areaname=areaname)\n #print(f'{areaname}: Weektotal for week number {week} ending {end_day}: {week_total}')\n \n if week_total is not None:\n try:\n stored,created=CovidWeek.objects.get_or_create(areacode=areacode,week=week)\n #print(stored.weeklycases)\n if stored.weeklycases != week_total:\n log.debug(f'{areaname}: updating week {week} from {stored.weeklycases} to {week_total}')\n stored.weeklycases=week_total\n stored.areaname=areaname\n stored.save()\n if created:\n stored.nation=ons_week.nation[areacode]\n stored.areaname=areaname\n log.debug(f'Created new entry for week {week} for {areaname}')\n stored.week=week\n stored.save()\n except Exception as e:\n log.error(e)\n log.error(f'No data stored for {areaname} week {week}')\n else:\n log.error(f'Bypassing {areaname} - no data')",
"def period_week_at(self, at_time=\"00:00:00\", week_day=\"Monday\"):\n self.period_at(unit=\"week\", at_time=at_time, week_day=week_day)\n\n return self",
"def write_daily_hours(daily_hours, sheet_obj, wb_obj):\n # Load the workbook for the progress tracking\n if daily_hours > 0:\n workbook_path = get_workbook_path()\n sheet = sheet_obj\n wb = wb_obj\n # getting the date for today\n datetime_obj = datetime.datetime.now()\n today_formatted = datetime_obj.strftime('%d.%m.%Y')\n # Fill in the progress made, if date already exists, hours are added, else new line with date + hours created\n new_daily_hours_cell = sheet.cell(row=sheet.max_row, column=2)\n if sheet.cell(row=sheet.max_row, column=1).value == today_formatted:\n hours = new_daily_hours_cell.value\n new_daily_hours_cell.value = daily_hours + float(hours)\n else:\n sheet.cell(row=(sheet.max_row + 1), column=1).value = today_formatted\n new_daily_hours_cell = sheet.cell(row=sheet.max_row, column=2)\n new_daily_hours_cell.value = daily_hours\n wb.save(workbook_path)",
"def set_schedule(self, day, week, schedule):\n self.schedule['schedule'][day][week] = schedule",
"def update_worksheet(data, worksheet):\n print(f'Updating {worksheet} worksheet...\\n')\n worksheet_to_be_updated = SHEET.worksheet(worksheet)\n worksheet_to_be_updated.append_row(data)\n print(f'{worksheet} worksheet updated successfully!...\\n')",
"def period_week_at(self, at_time=\"00:00:00\", week_day=\"Monday\"):\n for task in self._tasks:\n task.period_week_at(at_time=at_time, week_day=week_day)\n\n return self",
"def do_up(self, arg):\n self.do_timesheet('update %s' % arg)",
"def day_of_week(dt):\n cday = dt\n mday = 2\n uday = cday.isocalendar()[2] + mday\n try:\n if uday > 7:\n CURRDAY = uday - 7\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week>7 : \", CURRDAY)\n else:\n CURRDAY = uday\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week : \", CURRDAY)\n return CURRDAY\n except Exception as e:\n log.exception(\"1;EME;FAILURE;700;SCHEDULE ERROR \" + str(e), exc_info=False)\n sys.exit(0)",
"def update(self) -> None:\n new_values = self.worksheet.get_values()\n self.local_sheet.rebuild(new_values)",
"def update_gsheet(session, sheet, player_name, chosen_set):\n ws = sheet.worksheet(session.name)\n ws.update_cell(session.pick_num + 1, 4, chosen_set)\n player_col = 8 + name_to_pindex(session, player_name)\n pick_row = 1 + session.round_num\n ws.update_cell(pick_row, player_col, chosen_set)",
"def _set_date_weekly(self):\n dt_weekday = dt.now()\n try:\n dt_weekday = self._get_datetime_or_error()\n except ValueError:\n self._dt_string = \"\"\n raise InvalidDateError(detail={\n \"message\": \"Invalid Date Provided\",\n \"period\": self.period.value,\n \"date\": self._given_date\n })\n week_start = dt_weekday - timedelta(days=dt_weekday.weekday())\n self.date['year'] = week_start.year\n self.date['month'] = week_start.month\n self.date['day'] = week_start.day",
"def lessons_next_week(self, bot, update, group_name):\n week_number = self.week()\n week_number.next()\n\n bot.send_message(update.message.chat_id,\n text='`{}`\\n'.format(group_name) + self.timetable.lessons_week(group_name, week_number),\n parse_mode='Markdown')"
] | [
"0.75189185",
"0.6730039",
"0.6707246",
"0.6574584",
"0.63909006",
"0.636376",
"0.6340887",
"0.6294754",
"0.61888754",
"0.6179247",
"0.6088131",
"0.6087225",
"0.6061335",
"0.60387963",
"0.60356843",
"0.5992439",
"0.59173197",
"0.59076345",
"0.57140833",
"0.5708552",
"0.56935287",
"0.5685043",
"0.5660801",
"0.5651252",
"0.5624221",
"0.55900645",
"0.5589487",
"0.5586264",
"0.55656946",
"0.5533454"
] | 0.78006864 | 0 |
Update the timesheet for a month. Look at 'help timesheet' for details. | def do_upm(self, arg):
self.do_timesheet('update week') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_upt(self, arg):\n self.do_timesheet('update today')",
"def setMonth(self, *args):\n return _libsbml.Date_setMonth(self, *args)",
"def update_timesheet(self, args):\n if len(args) == 1:\n print(self.error_wrong_parameters)\n return\n try:\n started, finished = helpers.parse_date_parameters(args[1:])\n except ValueError as error:\n print(error)\n return\n if started == datetime.date.fromtimestamp(0):\n track = self.db.get_minimal_started_track()\n if track:\n started = track['started']\n else:\n started = finished\n # Get timesheet records\n tracks = self.db.get_tracks_by_date(started, finished,\n also_unfinished=False)\n # Exposure tracks to the table\n tracks_contents = self.create_tracks_contents(tracks)\n lnum = 0\n header = self.get_timesheet_header(started, finished)\n header_length = len(header.split(os.linesep))\n while(True):\n try:\n # Create the editor's contents\n contents = self.create_timesheet_contents(header, tracks_contents)\n timesheet = self.open_external_editor(contents, lnum)\n # we must get the table header here due to the length of the columns\n table_header = timesheet[header_length-1:header_length+1]\n tracks = timesheet[header_length+1:]\n except OSError, message:\n print(\"*** Error: %s\", message)\n return\n # Parse the input\n try:\n data = self.parse_timesheet(tracks, header_length)\n except errors.ParsingError as error:\n print(error.msg)\n print(\"Would you like to update the timesheet again? [Y/n] \")\n if not helpers.get_yes_no(default='y'):\n return\n table_header.extend(tracks)\n tracks_contents = \"\".join(table_header)\n lnum = error.lnum\n continue\n break\n # Update the DB\n # TODO: get rid the danger operation\n self.db.delete_tracks_by_date(started=started, finished=finished)\n data.sort(key=operator.itemgetter('started'))\n for track in data:\n self.db.create_track(track['tid'],\n track['started'], track['finished'],\n int(not bool(track['is_billed'])))\n print('The timesheet has been updated.')",
"def do_rm(self, arg):\n self.do_timesheet('report month')",
"def do_timesheet(self, arg):\n\n def _usage():\n self.do_help('timesheet')\n commands = ['update', 'report']\n words = shlex.split(arg)\n words = [token.lower() for token in words]\n if not len(words) or words[0] not in commands:\n print(self.error_wrong_parameters)\n return\n if words[0] == 'update':\n self.update_timesheet(words)\n elif words[0] == 'report':\n self.report_timesheet(words)\n return",
"def set_finish_month(self, month):\n return self.form.set_value(\"output period \\\"month to\\\"\", MONTHS[month - 1])",
"def new_month(self, month: int, year: int, bill: Bill) -> None:\n self.bill = bill\n self.bill.set_rates(\"MTM\", MTM_MINS_COST)\n self.bill.add_fixed_cost(MTM_MONTHLY_FEE)",
"def _set_month(self, month) -> bool:\n if self.set_start_month(month) is False:\n return False\n return self.set_finish_month(month)",
"def setIndexMonth(self,index):\n self.indexMonth = index",
"def calendarPageChanged(self, year, month):\n success = self.porker_thread.extendDates(datetime.date(year, month, 1))\n #if not success:\n # self.alertMessage(\"Failure!\",\"Unable to extend the thread's dates for some reason.\")\n #efficiency = self.porker_thread.getEfficiencyFor(self.getActiveDate())\n #self.porker_thread.sentDatesData = False",
"def new_month(self, month: int, year: int, bill: Bill) -> None:\n self.bill = bill\n self.bill.set_rates(\"TERM\", TERM_MINS_COST)\n self.bill.add_fixed_cost(TERM_MONTHLY_FEE)\n if not ((self.end.month < month and self.end.year <= year) or\n self.end.year < year):\n # refresh included minutes and SMSs\n self.bill.add_free_minutes((-1) * self.bill.free_min)\n if self.start.month == month and self.start.year == year:\n # if first month, add term deposit to bill.\n self.bill.add_fixed_cost(TERM_DEPOSIT)\n else:\n self._carried_term = True",
"def update_timesheet(item):\n\tj=json.loads(item)\n\tprint(\"-----------------------garffff---------------------\")\n\tnew_employee=None;\n\ttimesheet=frappe.get_doc(\"Time Sheet\",j[\"name\"])\n\tjarray=[]\n\tfor passed_employee in j['employees']:\n\t\tif 'new' in passed_employee.keys():\n\t\t\t#create employee\n\t\t\tnew_employee=frappe.get_doc({\n\t\t\t\t\"doctype\":\"employee_link_with_time\",\n\t\t\t\t\"employee\":passed_employee['employee']\n\t\t\t});\n\n\t\tjarray.append(passed_employee['employee']);\n\t\tfor employee in timesheet.employees:\n\t\t\tif passed_employee[\"employee\"]==employee.employee:\n\t\t\t\tif \"start\" in passed_employee:\n\t\t\t\t\temployee.start=passed_employee[\"start\"]\n\t\t\t\tif \"end\" in passed_employee:\n\t\t\t\t\temployee.end=passed_employee[\"end\"];\n\tforRemove=[]\n\tfor employee_container in timesheet.employees:\n\t\tif employee_container.employee not in jarray:\n\t\t\tforRemove.append(employee_container)\n\tprint(\"___________REMOVE______________\")\n\tprint(forRemove);\n\tif forRemove:\n\t\tfor remove in forRemove:\n\t\t\ttimesheet.employees.remove(remove)\n\n\tif new_employee is not None:\n\t\ttimesheet.append(\"employees\",new_employee)\n\n\t#handel status\n\ttimesheet.status=j[\"status\"]\n\ttimesheet.save()\n\treturn frappe.get_doc(\"Time Sheet\",j[\"name\"])",
"def month(self, month):\n\n self._month = month",
"def _next_month(self):\r\n self._canvas.place_forget()\r\n\r\n year, month = self._date.year, self._date.month\r\n self._date = self._date + self.timedelta(\r\n days=calendar.monthrange(year, month)[1] + 1)\r\n self._date = self.datetime(self._date.year, self._date.month, 1)\r\n self._build_calendar() # reconstruct calendar\r",
"def every_month(self, time, function, args=None, kwargs=None, name=None):\n if args is None:\n args = list()\n if kwargs is None:\n kwargs = dict()\n if name is None:\n name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')\n self.config[name] = {'mode':'every_month', 'time':time, 'function':function, 'args':args, \n 'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),\n 'time_init':datetime.datetime.now()}\n self.params.tracker_dict[name] = dict()",
"def changeDisplayedMonth(self):\n #ho bisogno di sapere qual è il mese mostrato\n currentMonth = self.indexMonth\n currentYear = self.currentYear\n\n sender = self.sender().objectName()\n if sender == 'bot_next':\n # if currentMonth < 11:\n if self.indexMonth < 11:\n self.indexMonth += 1\n self.setBaseDate(self.baseDate.addMonths(1))\n else:\n self.indexMonth = 0\n self.setCurrentYear(currentYear+1)\n # print('baseDate before', self.baseDate)\n self.setBaseDate(self.baseDate.addMonths(1))\n # print('baseDate after', self.baseDate)\n # print('new Year: ', self.currentYear)\n\n elif sender == 'bot_prev':\n # if currentMonth > 0:\n if self.indexMonth > 0:\n self.indexMonth -= 1\n self.setBaseDate(self.baseDate.addMonths(-1))\n else:\n self.indexMonth = 11\n self.setCurrentYear(currentYear-1)\n self.setBaseDate(self.baseDate.addMonths(-1))\n # print('new Year: ', self.currentYear)\n if currentMonth != self.indexMonth:\n # print(f'currentPageChanged.emit({self.indexMonth})')\n self.currentPageChanged.emit(self.indexMonth)\n self.combo_mesi.setCurrentIndex(self.indexMonth)\n if currentYear != self.currentYear:\n # print('current year changed')\n self.setListaGiorniDellAnno(self.createDates(self.baseDate), self.indexMonth)",
"def _next_month(self):\n self._canvas.place_forget()\n\n year, month = self._date.year, self._date.month\n self._date = self._date + self.timedelta(\n days=calendar.monthrange(year, month)[1] + 1)\n self._date = self.datetime(self._date.year, self._date.month, 1)\n self._build_calendar() # reconstruct calendar",
"def new_month(self, month: int, year: int, bill: Bill) -> None:\n raise NotImplementedError",
"def do_time_travel(self, args):\n for bank in known_banks():\n bank.reset_monthly_spending_caps()\n\n print(\"Successfully travelled one month into the future.\\n\")",
"def set_month(self, month):\n # if the sting value of month is correct then we transform it into an\n # integer\n if isinstance(month, str):\n if month in MONTH_STR:\n month_int = MONTH_STR.index(month) + 1\n else:\n raise ValueError(\"Weekday as a string can only take the value {}\".format(MONTH_STR))\n else:\n month_int = month\n\n # Check if month_int in good range\n if month_int not in range(1, 13):\n raise ValueError(\"Month value must be in range [1..12] but is {}\".format(month_int))\n\n # First we separate the tens and the digit\n tens, digit = divmod(int(month_int), 10)\n\n # Then we add them in a single int\n reg_value = (tens << 4) | digit\n\n # The we add it to the register\n self.__write_register(_REGISTER_MONTH, reg_value)",
"def test_reportperiod_updatetimesheet_save_only_set(self):\n date = self.reporting_period.start_date.strftime('%Y-%m-%d')\n response = self.app.post(\n reverse(\n 'reportingperiod:UpdateTimesheet',\n kwargs={'reporting_period': date}\n ),\n {\n 'save_only': '1',\n 'timecardobject_set-TOTAL_FORMS': '1',\n 'timecardobject_set-INITIAL_FORMS': '0',\n 'timecardobject_set-MIN_NUM_FORMS': '0',\n 'timecardobject_set-MAX_NUM_FORMS': '1000',\n 'timecardobject_set-0-project': '4',\n 'timecardobject_set-0-hours_spent': '',\n },\n headers={'X_FORWARDED_EMAIL': self.regular_user.email},\n )\n formset = response.context['formset']\n self.assertTrue(formset.save_only)",
"def monthly_schedule(self,month):\n response = requests.get(f'http://company.com/{self.lname}/{month}')\n if response.ok:\n return response.text\n else:\n return 'Bad Response!'",
"def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True",
"def set_Month(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Month', value)",
"def new_month(self, month: int, year: int, bill: Bill) -> None:\n self.bill = bill\n self.bill.set_rates(\"PREPAID\", PREPAID_MINS_COST)\n if self.balance > (-10.0):\n self.balance += (-25.0)\n self.bill.add_fixed_cost(self.balance)",
"def test_reportperiod_updatetimesheet_self(self):\n date = self.reporting_period.start_date.strftime('%Y-%m-%d')\n response = self.app.get(\n reverse(\n 'reportingperiod:UpdateTimesheet',\n kwargs={'reporting_period': date}\n ),\n headers={'X_FORWARDED_EMAIL': self.regular_user.email},\n )\n self.assertEqual(response.status_code, 200)",
"def set_month(self, month):\r\n\t\tmonths = ['Enero', 'Febrero', 'Marzo', 'Abril',\r\n\t\t\t\t 'Mayo', 'Junio', 'Julio', 'Agosto'\r\n\t\t\t\t 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']\r\n\t\tfor i in range(12):\r\n\t\t\tif month == i: \r\n\t\t\t\treturn months[i-1]",
"def refresh_calendar():\n manage.refresh_calendar()",
"def click_next_month(self):\n self.action.click(self.calendar_next)\n time.sleep(3)",
"def add_month(cab_data):\n return cab_data.assign(month=lambda x: x.time.dt.month)"
] | [
"0.62956774",
"0.5872806",
"0.5851527",
"0.57449275",
"0.5719809",
"0.5693252",
"0.5651246",
"0.56409156",
"0.56211275",
"0.56069833",
"0.5585783",
"0.5505197",
"0.5472943",
"0.54604954",
"0.54581743",
"0.5444931",
"0.544139",
"0.5417249",
"0.54065466",
"0.53555846",
"0.5322098",
"0.53163",
"0.5157692",
"0.5142458",
"0.5123824",
"0.51214653",
"0.51142925",
"0.5111652",
"0.5104535",
"0.5103221"
] | 0.60178006 | 1 |
Report the timesheet for today. | def do_rt(self, arg):
self.do_timesheet('report today') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_upt(self, arg):\n self.do_timesheet('update today')",
"def do_rrt(self, arg):\n self.do_timesheet('report extend track today')",
"def show_today_tasks(self):\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline == today.strftime('%Y-%m-%d')).all()\n print(f'Today {today.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()",
"def today(self):\r\n return RecordsToday(self)",
"def set_datetime_today(self):\n self.datetime_today = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')",
"def today(self):\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._update_time()\n return self._today",
"def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, 
lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return tasks",
"def today():\n return datetime.today()",
"def today(self):\n return [t for t in self.tasks if t.date == datetime.date.today()]",
"def every_day():\n logger.info('[ EVERY_DAY ] [ %s ]' % str(datetime.now().time()))",
"def today(cls):\n t = _time.time()\n return cls.fromtimestamp(t)",
"def today():\n return date.today()",
"def today():\n this_cal = Kalendar()\n to_display = \"TODAY:<BR><BR>\"\n\n elements = this_cal.get_all_day_elements(datetime.datetime.now())\n for element in elements:\n for key, values in element.items():\n to_display += key + \":<BR>\"\n for val in values:\n to_display += \" \" + val + \"<BR>\"\n\n return to_display",
"def report_timesheet(self, args):\n if len(args) == 1:\n print(self.error_wrong_parameters)\n return\n # Shift 'report' keyword\n args = args[1:]\n pname = tname = ''\n mask = TS_GROUP_BY['date'] | TS_GROUP_BY['task']\n # Get report parameters\n try:\n tname, pname, started, finished, mask = \\\n self.get_report_parameters(args, default_mask=mask)\n except ValueError, error:\n print(error)\n return\n if started == datetime.date.fromtimestamp(0):\n track = self.db.get_minimal_started_track(tname, pname)\n if not track:\n print(\"There are no tracks have been found.\")\n return\n started = track['started']\n # Check if there is an unfinished task\n task = self.db.get_active_task(started, finished, tname, pname)\n if task:\n print(u\"Warning: There is an unfinished task '{task}#{project}' \"\n \"in the period from '{started}' to '{finished}'.{eol}\"\n \"The unfinished record will be ignored.{eol}\"\n \"Proceed creating the report? [Y/n] \"\n \"\".format(task=task['tname'], project=task['pname'],\n started=datetime.date.strftime(\n started, \"%x\").decode('utf8'),\n finished=datetime.date.strftime(\n finished, \"%x\").decode('utf8'),\n eol=os.linesep), end='')\n if not helpers.get_yes_no(default='y'):\n return\n # Make a report\n self.make_report(tname, pname, started, finished, mask)",
"def todayDate(self):\n return time.strftime(\"%m/%d/%Y\", time.localtime())",
"def get_today():\n return datetime.today()",
"def test_today(self):\n self.assertEquals(\n self.builder._today(), date.today().strftime('%Y-%m-%d'))",
"def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True",
"def check_today(self):\n import time\n _time = time.time\n time.time = lambda: 1003539807.89\n try:\n assert Date(\"today\") == Date(\"10/19/2001\"), \"wrong date\"\n finally:\n time.time = _time",
"def timesheet(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet.html'\r\n )",
"def TODAY():\n return datetime.date.today()",
"def time_to_generate_monthly_report(today):\n # We will make three attempts to generate the monthly report every month\n return today.day in (1, 2, 3)",
"def get_date_hour_today() -> str:\n return datetime.now().strftime(\"%Y-%m-%dT%H\")",
"def logDayDetails(self):\n console().info(\"Today is {}.\".format(self.today.strftime(DATE_FMT)))\n hours = self.contractDetails.tradingHours.split(\";\")[0].split(\":\")[1]\n console().info(\"Today's Trading Hours Are: {}\".format(hours))\n if self.normalDay:\n console().info(\"Today is a Valid Day for Trading\")\n else:\n console().info(\"Today is not a Valid Trading Day. Sleeping Until Tomorrow\")",
"def timesheet_all(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet_all.html'\r\n )",
"def today(cls):\n return date()",
"def _today() -> datetime.date:\n return datetime.today().date()",
"def do_rw(self, arg):\n self.do_timesheet('report week')",
"def today(self) -> bool:\n return self._algorithm.can_study_now(self._stat)",
"def get_today_date():\n return date.today()"
] | [
"0.6755711",
"0.6618635",
"0.6351735",
"0.61901796",
"0.61826795",
"0.6099821",
"0.59579736",
"0.59521276",
"0.59359",
"0.59144366",
"0.585397",
"0.5832119",
"0.58296067",
"0.5788071",
"0.5755071",
"0.56960785",
"0.56843895",
"0.5677893",
"0.5671634",
"0.5670418",
"0.5661753",
"0.5646851",
"0.56259257",
"0.5612956",
"0.5604969",
"0.55848247",
"0.55586535",
"0.5551767",
"0.54977435",
"0.5497534"
] | 0.73340017 | 0 |