Dataset columns:
  query            string     (lengths 9 to 9.05k)
  document         string     (lengths 10 to 222k)
  metadata         dict
  negatives        sequence   (length 30)
  negative_scores  sequence   (length 30)
  document_score   string     (lengths 4 to 10)
  document_rank    string     (2 classes)
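For orientation, here is a minimal sketch of loading one row and checking it against the schema above. The repository id is not given in this preview, so the path below is a placeholder, and the `datasets` library is assumed to be installed.

from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")  # placeholder repository id
row = ds[0]
print(row["query"][:80])             # natural-language docstring used as the query
print(row["document"][:80])          # the positive code snippet
print(len(row["negatives"]))         # 30 mined negative snippets
print(len(row["negative_scores"]))   # 30 scores aligned with the negatives
print(row["document_score"], row["document_rank"])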
Visualise a colorspace vector as an interactive 3D figure. The colorspace can have extra columns. By default, RGB channels are clipped to the range [0, 1]. Extra arguments can be used to control the appearance of ipyvolume.scatter.
import ipyvolume as ipv
import numpy as np
from pandas import DataFrame

def show_colorspace(cspace: DataFrame, clip=True, size=0.5, marker='sphere', **kwargs) -> None:
    assert isinstance(cspace, DataFrame), "Colorspace must be a dataframe"
    assert all(np.isin(['R', 'G', 'B'], cspace.columns)), "Colorspace must contain RGB columns"
    fig = ipv.figure()
    if clip:
        # Clip RGB values to [0, 1] before using them as point colours.
        ipv.scatter(cspace.loc[:, 'R'].values, cspace.loc[:, 'G'].values, cspace.loc[:, 'B'].values,
                    color=np.clip(cspace[['R', 'G', 'B']].values, 0, 1),
                    s=size, marker=marker, **kwargs)
    else:
        ipv.scatter(cspace.loc[:, 'R'].values, cspace.loc[:, 'G'].values, cspace.loc[:, 'B'].values,
                    color=cspace[['R', 'G', 'B']].values,
                    s=size, marker=marker, **kwargs)
    ipv.show()
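A hypothetical usage sketch for show_colorspace above. The DataFrame is synthetic and ipyvolume is assumed to be available in a Jupyter environment; none of this is part of the original snippet.

import numpy as np
from pandas import DataFrame

rng = np.random.default_rng(0)
# Random RGB-ish values slightly outside [0, 1] to exercise the clip option.
cspace = DataFrame(rng.uniform(-0.1, 1.1, size=(500, 3)), columns=['R', 'G', 'B'])
show_colorspace(cspace, clip=True, size=0.8, marker='sphere')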
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drawColorColumn(x, yseq, zseq):\n dislin.curvy3(x, yseq, zseq, len(yseq))", "def plot3D(x):\n cycol = cycle('bgrcmk')\n fig = plt.figure()\n ax = Axes3D(fig)\n for i in range(5):\n ax.scatter(x[:, i, 0], x[:, i, 1], x[:, i, 2], c=next(cycol),\n marker='.')\n plt.show()", "def vector_plot(tvects,is_vect=True,orig=[0,0,0]):\n\n if is_vect:\n if not hasattr(orig[0],\"__iter__\"):\n coords = [[orig,np.sum([orig,v],axis=0)] for v in tvects]\n else:\n coords = [[o,np.sum([o,v],axis=0)] for o,v in zip(orig,tvects)]\n else:\n coords = tvects\n\n data = []\n for i,c in enumerate(coords):\n X1, Y1, Z1 = zip(c[0])\n X2, Y2, Z2 = zip(c[1])\n vector = go.Scatter3d(x = [X1[0],X2[0]],\n y = [Y1[0],Y2[0]],\n z = [Z1[0],Z2[0]],\n marker = dict(size = [0,5],\n color = ['blue'],\n line=dict(width=5,\n color='DarkSlateGrey')),\n name = 'Vector'+str(i+1))\n data.append(vector)\n\n layout = go.Layout(\n margin = dict(l = 4,\n r = 4,\n b = 4,\n t = 4)\n )\n fig = go.Figure(data=data,layout=layout)\n #pio.write_html(fig,file='index.html',auto_open=False)\n #py.plot(fig, filename = 'gdp_per_cap4', auto_open=True)\n fig.show()", "def slice_explorer(data, cmap='gray'):\n data_len = len(data)\n\n @interact(plane=(0, data_len-1), continuous_update=False)\n def display_slice(plane=data_len/2):\n fig, axis = plt.subplots(figsize=(20, 7))\n axis_3d = fig.add_subplot(133, projection='3d')\n show_plane(axis, data[plane], title='Plane {}'.format(plane), cmap=cmap)\n slice_in_3d(axis=axis_3d, shape=data.shape, plane=plane)\n plt.show()\n\n return display_slice", "def full_3d(self, quantity):\n # The data just tells you what integer grid point you are on. Not what actual x,y coordinate you\n # are at\n x = np.arange(0, self.period, self.dx)\n y = np.arange(0, self.period, self.dy)\n z = np.arange(0, self.height + self.dz, self.dz)\n points = np.array(list(itertools.product(z, x, y)))\n # Get the scalar\n scalar = self.get_scalar_quantity(quantity)\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now plot!\n self.scatter3d(points[:, 1], points[:, 2], points[\n :, 0], scalar.flatten(), labels, 'full_3d')", "def plot_3d(vector_array, save_plot_dir):\n principal_df = pd.DataFrame(data=vector_array, columns=['pc1', 'pc2', 'pc3'])\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n xs = principal_df['pc1']\n ys = principal_df['pc2']\n zs = principal_df['pc3']\n ax.scatter(xs, ys, zs, s=50, alpha=0.6, edgecolors='w')\n\n ax.set_xlabel('pc1')\n ax.set_ylabel('pc2')\n ax.set_zlabel('pc3')\n\n plt.savefig(save_plot_dir + '/3D_scatter.png')\n plt.close()", "def __call__(\n self, *, plot=None, color=plot_util.cp_int[0], multiplier=None, **kwargs\n ):\n if self.region.ndim != 3:\n raise RuntimeError(\"Only 3-dimensional regions can be plotted.\")\n\n if plot is None:\n plot = k3d.plot()\n plot.display()\n\n multiplier = self._setup_multiplier(multiplier)\n\n plot_array = np.ones((1, 1, 1)).astype(np.uint8) # avoid k3d warning\n\n rescaled_region = self.region.scale(1 / multiplier)\n bounds = [\n i\n for sublist in zip(rescaled_region.pmin, rescaled_region.pmax)\n for i in sublist\n ]\n\n plot += k3d.voxels(\n plot_array, color_map=color, bounds=bounds, outlines=False, **kwargs\n )\n\n self._axis_labels(plot, multiplier)", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n 
ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def visualize_py3js(grid, U, bounding_box=([0, 0], [1, 1]), codim=2, title=None, legend=None,\n separate_colorbars=False, rescale_colorbars=False, columns=2,\n color_map=get_cmap('viridis')):\n assert isinstance(U, VectorArray) \\\n or (isinstance(U, tuple) and all(isinstance(u, VectorArray) for u in U)\n and all(len(u) == len(U[0]) for u in U))\n if isinstance(U, VectorArray):\n size = len(U)\n U = (U.to_numpy().astype(np.float32, copy=False),)\n else:\n size = len(U[0])\n U = tuple(u.to_numpy().astype(np.float32, copy=False) for u in U)\n\n if separate_colorbars:\n if rescale_colorbars:\n vmins = tuple(np.min(u[0]) for u in U)\n vmaxs = tuple(np.max(u[0]) for u in U)\n else:\n vmins = tuple(np.min(u) for u in U)\n vmaxs = tuple(np.max(u) for u in U)\n else:\n if rescale_colorbars:\n vmins = (min(np.min(u[0]) for u in U),) * len(U)\n vmaxs = (max(np.max(u[0]) for u in U),) * len(U)\n else:\n vmins = (min(np.min(u) for u in U),) * len(U)\n vmaxs = (max(np.max(u) for u in U),) * len(U)\n\n return ThreeJSPlot(grid, color_map, title, bounding_box, codim, U, vmins, vmaxs, separate_colorbars, size)", "def show_3d(ax, pixels, colors, axis_labels):\n scale = 255\n # Set axis limits\n ax.set_xlim(*(0,255/scale))\n ax.set_ylim(*(0,255/scale))\n ax.set_zlim(*(0,255/scale))\n # Set axis labels and sizes\n axis_labels=list(axis_labels)\n #ax.tick_params(axis='both', which='major', labelsize=14, pad=8)\n ax.set_xlabel(axis_labels[0], fontsize=13, labelpad=16)\n ax.set_xticks([0,0.5,1.0])\n ax.set_ylabel(axis_labels[1], fontsize=13, labelpad=16)\n ax.set_yticks([0,0.5,1.0])\n ax.set_zlabel(axis_labels[2], fontsize=13, labelpad=16)\n ax.set_zticks([0,0.5,1.0])\n # Plot pixel values with colors given in colors_rgb\n ax.scatter(pixels[:, :, 0].ravel(),\n pixels[:, :, 1].ravel(),\n pixels[:, :, 2].ravel(),\n c=colors.reshape((-1, 3)), edgecolors='none')\n return ax", "def scplot3D(x, y, z, c, s, sf=4, ax=None, clim=None):\n if clim is None:\n vmin, vmax = get_equal_vmin_vmax(c)\n else:\n vmin, vmax = clim\n ax.set_facecolor('0.8')\n kwargs = {'edgecolor':'k', 'lw':'0.2', 'cmap':'RdBu', 'vmin':vmin, 'vmax':vmax}\n #kwargs = {'cmap':'RdBu', 'vmin':vmin, 'vmax':vmax}\n sc = ax.scatter(x, y, z, c=c, s=s*sf, depthshade=True, **kwargs)\n cbar = pltlib.add_cbar(ax, sc, label='Mass balance (m we/yr)')\n leg = add_legend(ax, sf=sf, loc='lower left')\n ax.minorticks_on()\n #ax.tick_params(left=True, right=True, bottom=True, top=True)\n return ax", "def get_3d_plot(three_d_matrix, ax, title, length):\r\n x, y, z = np.where(three_d_matrix != 0)\r\n ax.scatter(x, y, z, c='blue')\r\n ax.set_xlabel('x')\r\n ax.set_ylabel('y')\r\n ax.set_xlim(0, length)\r\n ax.set_ylim(0, length)\r\n ax.set_title(title)", "def plot2Ddata(\n xyz,\n data,\n vec=False,\n nx=100,\n ny=100,\n ax=None,\n mask=None,\n level=False,\n figname=None,\n ncontour=10,\n dataloc=False,\n contourOpts={},\n levelOpts={},\n streamplotOpts={},\n scale=\"linear\",\n clim=None,\n method=\"linear\",\n shade=False,\n shade_ncontour=100,\n shade_azimuth=-45.0,\n shade_angle_altitude=45.0,\n shadeOpts={},\n):\n\n # Error checking and set vmin, vmax\n vlimits = [None, None]\n\n if clim is not None:\n vlimits = [np.min(clim), np.max(clim)]\n\n for i, key in enumerate([\"vmin\", \"vmax\"]):\n if key in contourOpts.keys():\n if 
vlimits[i] is None:\n vlimits[i] = contourOpts.pop(key)\n else:\n if not np.isclose(contourOpts[key], vlimits[i]):\n raise Exception(\n \"The values provided in the colorbar limit, clim {} \"\n \"does not match the value of {} provided in the \"\n \"contourOpts: {}. Only one value should be provided or \"\n \"the two values must be equal.\".format(\n vlimits[i], key, contourOpts[key]\n )\n )\n contourOpts.pop(key)\n vmin, vmax = vlimits[0], vlimits[1]\n\n # create a figure if it doesn't exist\n if ax is None:\n fig = plt.figure()\n ax = plt.subplot(111)\n\n # interpolate data to grid locations\n xmin, xmax = xyz[:, 0].min(), xyz[:, 0].max()\n ymin, ymax = xyz[:, 1].min(), xyz[:, 1].max()\n x = np.linspace(xmin, xmax, nx)\n y = np.linspace(ymin, ymax, ny)\n X, Y = np.meshgrid(x, y)\n xy = np.c_[X.flatten(), Y.flatten()]\n\n if vec is False:\n if method == \"nearest\":\n F = NearestNDInterpolator(xyz[:, :2], data)\n else:\n F = LinearNDInterpolator(xyz[:, :2], data)\n DATA = F(xy)\n DATA = DATA.reshape(X.shape)\n\n # Levels definitions\n dataselection = np.logical_and(~np.isnan(DATA), np.abs(DATA) != np.inf)\n if scale == \"log\":\n DATA = np.abs(DATA)\n\n # set vmin, vmax if they are not already set\n vmin = DATA[dataselection].min() if vmin is None else vmin\n vmax = DATA[dataselection].max() if vmax is None else vmax\n\n if scale == \"log\":\n levels = np.logspace(np.log10(vmin), np.log10(vmax), ncontour + 1)\n norm = colors.LogNorm(vmin=vmin, vmax=vmax)\n else:\n levels = np.linspace(vmin, vmax, ncontour + 1)\n norm = colors.Normalize(vmin=vmin, vmax=vmax)\n\n if mask is not None:\n Fmask = NearestNDInterpolator(xyz[:, :2], mask)\n MASK = Fmask(xy)\n MASK = MASK.reshape(X.shape)\n DATA = np.ma.masked_array(DATA, mask=MASK)\n\n contourOpts = {\"levels\": levels, \"norm\": norm, \"zorder\": 1, **contourOpts}\n cont = ax.contourf(X, Y, DATA, **contourOpts)\n\n if level:\n levelOpts = {\"levels\": levels, \"zorder\": 3, **levelOpts}\n CS = ax.contour(X, Y, DATA, **levelOpts)\n\n else:\n # Assume size of data is (N,2)\n datax = data[:, 0]\n datay = data[:, 1]\n if method == \"nearest\":\n Fx = NearestNDInterpolator(xyz[:, :2], datax)\n Fy = NearestNDInterpolator(xyz[:, :2], datay)\n else:\n Fx = LinearNDInterpolator(xyz[:, :2], datax)\n Fy = LinearNDInterpolator(xyz[:, :2], datay)\n DATAx = Fx(xy)\n DATAy = Fy(xy)\n DATA = np.sqrt(DATAx ** 2 + DATAy ** 2).reshape(X.shape)\n DATAx = DATAx.reshape(X.shape)\n DATAy = DATAy.reshape(X.shape)\n if scale == \"log\":\n DATA = np.abs(DATA)\n\n # Levels definitions\n dataselection = np.logical_and(~np.isnan(DATA), np.abs(DATA) != np.inf)\n\n # set vmin, vmax\n vmin = DATA[dataselection].min() if vmin is None else vmin\n vmax = DATA[dataselection].max() if vmax is None else vmax\n\n if scale == \"log\":\n levels = np.logspace(np.log10(vmin), np.log10(vmax), ncontour + 1)\n norm = colors.LogNorm(vmin=vmin, vmax=vmax)\n else:\n levels = np.linspace(vmin, vmax, ncontour + 1)\n norm = colors.Normalize(vmin=vmin, vmax=vmax)\n\n if mask is not None:\n Fmask = NearestNDInterpolator(xyz[:, :2], mask)\n MASK = Fmask(xy)\n MASK = MASK.reshape(X.shape)\n DATA = np.ma.masked_array(DATA, mask=MASK)\n\n contourOpts = {\"levels\": levels, \"norm\": norm, \"zorder\": 1, **contourOpts}\n cont = ax.contourf(X, Y, DATA, **contourOpts)\n\n streamplotOpts = {\"zorder\": 4, \"color\": \"w\", **streamplotOpts}\n ax.streamplot(X, Y, DATAx, DATAy, **streamplotOpts)\n\n if level:\n levelOpts = {\"levels\": levels, \"zorder\": 3, **levelOpts}\n CS = ax.contour(X, Y, DATA, 
levels=levels, zorder=3, **levelOpts)\n\n if shade:\n\n def hillshade(array, azimuth, angle_altitude):\n \"\"\"\n coded copied from https://www.neonscience.org/create-hillshade-py\n \"\"\"\n azimuth = 360.0 - azimuth\n x, y = np.gradient(array)\n slope = np.pi / 2.0 - np.arctan(np.sqrt(x * x + y * y))\n aspect = np.arctan2(-x, y)\n azimuthrad = azimuth * np.pi / 180.0\n altituderad = angle_altitude * np.pi / 180.0\n shaded = np.sin(altituderad) * np.sin(slope) + np.cos(altituderad) * np.cos(\n slope\n ) * np.cos((azimuthrad - np.pi / 2.0) - aspect)\n return 255 * (shaded + 1) / 2\n\n shadeOpts = {\n \"cmap\": \"Greys\",\n \"alpha\": 0.35,\n \"antialiased\": True,\n \"zorder\": 2,\n **shadeOpts,\n }\n\n ax.contourf(\n X,\n Y,\n hillshade(DATA, shade_azimuth, shade_angle_altitude),\n shade_ncontour,\n **shadeOpts\n )\n\n if dataloc:\n ax.plot(xyz[:, 0], xyz[:, 1], \"k.\", ms=2)\n ax.set_aspect(\"equal\", adjustable=\"box\")\n if figname:\n plt.axis(\"off\")\n fig.savefig(figname, dpi=200)\n if level:\n return cont, ax, CS\n else:\n return cont, ax", "def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()", "def plot3d(data_x, data_y, data_z, vol):\n fig = go.Figure(\n data = [\n go.Mesh3d(\n x = data_x,\n y = data_y,\n z = data_z,\n i = [7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2], # These are needed, numbers from documentation\n j = [3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],\n k = [0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],\n colorscale=[[0, 'darkblue'],\n [0.5, 'lightskyblue'],\n [1, 'darkblue']],\n intensity = np.linspace(0, 1, 8, endpoint=True),\n showscale=False,\n opacity = 0.6\n )\n ],\n layout = go.Layout(\n title = \"Le volume est: \" + str(vol),\n autosize = True\n )\n )\n\n # This prints it\n pyo.iplot(fig, filename='Determinant-Volume')", "def plotVolumeContours(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Particle Positions Colored by Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n pos = ax.scatter(self.data[self.nonB, 0], self.data[self.nonB, 1], self.data[self.nonB, 2], s=10, c=self.volumes, cmap='plasma')\n cbar = fig.colorbar(pos, ax=ax)\n cbar.ax.tick_params(labelsize=15)", "def plot3d(self,datarange=None,nx=100,ny=100,clf=True,cb=True,data='auto',**kwargs):\n from enthought.mayavi import mlab as M\n from operator import isMappingType\n\n if data == 'auto':\n if self.data:\n data = self.data[:2]\n else:\n data = None\n\n if data: #TODO:correct coord conv\n xd,yd = 
data[0][0],data[0][1]\n if datarange is None:\n datarange = (np.min(xd),np.max(xd),np.min(yd),np.max(yd))\n maxmind = (np.max(data[1]),np.min(data[1]))\n elif datarange is None:\n if self.rangehint is not None:\n datarange = self.rangehint\n else:\n raise ValueError(\"Can't choose limits for plotting without data or a range hint\")\n maxmind = None\n\n grid = np.mgrid[datarange[0]:datarange[1]:1j*nx,datarange[2]:datarange[3]:1j*ny]\n res = self(grid)\n\n# if maxmind:\n# norm = plt.normalize(min(np.min(res),maxmind[1]),max(np.max(res),maxmind[0]))\n# else:\n# norm = plt.normalize(np.min(res),np.max(res))\n\n if clf:\n M.clf()\n\n M.mesh(grid[0],grid[1],res)\n\n if cb:\n if isMappingType(cb):\n M.colorbar(**cb)\n else:\n M.colorbar()\n\n if data:\n if isMappingType(data):\n kwscat = dict(data)\n else:\n kwscat = {}\n zd = data[1]\n zres = zd-self((xd,yd))\n kwscat.setdefault('scale_mode','none')\n kwscat.setdefault('scale_factor','auto')\n g = M.points3d(xd,yd,zd,zres,**kwscat)\n if kwscat['scale_factor'] == 'auto':\n g.glyph.glyph.scale_factor /= 2\n\n #M.xlim(datarange[0],datarange[1])\n #M.ylim(datarange[2],datarange[3])", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file\",help = \"netCDF4 file to visualize\")\n args = parser.parse_args()\n\n print(\"Visualizing file %s\" % args.file)\n\n # open data in read mode\n data = netCDF4.Dataset(args.file, 'r')\n # surf_2d_slice(data)\n yz_slice(data, 50)", "def plot_scalp(v, channel):\n\n channelpos = [tts.channels[c] for c in channel]\n points = [calculate_stereographic_projection(i) for i in channelpos]\n x = [i[0] for i in points]\n y = [i[1] for i in points]\n z = v\n X, Y, Z = interpolate_2d(x, y, z)\n plt.contour(X, Y, Z, 20)\n plt.contourf(X, Y, Z, 20)\n #plt.clabel(im)\n plt.colorbar()\n plt.gca().add_artist(plt.Circle((0, 0), radius=1, linewidth=3, fill=False))\n plt.plot(x, y, 'bo')\n for i in zip(channel, zip(x,y)):\n plt.annotate(i[0], i[1])", "def surf_2d_slice(data):\n X = np.array(data['x'])\n Y = np.array(data['y'])\n Z = np.array(data['z'])\n\n Xgrid, Ygrid = np.meshgrid(X,Y)\n zlevel = Z[0]\n\n Tslice = np.array(data['T'][0,:, :])\n\n fig, ax = plt.subplots()\n\n # Tslice = np.ma.masked_less_equal(Tslice, 11.001)\n\n pcolor = ax.pcolormesh(Xgrid, Ygrid, Tslice, vmin = 11, vmax = 13.5, cmap = 'Blues')\n\n ax.set_xlabel('x [km]')\n ax.set_ylabel('y [km]')\n\n ax.set_title(\"T:%s at z = %s\" % (os.path.split(data.filepath())[1], zlevel))\n\n\n fig.colorbar(pcolor)\n\n fig.savefig('MITgcmpyvis.png',dpi = 500)", "def open3dpaint(nppoints, color_map = 'jet', reduce_for_vis = False, voxel_size = 0.1, pointsize = 0.1):\n assert (type(nppoints) == pclpy.pcl.PointCloud.PointXYZRGB) or (type(nppoints) == pclpy.pcl.PointCloud.PointXYZ) or (type(nppoints) == np.ndarray) or (type(nppoints) is list) or (type(nppoints) is tuple), 'Not valid point_cloud'\n \n if (type(nppoints) is not list) & (type(nppoints) is not tuple):\n nppoints = [nppoints]\n try:\n visualizer = open3d.visualization.Visualizer()\n visualizer.create_window()\n options = visualizer.get_render_option()\n options.background_color = np.asarray([0, 0, 0])\n options.point_size = pointsize\n\n if len(nppoints) > 1:\n for n,i in enumerate(nppoints):\n workpoints = i\n if (type(workpoints) == pclpy.pcl.PointCloud.PointXYZRGB) or (type(workpoints) == pclpy.pcl.PointCloud.PointXYZ):\n workpoints = workpoints.xyz\n\n if reduce_for_vis:\n workpoints = seg_tree.voxelize(workpoints,voxel_size)\n\n points = convertcloud(workpoints)\n color_coef = 
n/len(nppoints)/2 + n%2*.5\n if type(color_map) == np.ndarray:\n color = color_map\n elif color_map == 'jet':\n color=cm.jet(color_coef)[:3]\n else:\n color=cm.Set1(color_coef)[:3]\n points.colors = open3d.utility.Vector3dVector(np.ones_like(workpoints)*color)\n #points.colors = open3d.utility.Vector3dVector(color)\n visualizer.add_geometry(points)\n else:\n workpoints = nppoints[0]\n if (type(workpoints) == pclpy.pcl.PointCloud.PointXYZRGB) or (type(workpoints) == pclpy.pcl.PointCloud.PointXYZ):\n workpoints = workpoints.xyz\n \n if reduce_for_vis:\n workpoints = seg_tree.voxelize(workpoints,voxel_size)\n points = convertcloud(workpoints)\n visualizer.add_geometry(points)\n visualizer.run()\n visualizer.destroy_window()\n \n except Exception as e:\n print(type(e))\n print(e.args)\n print(e)\n visualizer.destroy_window()", "def draw_pointcloud(ax, example):\n points = example['points'].cpu().detach().numpy()\n points_num = len(points)\n xs = np.empty([points_num])\n ys = np.empty([points_num])\n zs = np.empty([points_num])\n intensity = np.empty([len(points)])\n for j, point in enumerate(points):\n xs[j] = point[1]\n ys[j] = point[2]\n zs[j] = point[3]\n intensity[j] = point[4]\n\n intensity = intensity\n ax.scatter3D(xs, ys, zs, c=intensity, marker='.', s=0.3, cmap=plt.get_cmap('jet'))", "def scatter3d(self, x, y, z, filename=None, spot_cols=None, label=False, stem=False, \n label_font_size=6, rotation=134, elevation=48, interactive=False, squish_scales=False, \n spot_size=40, **kargs):\n assert filename, \"scatter(): Must provide a filename\" \n \n xdata = self.__v[x-1]\n ydata = self.__v[y-1]\n zdata = self.__v[z-1]\n \n fig = self.__draw.getfigure(**kargs)\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elevation, azim=rotation)\n \n cols = self.cols\n if spot_cols:\n cols = spot_cols \n \n ax.scatter(xdata, ydata, zdata, edgecolors=\"none\", c=cols, s=spot_size)\n if label:\n for i, lab in enumerate(self.labels):\n ax.text(xdata[i], ydata[i], zdata[i], lab, size=label_font_size, ha=\"center\", va=\"bottom\")\n \n if stem: # stem must go after scatter for sorting. Actually, not true right? 
matplotlib uses zorder for that...\n z_min = min(zdata)\n for x_, y_, z_ in zip(xdata, ydata, zdata): \n line = art3d.Line3D(*list(zip((x_, y_, z_min), (x_, y_, z_))), marker=None, c=\"grey\", alpha=0.1)\n ax.add_line(line)\n \n ax.set_xlabel(\"PC%s\" % (x,)) # can be overridden via do_common_args()\n ax.set_ylabel(\"PC%s\" % (y,))\n ax.set_zlabel(\"PC%s\" % (z,))\n \n if \"logx\" in kargs and kargs[\"logx\"]:\n ax.set_xscale(\"log\", basex=kargs[\"logx\"])\n if \"logy\" in kargs and kargs[\"logy\"]:\n ax.set_yscale(\"log\", basey=kargs[\"logy\"])\n \n if squish_scales: \n # Don't worry about kargs, do_common_args will overwrite.\n ax.set_xlim([min(xdata), max(xdata)])\n ax.set_ylim([min(ydata), max(ydata)])\n ax.set_zlim([min(zdata), max(zdata)])\n \n self.__draw.do_common_args(ax, **kargs)\n if \"zlims\" in kargs:\n ax.set_zlim([kargs[\"zlim\"][0], kargs[\"zlim\"][1]])\n \n if interactive:\n fig.show() # hope you are not on a cluster!\n \n real_filename = self.__draw.savefigure(fig, filename)\n \n config.log.info(\"scatter3d(): Saved 'PC%s' vs 'PC%s' vs 'PC%s' scatter to '%s'\" % (x, y, z, real_filename))", "def cube(im_in, azimuth=30., elevation=45., filename=None,\n do_axis=True, show_label=True,\n cube_label = {'x':'x', 'y':'y', 't':'t'},\n colormap='gray', roll=-180., vmin=0., vmax=1.,\n figsize=figsize, dpi=300, **kwargs):\n im = im_in.copy()\n\n N_X, N_Y, N_frame = im.shape\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n import numpy as np\n from vispy import app, scene, use\n try:\n AffineTransform = scene.transforms.AffineTransform\n except:\n AffineTransform = scene.transforms.MatrixTransform\n\n use(app='pyglet', gl='pyopengl2')\n from vispy.util.transforms import perspective, translate, rotate\n canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=dpi)\n view = canvas.central_widget.add_view()\n\n# frame = scene.visuals.Cube(size = (N_X/2, N_frame/2, N_Y/2), color=(0., 0., 0., 0.),\n# edge_color='k',\n# parent=view.scene)\n for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):\n# line = scene.visuals.Line(pos=np.array([[p[0]*N_Y/2, p[1]*N_X/2, p[2]*N_frame/2], [p[3]*N_Y/2, p[4]*N_X/2, p[5]*N_frame/2]]), color='black', parent=view.scene)\n line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_frame/2, p[2]*N_Y/2],\n [p[3]*N_X/2, p[4]*N_frame/2, p[5]*N_Y/2]]), color='black', parent=view.scene)\n\n opts = {'parent':view.scene, 'cmap':'grays', 'clim':(0., 1.)}\n image_xy = scene.visuals.Image(np.rot90(im[:, :, 0], 3), **opts)\n tr_xy = AffineTransform()\n tr_xy.rotate(90, (1, 0, 0))\n tr_xy.translate((-N_X/2, -N_frame/2, -N_Y/2))\n image_xy.transform = tr_xy\n\n image_xt = scene.visuals.Image(np.fliplr(im[:, -1, :]), **opts)\n tr_xt = AffineTransform()\n tr_xt.rotate(90, (0, 0, 1))\n tr_xt.translate((N_X/2, -N_frame/2, N_Y/2))\n image_xt.transform = tr_xt\n\n image_yt = scene.visuals.Image(np.rot90(im[-1, :, :], 1), **opts)\n tr_yt = AffineTransform()\n tr_yt.rotate(90, (0, 1, 0))\n tr_yt.translate((+N_X/2, -N_frame/2, N_Y/2))\n image_yt.transform = tr_yt\n\n if do_axis:\n t = {}\n for text in ['x', 'y', 't']:\n t[text] = scene.visuals.Text(cube_label[text], parent=canvas.scene, face='Helvetica', color='black')\n t[text].font_size = 8\n t['x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8\n t['t'].pos = canvas.size[0] 
- canvas.size[0] // 5, canvas.size[1] - canvas.size[1] // 6\n t['y'].pos = canvas.size[0] // 12, canvas.size[1] // 2\n\n cam = scene.TurntableCamera(elevation=35, azimuth=30)\n cam.fov = 45\n cam.scale_factor = N_X * 1.7\n if do_axis: margin = 1.3\n else: margin = 1\n cam.set_range((-N_X/2, N_X/2), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2, N_frame/2))\n view.camera = cam\n if not(filename is None):\n im = canvas.render()\n app.quit()\n import vispy.io as io\n io.write_png(filename, im)\n else:\n app.quit()\n return im", "def scatter3d(self, x, y, z, cs, labels, ptype, colorsMap='jet'):\n cm = plt.get_cmap(colorsMap)\n cNorm = matplotlib.colors.Normalize(vmin=min(cs), vmax=max(cs))\n scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)\n fig = plt.figure(figsize=(9, 7))\n\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(x, y, z, c=scalarMap.to_rgba(cs), edgecolor='none')\n scalarMap.set_array(cs)\n cb = fig.colorbar(scalarMap)\n cb.set_label(labels[3])\n ax.set_xlabel(labels[0])\n ax.set_ylabel(labels[1])\n ax.set_zlabel(labels[2])\n fig.suptitle(os.path.basename(self.conf['General']['sim_dir']))\n if self.conf['General']['save_plots']:\n name = labels[-1] + '_' + ptype + '.png'\n path = os.path.join(self.conf['General']['sim_dir'], name)\n fig.savefig(path)\n if self.conf['General']['show_plots']:\n plt.show()\n plt.close(fig)", "def visualize(z_in, azimuth=25., elevation=30.,\n thresholds=[0.95, .9, .75, .5, .25, .125], opacities=[1, .9, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75, .5, .25, .1], opacities=[.9, .8, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75], opacities=[.99, .7, .2],\n# thresholds=[0.7, .5, .2], opacities=[.95, .5, .2],\n fourier_label = {'f_x':'f_x', 'f_y':'f_y', 'f_t':'f_t'},\n filename=None, do_axis=True, do_grids=False, draw_projections=True,\n colorbar=False, f_N=2., f_tN=2., figsize=figsize, dpi=300, figpath=figpath, **kwargs):\n z = z_in.copy()\n N_X, N_Y, N_frame = z.shape\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n\n # Normalize the amplitude.\n z /= z.max()\n\n from vispy import app, scene, use\n try:\n AffineTransform = scene.transforms.AffineTransform\n except:\n AffineTransform = scene.transforms.MatrixTransform\n\n use(app='pyglet', gl='pyopengl2')\n #from vispy.util.transforms import perspective, translate, rotate\n from vispy.color import Color\n transparent = Color(color='black', alpha=0.)\n import colorsys\n canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=dpi)\n view = canvas.central_widget.add_view()\n\n vol_data = np.rollaxis(np.rollaxis(z, 1), 2)\n# volume = scene.visuals.Volume(vol_data, parent=view.scene)#frame)\n center = scene.transforms.STTransform(translate=( -N_X/2, -N_Y/2, -N_frame/2))\n# volume.transform = center\n# volume.cmap = 'blues'\n\n if draw_projections:\n from vispy.color import Colormap\n cm = Colormap([(1.0, 1.0, 1.0, 1.0), 'k'])\n opts = {'parent':view.scene, 'cmap':cm, 'clim':(0., 1.)}\n\n energy_xy = np.rot90(np.max(z, axis=2)[:, ::-1], 3)#[:, ::-1]\n fourier_xy = scene.visuals.Image(np.rot90(energy_xy), **opts)\n tr_xy = AffineTransform()\n tr_xy.rotate(90, (0, 0, 1))\n tr_xy.translate((N_X/2, -N_Y/2, -N_frame/2))\n fourier_xy.transform = tr_xy\n\n energy_xt = np.rot90(np.max(z, axis=1)[:, ::-1], 3)[::-1, ::-1]\n fourier_xt = scene.visuals.Image(energy_xt, **opts)\n tr_xt = AffineTransform()\n tr_xt.rotate(90, (1, 0, 0))\n tr_xt.translate((-N_X/2, N_Y/2, -N_frame/2))\n fourier_xt.transform = tr_xt\n\n energy_yt = np.max(z, axis=0)[:, ::-1]\n fourier_yt = scene.visuals.Image(energy_yt, **opts)\n 
tr_yt = AffineTransform()\n tr_yt.rotate(90, (0, 1, 0))\n tr_yt.translate((-N_X/2, -N_Y/2, N_frame/2))\n fourier_yt.transform = tr_yt\n\n # Generate iso-surfaces at different energy levels\n surfaces = []\n for i_, (threshold, opacity) in enumerate(zip(thresholds, opacities)):\n surfaces.append(scene.visuals.Isosurface(z, level=threshold,\n# color=Color(np.array(colorsys.hsv_to_rgb(1.*i_/len(thresholds), 1., 1.)), alpha=opacity),\n color=Color(np.array(colorsys.hsv_to_rgb(.66, 1., 1.)), alpha=opacity),\n shading='smooth', parent=view.scene)\n )\n surfaces[-1].transform = center\n\n # Draw a sphere at the origin\n axis = scene.visuals.XYZAxis(parent=view.scene)\n for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):\n line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_Y/2, p[2]*N_frame/2], [p[3]*N_X/2, p[4]*N_Y/2, p[5]*N_frame/2]]), color='black', parent=view.scene)\n\n axisX = scene.visuals.Line(pos=np.array([[0, -N_Y/2, 0], [0, N_Y/2, 0]]), color='red', parent=view.scene)\n axisY = scene.visuals.Line(pos=np.array([[-N_X/2, 0, 0], [N_X/2, 0, 0]]), color='green', parent=view.scene)\n axisZ = scene.visuals.Line(pos=np.array([[0, 0, -N_frame/2], [0, 0, N_frame/2]]), color='blue', parent=view.scene)\n\n if do_axis:\n t = {}\n for text in ['f_x', 'f_y', 'f_t']:\n t[text] = scene.visuals.Text(fourier_label[text], parent=canvas.scene, face='Helvetica', color='black')\n t[text].font_size = 8\n t['f_x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8\n t['f_y'].pos = canvas.size[0] - canvas.size[0] // 8, canvas.size[1] - canvas.size[1] // 6\n t['f_t'].pos = canvas.size[0] // 8, canvas.size[1] // 2\n\n cam = scene.TurntableCamera(elevation=elevation, azimuth=azimuth, up='z')\n cam.fov = 48\n cam.scale_factor = N_X * 1.8\n if do_axis: margin = 1.35\n else: margin = 1\n cam.set_range((-N_X/2*margin, N_X/2/margin), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2*margin, N_frame/2/margin))\n view.camera = cam\n\n render_im = canvas.render()\n app.quit()\n if not(filename is None):\n import vispy.io as io\n io.write_png(filename, render_im)\n else:\n return render_im", "def scatter(args):\n prism.scatter.run(\n input_fp=args.input,\n output_fp=args.output,\n width=args.width,\n height=args.height,\n scale=args.scale,\n font_family=args.font_family,\n )", "def plot_pcolormesh_scalar(x, y, C, outpath, title, xlabel=None, ylabel=None, title2='', subtext='', subsubtext='',\n vmin='auto', vmax='auto', cmap=\"coolwarm\", show=False, close=True, axis_on=True, FSFS=20):\n import lepm.plotting.plotting as leplt\n return leplt.plot_pcolormesh_scalar(x, y, C, outpath, title, xlabel=None, ylabel=None, title2='', subtext='',\n subsubtext='', vmin='auto', vmax='auto', cmap=\"coolwarm\", show=False,\n close=True, axis_on=True, FSFS=20)", "def plot_slice(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,abs(self.psi)[:,:,math.floor(res/2)])\n plt.show()", "def plot3d(data):\n assert span1 == span2\n span = span1\n # ---------------------- create the figure and axes ---------------------- #\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n # -- discretize the definition space and compute the function's images --- #\n X, Y = discretise_space([defspace1, 
defspace2], n=span)\n Z = data\n\n # ----------------------- appearance and plotting ------------------------ #\n ax.set_zlim(np.min(Z) - 0.5, np.max(Z) + 0.5)\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n ax.set(xlabel='$W\\_C$', ylabel='$W\\_W$', zlabel=\"Utilité\")#,\n # title='Utilité à {} ticks en fonction de W_W et W_C'.format(ticks))\n\n # Plot the surface.\n surf = ax.plot_surface(X, Y, Z, alpha=0.8, #, cmap='binary'\n linewidth=0, antialiased=False, zorder=1)\n\n plt.show()" ]
[ "0.59832585", "0.5860537", "0.58340544", "0.57691926", "0.5764829", "0.5710427", "0.5683266", "0.5635767", "0.5606914", "0.55751276", "0.5560233", "0.5559747", "0.55588275", "0.55202836", "0.5513298", "0.55122715", "0.5509551", "0.5489844", "0.5477256", "0.5471932", "0.54474795", "0.544496", "0.5441413", "0.54371476", "0.5427513", "0.5410042", "0.5408558", "0.5385089", "0.53844553", "0.53567576" ]
0.69867015
0
Tests that constructed signals are actual proportions.
def test_construct_signals_proportions(self):
    cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
                               SIGNALS)
    assert np.all(cbg_df['completely_home_prop'].values <= 1)
    assert np.all(cbg_df['full_time_work_prop'].values <= 1)
    assert np.all(cbg_df['part_time_work_prop'].values <= 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_proportions(self):\r\n\r\n proportions = [\r\n v['proportion'] for k, v in self.composition.items()\r\n ]\r\n\r\n if sum(proportions) < 1.0:\r\n raise ValueError('Sum of proportions between host and pathogen must be 1.0.')\r\n elif sum(proportions) > 1.0:\r\n raise ValueError('Sum of proportions between host and pathogen allocations cannot exceed 1.0')\r\n else:\r\n self.logger.info('Sum of proportions equals 1.0 - proceeding')", "def _is_proportion(control, test):\n return set(control) == set(test) == {0, 1}", "def test_uncertainties(self):\n new_wave = np.linspace(0.9, 2.1, 200)\n\n # Without uncertainties\n binned = u.spectres(new_wave, self.wave, self.flux)\n self.assertEqual(len(binned), 2)\n\n # With uncertainties\n binned = u.spectres(new_wave, self.wave, self.flux, self.flux/100.)\n self.assertEqual(len(binned), 3)", "def test_composition_adds_to_100_percent(self):", "def test_pressure_count(self):\n self.assertEqual(self.Pcount, 7)", "def test_probabilities_are_ok(self, seed):\n bins = defaultdict(int)\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", \"2\", \"3\", \"4\")\n categories = OrderedDict(zip(categories, probs))\n dim = Categorical(\"yolo\", categories)\n for _ in range(500):\n sample = dim.sample(seed=seed)[0]\n bins[sample] += 1\n for keys in bins.keys():\n bins[keys] /= float(500)\n for key, value in categories.items():\n assert abs(bins[key] - value) < 0.01", "def test_for_arbitrarily_complicated_substance():\n verify_atomic_weight_for_substance(\"Al4O2H2\", 141.94015428)", "def test_allows_signal_notify(self):\n b1 = Block()\n b2 = Block()\n self.configure_block(b1, {})\n self.configure_block(b2, {})\n\n b1.notify_signals([Signal(), Signal()])\n b2.notify_signals([Signal()])\n\n # Assert that 3 total signals were captured\n self.assert_num_signals_notified(3)\n\n # Assert that we captured the right number of signals per block too\n self.assert_num_signals_notified(2, b1)\n self.assert_num_signals_notified(1, b2)", "def test_frequency(self):\n self.assertAlmostEqual(self.tunneling.frequency.value_si, self.frequency, 4)", "def test_frequency(self):\n self.assertAlmostEqual(self.tunneling.frequency.value_si, self.frequency, 4)", "def test_mixing_ratio():\n p = 998. * units.mbar\n e = 73.75 * units.mbar\n assert_almost_equal(mixing_ratio(e, p), 0.04963, 2)", "def test_mixing_ratio_dimensions():\n p = 998. 
* units.mbar\n e = 73.75 * units.hPa\n assert str(mixing_ratio(e, p).units) == 'dimensionless'", "def test_hist_not_same_length_numerator_and_unc(self):\n with self.assertRaises(AssertionError):\n _, _ = hist_ratio(\n numerator=np.ones(3),\n denominator=np.ones(3),\n numerator_unc=np.ones(2),\n denominator_unc=np.ones(3),\n )", "def probability(self, samples):\n pass", "def test_hist_not_same_length_denomiantor_and_unc(self):\n with self.assertRaises(AssertionError):\n _, _ = hist_ratio(\n numerator=np.ones(3),\n denominator=np.ones(3),\n numerator_unc=np.ones(3),\n denominator_unc=np.ones(2),\n )", "def __init__(self, count):\n assert count >= 0\n self.is_proportion = count < 1.0\n self.cutoff = count", "def test_verify_npred(self):\n pwl=models.PowerLaw(index=2 * u.Unit(''),\n amplitude=2e-11 * u.Unit('cm-2 s-1 TeV-1'),\n reference=1 * u.TeV)\n\n npred_stacked=self.obs_stacker.stacked_obs.predicted_counts(model=pwl)\n\n npred1=self.obs_list[0].predicted_counts(model=pwl)\n npred2=self.obs_list[1].predicted_counts(model=pwl)\n # Set npred outside safe range to 0\n npred1.data.data[np.nonzero(self.obs_list[0].on_vector.quality)]=0\n npred2.data.data[np.nonzero(self.obs_list[1].on_vector.quality)]=0\n\n npred_summed=npred1.data.data + npred2.data.data\n\n assert_allclose(npred_stacked.data.data, npred_summed)", "def assertGaussianOversampledPsfEqual(self, lhs, rhs):\n self.assertEqual(lhs.getSigma(), rhs.getSigma())\n self.assertEqual(lhs.getOversampleFactor(), rhs.getOversampleFactor())\n self.assertEqual(lhs.getTargetSize(), rhs.getTargetSize())", "def test_constructed_is_small(self):\n self.assertTrue(all(elt<10 for elt in goodwinsheaf.checkradii()))#check all entries have small radii", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def test_weight(self):\n # create a coconut of each type\n self.nuts = [Coconut(variety) for variety in ['middle eastern',\n 'south asian',\n 'american']]\n \n # check that weights are as expected\n self.weights = [2.5, 3.0, 3.5]\n for i in range(0,3):\n self.assertEqual(self.nuts[i]._Coconut__weight,\n self.weights[i],\n \"The weight is wrong\")", "def test_results_length(spheroid_convex_fixture):\n assert(len(spheroid_convex_fixture.pairs) == 1000)\n assert(len(spheroid_convex_fixture.combinations) == 1000)\n assert(len(spheroid_convex_fixture.deltas) == 1000)", "def test_hist_not_same_length_numerator_denominator(self):\n with self.assertRaises(AssertionError):\n _, _ = hist_ratio(\n numerator=np.ones(2),\n denominator=np.ones(3),\n numerator_unc=np.ones(3),\n denominator_unc=np.ones(3),\n )", "def test_hist_ratio(self):\n step, step_unc = hist_ratio(\n numerator=self.numerator,\n denominator=self.denominator,\n numerator_unc=self.numerator_unc,\n denominator_unc=self.denominator_unc,\n )\n\n np.testing.assert_almost_equal(step, self.step)\n np.testing.assert_almost_equal(step_unc, self.step_unc)", "def GetProportion(self):\r\n\r\n return self.proportion", "def test_container_weight(self):\r\n weight = self.combinedoe_container.weight\r\n self.assertEqual(weight, 1)", "def test_signals(self):\n G = graphs.Sensor()\n G.plot()\n def test_color(param, length):\n for value in ['r', 4*(.5,), length*(2,), np.ones([1, length]),\n np.random.RandomState(42).uniform(size=length),\n np.ones([length, 3]), [\"red\"] * length,\n np.random.RandomState(42).rand(length, 4)]:\n params = {param: 
value}\n G.plot(**params)\n for value in [10, (0.5, 0.5), np.ones([length, 2]),\n np.ones([2, length, 3]),\n np.ones([length, 3]) * 1.1]:\n params = {param: value}\n self.assertRaises(ValueError, G.plot, **params)\n for value in ['r', 4*(.5)]:\n params = {param: value, 'backend': 'pyqtgraph'}\n self.assertRaises(ValueError, G.plot, **params)\n test_color('vertex_color', G.n_vertices)\n test_color('edge_color', G.n_edges)\n def test_size(param, length):\n for value in [15, length*(2,), np.ones([1, length]),\n np.random.RandomState(42).uniform(size=length)]:\n params = {param: value}\n G.plot(**params)\n for value in [(2, 3, 4, 5), np.ones([2, length]),\n np.ones([2, length, 3])]:\n params = {param: value}\n self.assertRaises(ValueError, G.plot, **params)\n test_size('vertex_size', G.n_vertices)\n test_size('edge_width', G.n_edges)", "def test_volume(self):\n\n self.test_shape.workplane = \"XY\"\n self.test_shape.rotation_axis = \"Z\"\n\n assert self.test_shape.volume() == pytest.approx(math.pi * (10**2) * 100 * 8)", "def test_sample_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')" ]
[ "0.66484034", "0.6311004", "0.61575395", "0.6049411", "0.5846437", "0.57422686", "0.5709035", "0.5704337", "0.5583857", "0.5583857", "0.5528866", "0.5524776", "0.55001354", "0.54795945", "0.5478572", "0.54679793", "0.54651594", "0.544234", "0.543187", "0.5401828", "0.5401828", "0.5399854", "0.5398259", "0.53966784", "0.5388757", "0.53760356", "0.53725624", "0.53487164", "0.5344276", "0.53412247" ]
0.7607326
0
Tests that aggregation at the county level creates nonzero-valued signals.
def test_aggregate_county(self):
    cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
                               SIGNALS)
    df = aggregate(cbg_df, SIGNALS, 'county')

    assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
    x = df[f'{SIGNALS[0]}_se'].values
    assert np.all(x[~np.isnan(x)] >= 0)
    assert df.shape == (1472, 17)
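The aggregation tests in this and the following rows all assert on the same column-naming pattern: a sample count `<signal>_n` and a standard error `<signal>_se` per geography. A minimal sketch of that pattern follows; it is not the package's own aggregate implementation, and the grouping and column names are assumptions for illustration.

import numpy as np
import pandas as pd

def toy_aggregate(cbg_df, signals, geo_col):
    stats = cbg_df.groupby(geo_col)[signals].agg(['count', 'std'])
    out = pd.DataFrame(index=stats.index)
    for sig in signals:
        n = stats[(sig, 'count')]
        out[f'{sig}_n'] = n                                   # sample size per geography
        out[f'{sig}_se'] = stats[(sig, 'std')] / np.sqrt(n)   # standard error of the mean
    return out.reset_index()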
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aggregate_nation(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'nation')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (1, 17)", "def test_aggregate_state(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'state')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (54, 17)", "def test_case_zero():\n empty_c = Clumper([])\n assert empty_c.mean(\"i\") is None\n assert empty_c.max(\"i\") is None\n assert empty_c.min(\"i\") is None\n assert empty_c.sum(\"i\") is None\n assert empty_c.unique(\"i\") == []\n assert empty_c.n_unique(\"i\") == 0", "def test_main(self):\n agg_list = generate_aggregation_list(self.config, self.files)\n evaluate_aggregation_list(self.config, agg_list, self.file)\n\n with nc.Dataset(self.file) as nc_in:\n status = nc_in.variables[\"status\"]\n # there should be no fill values...\n # before ncagg v0.8.5 vlen types like string incorrectly aggregated to all fill values.\n self.assertFalse(any(status[:] == status._FillValue))", "def test_aggregate_msa(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'msa')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (372, 17)", "def test_county(self):\n counties = self.geographies.find({ 'geoid': '15009' })\n\n self.assertEqual(counties.count(), 1)\n\n county = counties[0]\n\n self.assertEqual(county['sumlev'], config.SUMLEV_COUNTY)\n self.assertEqual(county['metadata']['NAME'], 'Maui County')\n self.assertEqual(county['metadata']['STATE'], '15')\n self.assertEqual(county['metadata']['COUNTY'], '009')\n\n pop_2000 = 128094 \n pop_2010 = 154834\n self._test_totalpop(county, pop_2000, pop_2010)", "def observed_species(counts):\n return (counts!=0).sum()", "def test_empty_input(self):\n discs = calc_disc_c(np.ones(0), np.ones(0), np.ones(0), 0.3)\n np.testing.assert_almost_equal(discs, np.array([]))", "def test_no_source_measurements(self):\n measurement = self.measurement(self.metric())\n self.assertEqual(None, measurement[\"count\"][\"value\"])", "def test_none(self):\n mkv = MKV(test_paths['subs']['zero'], 0)\n mkv._analyze()\n assert mkv.subs.stream_count == 0\n assert mkv.subs.copy_count == 0\n assert mkv.subs.copy_indices == []\n assert mkv.subs.copy_streams == []", "def test_empty_input(self):\n discs = calc_disc_b(np.ones(0), np.ones(0), np.ones(0), 0.3)\n np.testing.assert_almost_equal(discs, np.array([]))", "def test_uncertainties(self):\n new_wave = np.linspace(0.9, 2.1, 200)\n\n # Without uncertainties\n binned = u.spectres(new_wave, self.wave, self.flux)\n self.assertEqual(len(binned), 2)\n\n # With uncertainties\n binned = u.spectres(new_wave, self.wave, self.flux, self.flux/100.)\n self.assertEqual(len(binned), 3)", "def test_result_zero(self, init_wealth, n_bars):\n series_wealth = init_wealth + np.zeros(n_bars, dtype=float)\n result = self.MetricClass()._result_from_wealth(series_wealth)\n expected = init_wealth\n assert result == expected", "def test_empty_input(self):\n discs = calc_disc(np.column_stack((np.ones(0), np.ones(0), np.ones(0))))\n np.testing.assert_almost_equal(discs, 
np.array([]))", "def test_ones(self):\n discs = calc_disc_c(np.ones(10), np.ones(10), np.ones(10), 0)\n np.testing.assert_almost_equal(discs, np.zeros(10))", "def test_county_subdivision(self):\n counties = self.geographies.find({ 'geoid': '1500190630' })\n\n self.assertEqual(counties.count(), 1)\n\n county = counties[0]\n\n self.assertEqual(county['sumlev'], config.SUMLEV_COUNTY_SUBDIVISION)\n self.assertEqual(county['metadata']['NAME'], 'Hilo CCD')\n self.assertEqual(county['metadata']['STATE'], '15')\n self.assertEqual(county['metadata']['COUNTY'], '001')\n\n pop_2000 = 42425 \n pop_2010 = 45714 \n self._test_totalpop(county, pop_2000, pop_2010)", "def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)", "def get_zeros(self):\n return self.serie.isin([0]).sum()", "def test_single(self):\n df = self.df.head(1).copy()\n for renorm in [True, False]:\n with self.subTest(renorm=renorm):\n out = standardise_aggregate(df, renorm=renorm)\n outvals = out.values[~np.isnan(out.values)]\n dfvals = df.values[~np.isnan(df.values)]\n self.assertTrue(np.allclose(outvals, dfvals))", "def test_result_zero(self, rate, n, init_wealth, n_bars):\n series_wealth = init_wealth + np.zeros(n_bars)\n result = self.MetricClass(rate=rate, n=n)._result_from_wealth(series_wealth)\n expected = 0\n assert np.allclose(result, expected)", "def test_ones(self):\n discs = calc_disc_b(np.ones(10), np.ones(10), np.ones(10), 0)\n np.testing.assert_almost_equal(discs, np.zeros(10))", "def test_zero(self, test_type='t-test'):\n return zero_tests(self.evaluations, test_type, self.model_var, self.dof)", "def is_zero(self):\n return float(self.coeff.nominator) / self.coeff.denominator == 0.0", "def test_hist_w_unc_zero_case(self):\n bins, hist, unc, band = hist_w_unc(\n arr=[],\n bins=[],\n )\n\n np.testing.assert_almost_equal(bins, [])\n np.testing.assert_almost_equal(hist, [])\n np.testing.assert_almost_equal(unc, [])\n np.testing.assert_almost_equal(band, [])", "def purity_test(self):\n mean = filter_data(self.data,self.ancestors)['Class'].mean()\n if mean == 0:\n return 0\n elif mean == 1:\n return 1\n return None", "def test_none_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=0)", "def test_aggregate_hhs(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'hhs')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (10, 17)", "def test_count_no_spins(self):\n\n # Reset relax.\n reset()\n\n # Add a data pipe to the data store.\n ds.add(pipe_name='orig', pipe_type='mf')\n\n # Test the number of spins counted.\n self.assertEqual(mol_res_spin.count_spins(), 0)", "def __nonzero__(self):\n for e in self:\n if e != 0:\n return True\n return False", "def test_aggregate_hrr(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'hrr')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (306, 17)" ]
[ "0.68716997", "0.6666926", "0.61090475", "0.57431424", "0.5742255", "0.5667957", "0.5607133", "0.5604635", "0.55638605", "0.55366623", "0.5536144", "0.55044466", "0.54993826", "0.5419435", "0.5376306", "0.53653294", "0.53649396", "0.5341991", "0.5326681", "0.53014153", "0.52980876", "0.528812", "0.52680254", "0.5233191", "0.5231674", "0.5216971", "0.52166337", "0.520204", "0.51703066", "0.51609075" ]
0.7612559
0
Tests that aggregation at the state level creates nonzero-valued signals.
def test_aggregate_state(self):
    cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
                               SIGNALS)
    df = aggregate(cbg_df, SIGNALS, 'state')

    assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
    x = df[f'{SIGNALS[0]}_se'].values
    assert np.all(x[~np.isnan(x)] >= 0)
    assert df.shape == (54, 17)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aggregate_nation(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'nation')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (1, 17)", "def test_none(self):\n mkv = MKV(test_paths['subs']['zero'], 0)\n mkv._analyze()\n assert mkv.subs.stream_count == 0\n assert mkv.subs.copy_count == 0\n assert mkv.subs.copy_indices == []\n assert mkv.subs.copy_streams == []", "def test_aggregate_msa(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'msa')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (372, 17)", "def test_aggregate_county(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'county')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (1472, 17)", "def test_case_zero():\n empty_c = Clumper([])\n assert empty_c.mean(\"i\") is None\n assert empty_c.max(\"i\") is None\n assert empty_c.min(\"i\") is None\n assert empty_c.sum(\"i\") is None\n assert empty_c.unique(\"i\") == []\n assert empty_c.n_unique(\"i\") == 0", "def test_quality_lt_zero(self):\n with pytest.raises(StateError):\n State(substance=\"water\", x=Q_(-1.0, \"dimensionless\"), p=Q_(101325, \"Pa\"))", "def __nonzero__(self):\n return self.value.__nonzero__()", "def __nonzero__(self):\n for e in self:\n if e != 0:\n return True\n return False", "def get_noised_result(self, sample_state, global_state):\n new_cumulative_sum = tf.nest.map_structure(\n tf.add, global_state.samples_cumulative_sum, sample_state)\n cumulative_sum_noise, new_tree_state = self._tree_aggregator.get_cumsum_and_update(\n global_state.tree_state)\n noised_cumulative_sum = tf.nest.map_structure(tf.add, new_cumulative_sum,\n cumulative_sum_noise)\n new_global_state = attr.evolve(\n global_state,\n samples_cumulative_sum=new_cumulative_sum,\n tree_state=new_tree_state)\n event = dp_event.UnsupportedDpEvent()\n return noised_cumulative_sum, new_global_state, event", "def test_nothing_checked(self, Signal):\n blk = ElapsedTime()\n config = {\n 'units': {\n 'days': False,\n 'hours': False,\n 'minutes': False,\n 'seconds': False,\n },\n 'timestamp_a': '1984-05-03T00:00:00.999Z',\n 'timestamp_b': '1984-05-03T00:00:01.001Z',\n }\n self.configure_block(blk, config)\n\n # process a list of signals\n blk.start()\n blk.process_signals([\n Signal({\n 'pi': 3.142,\n }),\n ])\n blk.stop()\n\n self.assert_last_signal_list_notified([\n Signal({\n 'pi': 3.142,\n }),\n ])", "def test_op_zero_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n offl_a = stream.bind(a)\n offl_a.zero()\n offl_a.update_host()\n stream.sync()\n self.assertEqual(sum(a), 0,\n \"Array should be all zeros.\")", "def evaluate(self) :\n for inp in self.inStates :\n if inp.getState() == 0 : return 0\n return 1", "def test_empty_input(self):\n discs = calc_disc_b(np.ones(0), np.ones(0), np.ones(0), 0.3)\n np.testing.assert_almost_equal(discs, np.array([]))", "def is_zero(self):\n for action, prob in self._regrets.items():\n if prob != 0.0:\n return False\n return True", "def isZero(self):\n return self.count == 0", 
"def test_empty_input(self):\n discs = calc_disc(np.column_stack((np.ones(0), np.ones(0), np.ones(0))))\n np.testing.assert_almost_equal(discs, np.array([]))", "def isZero(self):\n\t\treturn (self.p.isZero() & (self.q.isZero() == False))", "def test_aggregate_hhs(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'hhs')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (10, 17)", "def is_empty(self):\n return ch.prod(ch.tensor(self.x.shape)).item() == 0", "def is_zero(self, a):\n return not a", "def is_zero(self):\n # any nonzero entry in any matrix representation\n # disqualifies the morphism as having totally zero outputs\n return self._matrix.is_zero()", "def get_noised_result(self, sample_state, global_state):\n tree_noise, new_tree_state = self._tree_aggregator.get_cumsum_and_update(\n global_state.tree_state)\n noised_sample = tf.nest.map_structure(lambda a, b, c: a + b - c,\n sample_state, tree_noise,\n global_state.previous_tree_noise)\n new_global_state = attr.evolve(\n global_state, previous_tree_noise=tree_noise, tree_state=new_tree_state)\n event = dp_event.UnsupportedDpEvent()\n return noised_sample, new_global_state, event", "def test_s_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_1q_clifford.s_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_1q_clifford.s_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def test_ones(self):\n discs = calc_disc_b(np.ones(10), np.ones(10), np.ones(10), 0)\n np.testing.assert_almost_equal(discs, np.zeros(10))", "def __nonzero__(self):\n return self.__nonzero", "def __nonzero__(self):\n return self.__nonzero", "def is_zero(self):\n return self._express.is_zero()", "def test_amplitude_damping_error_full_0state_noncanonical(self):\n error = amplitude_damping_error(1, excited_state_population=0,\n canonical_kraus=False)\n targets = [np.diag([1, 0]), np.array([[0, 1], [0, 0]])]\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")", "def test_aggregate_hrr(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'hrr')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (306, 17)", "def test_count_no_spins(self):\n\n # Reset relax.\n reset()\n\n # Add a data pipe to the data store.\n ds.add(pipe_name='orig', pipe_type='mf')\n\n # Test the number of spins counted.\n self.assertEqual(mol_res_spin.count_spins(), 0)" ]
[ "0.61210126", "0.5934089", "0.589467", "0.5892433", "0.5861224", "0.5711701", "0.5653383", "0.5630056", "0.5579708", "0.5577906", "0.5550419", "0.5545783", "0.55085456", "0.5499616", "0.5489012", "0.5481875", "0.5469063", "0.5461217", "0.54575956", "0.5446156", "0.5441203", "0.54358083", "0.5427589", "0.54234934", "0.54045355", "0.54045355", "0.5391061", "0.5376186", "0.5373958", "0.5363377" ]
0.748049
0
Tests that aggregation at the state level creates nonzero-valued signals.
def test_aggregate_nation(self):
    cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
                               SIGNALS)
    df = aggregate(cbg_df, SIGNALS, 'nation')

    assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
    x = df[f'{SIGNALS[0]}_se'].values
    assert np.all(x[~np.isnan(x)] >= 0)
    assert df.shape == (1, 17)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aggregate_state(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'state')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (54, 17)", "def test_none(self):\n mkv = MKV(test_paths['subs']['zero'], 0)\n mkv._analyze()\n assert mkv.subs.stream_count == 0\n assert mkv.subs.copy_count == 0\n assert mkv.subs.copy_indices == []\n assert mkv.subs.copy_streams == []", "def test_aggregate_msa(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'msa')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (372, 17)", "def test_aggregate_county(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'county')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (1472, 17)", "def test_case_zero():\n empty_c = Clumper([])\n assert empty_c.mean(\"i\") is None\n assert empty_c.max(\"i\") is None\n assert empty_c.min(\"i\") is None\n assert empty_c.sum(\"i\") is None\n assert empty_c.unique(\"i\") == []\n assert empty_c.n_unique(\"i\") == 0", "def test_quality_lt_zero(self):\n with pytest.raises(StateError):\n State(substance=\"water\", x=Q_(-1.0, \"dimensionless\"), p=Q_(101325, \"Pa\"))", "def __nonzero__(self):\n return self.value.__nonzero__()", "def __nonzero__(self):\n for e in self:\n if e != 0:\n return True\n return False", "def get_noised_result(self, sample_state, global_state):\n new_cumulative_sum = tf.nest.map_structure(\n tf.add, global_state.samples_cumulative_sum, sample_state)\n cumulative_sum_noise, new_tree_state = self._tree_aggregator.get_cumsum_and_update(\n global_state.tree_state)\n noised_cumulative_sum = tf.nest.map_structure(tf.add, new_cumulative_sum,\n cumulative_sum_noise)\n new_global_state = attr.evolve(\n global_state,\n samples_cumulative_sum=new_cumulative_sum,\n tree_state=new_tree_state)\n event = dp_event.UnsupportedDpEvent()\n return noised_cumulative_sum, new_global_state, event", "def test_nothing_checked(self, Signal):\n blk = ElapsedTime()\n config = {\n 'units': {\n 'days': False,\n 'hours': False,\n 'minutes': False,\n 'seconds': False,\n },\n 'timestamp_a': '1984-05-03T00:00:00.999Z',\n 'timestamp_b': '1984-05-03T00:00:01.001Z',\n }\n self.configure_block(blk, config)\n\n # process a list of signals\n blk.start()\n blk.process_signals([\n Signal({\n 'pi': 3.142,\n }),\n ])\n blk.stop()\n\n self.assert_last_signal_list_notified([\n Signal({\n 'pi': 3.142,\n }),\n ])", "def test_op_zero_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n offl_a = stream.bind(a)\n offl_a.zero()\n offl_a.update_host()\n stream.sync()\n self.assertEqual(sum(a), 0,\n \"Array should be all zeros.\")", "def evaluate(self) :\n for inp in self.inStates :\n if inp.getState() == 0 : return 0\n return 1", "def test_empty_input(self):\n discs = calc_disc_b(np.ones(0), np.ones(0), np.ones(0), 0.3)\n np.testing.assert_almost_equal(discs, np.array([]))", "def is_zero(self):\n for action, prob in self._regrets.items():\n if prob != 0.0:\n return False\n return True", "def isZero(self):\n return self.count == 0", 
"def test_empty_input(self):\n discs = calc_disc(np.column_stack((np.ones(0), np.ones(0), np.ones(0))))\n np.testing.assert_almost_equal(discs, np.array([]))", "def isZero(self):\n\t\treturn (self.p.isZero() & (self.q.isZero() == False))", "def test_aggregate_hhs(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'hhs')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (10, 17)", "def is_empty(self):\n return ch.prod(ch.tensor(self.x.shape)).item() == 0", "def is_zero(self, a):\n return not a", "def is_zero(self):\n # any nonzero entry in any matrix representation\n # disqualifies the morphism as having totally zero outputs\n return self._matrix.is_zero()", "def get_noised_result(self, sample_state, global_state):\n tree_noise, new_tree_state = self._tree_aggregator.get_cumsum_and_update(\n global_state.tree_state)\n noised_sample = tf.nest.map_structure(lambda a, b, c: a + b - c,\n sample_state, tree_noise,\n global_state.previous_tree_noise)\n new_global_state = attr.evolve(\n global_state, previous_tree_noise=tree_noise, tree_state=new_tree_state)\n event = dp_event.UnsupportedDpEvent()\n return noised_sample, new_global_state, event", "def test_s_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_1q_clifford.s_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_1q_clifford.s_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def test_ones(self):\n discs = calc_disc_b(np.ones(10), np.ones(10), np.ones(10), 0)\n np.testing.assert_almost_equal(discs, np.zeros(10))", "def __nonzero__(self):\n return self.__nonzero", "def __nonzero__(self):\n return self.__nonzero", "def is_zero(self):\n return self._express.is_zero()", "def test_aggregate_hrr(self):\n cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),\n SIGNALS)\n df = aggregate(cbg_df, SIGNALS, 'hrr')\n\n assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)\n x = df[f'{SIGNALS[0]}_se'].values\n assert np.all(x[~np.isnan(x)] >= 0)\n assert df.shape == (306, 17)", "def test_amplitude_damping_error_full_0state_noncanonical(self):\n error = amplitude_damping_error(1, excited_state_population=0,\n canonical_kraus=False)\n targets = [np.diag([1, 0]), np.array([[0, 1], [0, 0]])]\n circ, p = error.error_term(0)\n self.assertEqual(p, 1, msg=\"Kraus probability\")\n self.assertEqual(circ[0][\"qubits\"], [0])\n for op in circ[0]['params']:\n self.remove_if_found(op, targets)\n self.assertEqual(targets, [], msg=\"Incorrect kraus matrices\")", "def test_count_no_spins(self):\n\n # Reset relax.\n reset()\n\n # Add a data pipe to the data store.\n ds.add(pipe_name='orig', pipe_type='mf')\n\n # Test the number of spins counted.\n self.assertEqual(mol_res_spin.count_spins(), 0)" ]
[ "0.74812746", "0.59329057", "0.5895889", "0.58929867", "0.58616966", "0.5710675", "0.565309", "0.5629537", "0.557881", "0.5575357", "0.5549657", "0.55448717", "0.55095005", "0.54987305", "0.54884887", "0.54829407", "0.5467536", "0.54628205", "0.5458857", "0.5445779", "0.5440979", "0.5434696", "0.5427114", "0.5424107", "0.54035383", "0.54035383", "0.5389984", "0.5375636", "0.5375472", "0.5363581" ]
0.6122557
1
Method for reading a nordic file and parsing it to a string array while also checking the integrity of the file (an error is raised when a line is too short). It will also parse empty space in the file if it is too short.
def readNordicFile(f): nordics = [] emsg = "Nordic Read: The following line is too short: {0}\n{1}" i = 0 nordics.append([]) for line in f: if line.strip() == "" or line is None: if len(nordics[i]) == 0: continue i += 1; nordics.append([]) elif(len(line) < 81): raise Exception("Line not long enough (len:{0}):\n{1}".format(len(line), line)) elif (line[79] == "7"): continue else: nordics[i].append(line) if not nordics[-1]: return nordics[:-1] else: return nordics
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readP(path, encoding='iso-8859-1', n=0):\n with open(path, encoding=encoding) as f:\n raw = [x.strip() for x in f if x]\n if n:\n raw = [x for x in raw if len(x) <= n]\n return raw", "def read(read_file) -> list:\n result = []\n try:\n with open(read_file) as file:\n for lines in file:\n line = decode(lines.strip(\"\"))\n result.append(extract_information(line))\n global header\n header = result[0:2]\n result = result[3:]\n for word in result:\n if \"None\" in word[0:3]:\n raise InvalidPrincessException(\"Invalid princess!\")\n continue\n return result\n except FileNotFoundError:\n raise Exception(\"File not found!\")", "def parse_file(input_file):\n \n all_lines = input_file.split('\\n')\n all_info_list = []\n for line in all_lines:\n line = line.split('\\t')\n info_per_row_list = []\n for value in line:\n my_string = \"\"\n value = value.strip('\\'\"')\n if len(value) == 0:\n value = \"NA\"\n my_string += value\n info_per_row_list += [my_string]\n all_info_list += [info_per_row_list]\n return all_info_list", "def readFile(filename): \n file = open(filename,\"r\")\n text = file.read() \n file.close()\n \n voteList = []\n text=text.split(\"\\n\")\n \n for i in range(len(text)-1):\n text[i]=text[i].strip()\n voteList.append((text[i]).split(\" \"))\n \n return voteList", "def _get_file_as_array(self, file_):\n file_as_string = \"\"\n for line in file_:\n if \";\" in line:\n line = line[:line.find(\";\")]\n line = (line.replace('\\t', '').replace('\\n', ' ')\n .replace('(', ' ( ').replace(')', ' ) '))\n file_as_string += line\n file_.close()\n return file_as_string.strip().split()", "def read_file(self):\n\n\t\twith open(self.filename, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tif len(line)>1:\n\t\t\t\t\tlenght_value,array_values = line.split(';')\n\t\t\t\t\tlist_values = [int(x) for x in array_values.split(',')]\n\t\t\t\t\tprint self.get_arraysurdit(list_values)", "def readAllfromFile(self):\n with open(self._fname, 'r') as f:\n lines = f.readlines()\n readList = []\n for line in lines:\n line = line.strip()\n if len(line) > 1:\n gra = self._readGrafromLine(line)\n readList.append(gra)\n f.close()\n return readList", "def read_file(file_name):\n\twith open(file_name, 'r') as file:\n\t\tdata = file.read().split('\\n')\n\t\n\treturn list(map(lambda x: x.split('\\t'), data))", "def read_file(filename):\r\n with open(filename, \"r\") as f:\r\n data = f.readlines()\r\n res = []\r\n for line in data:\r\n line = line[:-1]\r\n res.append(list(line))\r\n return np.array(res)", "def _read(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding='utf-8') as f:\n lines = []\n for line in f:\n lines.append(line.strip())\n return lines", "def readInput(fileName):\n with open(fileName, 'r') as file:\n\n fileContent = file.read()\n\n return fileContent.split(\"\\n\")", "def readFile (filename):\n # some OSes need to know that the file might have some special characters\n f = open(filename)\n # convert reader to a list so we can close the file\n result = [ line.strip().split('\\t') for line in f if len(line) > 1 ]\n # close the file so we do not take up extra system resources\n f.close()\n # throw away the header row(s) of the data\n return result[1:]", "def parse_txt_file(txtfile):\n array = np.genfromtxt(txtfile)\n return array", "def Read_RMCA_basic(Complete_Path):\n fid = open(Complete_Path,'r')\n S = []\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n S.append(float(line))\n #R.append(float(line[27:-2]))\n return np.array(S)", "def read_file(file_path):\n 
with open(file_path, mode=\"r\", encoding=\"utf-8\") as f:\n data = f.read()\n return data.split(\"\\n\")", "def parse_input(filename):\n with open(filename, 'r') as input_file:\n return [i.rstrip() for i in input_file]", "def lineReadfile(filename):\n#\t\"input:filename output=readlines() \"\n\tf = open(filename)\n\tlist1 =[]\n\twhile 1:\n\t\ts = f.readline()\n\t\tif s==\"\":\n\t\t\tbreak\n\t\ts=string.replace(s,\"\\n\",\"\")\n\t\tif s==\"\":\n\t\t\tcontinue\n\t\tlist1.append(s)\n\tf.close()\n\treturn list1", "def read_file(filename):\n with open(filename, encoding='utf-8') as src:\n return [line.strip() for line in src.readlines()]", "def read_file(file_to_read = 'text_albums_data.txt'):\n with open(file_to_read) as f:\n music_data = f.readlines()\n for index, item in enumerate(music_data):\n music_data[index] = item.strip()\n for index, item in enumerate(music_data):\n music_data[index] = item.split('\\n')\n return music_data", "def Read_t_file(file_name):\n t=[]\n \n with open(file_name,'r') as reader:\n temp=reader.readline().strip().split()[-1].split('-')\n t.append(temp[0])\n t.append(temp[1])\n for line in reader.readlines():\n t.append(line.strip().split()[-1].split('-')[-1])\n \n return np.array(t,dtype=np.float32)", "def read_file(infile_name):\n chr_list = [0]*13 \n for i in range(len(chr_list)):\n chr_list[i] = [] \n infile = open(infile_name)\n for line in infile:\n if line.startswith('SL2.40'):\n chr = int(line.strip().split()[0][-2:])\n loci = int(line.strip().split()[1])\n chr_list[chr] += [loci]\n else:\n pass\n infile.close()\n return chr_list", "def readInput(fileName):\r\n with open(fileName, 'r') as file:\r\n\r\n fileContent = []\r\n for line in file:\r\n fileContent.append(line.strip())\r\n\r\n return fileContent", "def get_str_arrays(self):\n return self._fin.readline().strip('\\n').strip(' ').split(' ')", "def readfile(filename):\n try:\n with open(filename, \"r\") as file:\n text = file.readlines()\n for i in range(len(text)):\n text[i] = text[i].rstrip()\n return text\n except:\n print(\"Error readfile()\")", "def read_file_as_list(filename):\n with FileUtils.open_file_by_type(filename) as f:\n return [l for l in (line.strip() for line in f) if l]", "def parse_data(fn):\n data = []\n with open(fn, \"rb\") as f:\n for line in f:\n if py_ver == 3:\n # Python 3 code in this block\n dline = \"\".join(filter(lambda char: char != '\"', line.decode())).split(\",\")\n else:\n # Python 2 code in this block\n dline = line.translate(None, '\"').split(\",\")\n \n if len(dline) == 11 and dline[0].isdigit():\n data.append([float(i) for i in dline])\n\n return np.array(data)", "def read_name_file(filename):\n with open(filename, 'r') as f:\n names = f.read()\n names = names.split('\\n')\n names = list(filter(None, names))\n return names", "def read_file(filename):\n field = []\n with open(filename, encoding='utf-8') as f:\n f.readline()\n for line in f:\n field.append(line[3:].split())\n return field", "def parse(filename):\n with open(filename) as file:\n lines = [line.strip() for line in file]\n return lines", "def reader(filename,only_length=False):\n print(\"Counting lines in file %s\"%filename)\n total_lines=0\n for n,line in enumerate(open(filename,\"r\")):\n total_lines+=1\n \n if only_length:\n return total_lines\n \n X,Y,Z,W,J=[np.zeros(total_lines) for _ in range(5)]\n \n for n, line in enumerate(open(filename, 'r')):\n if n%1000000==0:\n print(\"Reading line %d of %d from file %s\" %(n,total_lines,filename))\n split_line=np.array(line.split(\" \"), dtype=float) \n 
X[n]=split_line[0];\n Y[n]=split_line[1];\n Z[n]=split_line[2];\n W[n]=split_line[3];\n J[n]=int(split_line[4]);\n return X,Y,Z,W,J" ]
[ "0.60678965", "0.6035607", "0.6027674", "0.6023274", "0.598013", "0.5867823", "0.586583", "0.58537155", "0.58469784", "0.5815171", "0.5806846", "0.58053046", "0.5800402", "0.5777913", "0.5765686", "0.57436544", "0.57398486", "0.57300663", "0.57231325", "0.57182133", "0.5685933", "0.56830007", "0.5680472", "0.56749994", "0.5648477", "0.56483", "0.5646094", "0.5644518", "0.56406254", "0.56382954" ]
0.760024
0
Returns the text in the example's document in the given span.
def get_text_span(example, span): byte_positions = [] # `text` is a byte string since `document_plaintext` is also a byte string. start = span["plaintext_start_byte"] end = span["plaintext_end_byte"] text = byte_slice(example["document_plaintext"], start, end) for i in range(start, end): byte_positions.append(i) return TextSpan(byte_positions, text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_span_text(self, span: Span) -> str:\n return self._text[span.begin: span.end]", "def read_text_from_span_id(html, span_id):\n return html.find('span', {'id': span_id}).text", "def get_span_text(element, name):\n try:\n return (\n element.find_element_by_css_selector(name)\n .find_elements_by_tag_name(\"span\")[1]\n .text.replace(\"–\", \"-\")\n )\n except NoSuchElementException:\n return \"\"", "def get_single_answer_text(\n self, qid: QuestionId, span_start: int, span_end: int\n ) -> str:\n context = self.quids_to_context_qas[qid]\n try:\n assert span_start <= span_end\n start_idx = context.tokens[span_start].span[0]\n end_idx = context.tokens[min(span_end, len(context.tokens))].span[1]\n return context.original_text[\n start_idx : min(end_idx, len(context.original_text))\n ]\n except Exception as ex:\n print(\n f\"Error while reconstructing answer. num tokens: {len(context.tokens)}, first token: {span_start}, last token: {span_end}\"\n )\n print(\n f\"text len: {len(context.text)}, first char: {context.tokens[span_start].span[0]}, last char: {context.tokens[span_end].span[1]}\"\n )\n return \"\"", "def get_text(doc_element: dict, document: dict):\n response = \"\"\n # If a text segment spans several lines, it will\n # be stored in different text segments.\n for segment in doc_element.text_anchor.text_segments:\n start_index = (\n int(segment.start_index)\n if segment in doc_element.text_anchor.text_segments\n else 0\n )\n end_index = int(segment.end_index)\n response += document.text[start_index:end_index]\n return response", "def _sentence(self, node, offset_mngr):\n text = self._text(node)\n offset = offset_mngr.update(node, text)\n return text, offset", "def _extract_span(item):\n return getattr(item, \"_datadog_span\", None)", "def get_text(self):\n text_element = self.page.find(id=self.text_location)\n return text_element.get_text()", "def get_current_span():\n return cv_span_context.get(), cv_span_parent.get()", "def get_current_span(cls) -> Span:\n return execution_context.get_current_span()", "def get_text(self, node, padded=True):\n # type: (AstNode, bool) -> str\n start, end = self.get_text_range(node, padded)\n return self._text[start: end]", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def get_text(self):", "def display_toxics(span, text):\n html = \"<p class='spans'>\"\n for ind, char in enumerate(text):\n if ind in span:\n html += \"<b style='color:red'>\" + char + '</b>'\n else:\n html += char\n html += '</p>'\n return html", "def obtain_text():\n pass", "def get_text(downgrade_titles=False):", "def get_span(self, i, j):\n assert j > i\n return self.span_v[i][j - i - 1]", "def _get_text(self, range: Range) -> str:\n start_offset = self._position_to_offset(range.start)\n end_offset = self._position_to_offset(range.end)\n return self._text[start_offset:end_offset]", "def _get_doc_text(doc_id):\n querystring = 'select currtext from {} where id = %s;'.format(TABLES[4])\n result = execute_query(querystring, (doc_id,))\n if result:\n result = result[0]\n return None, None, None # waarom zo?", "def get_doc_text(self, doc_id):\n return self._get_doc_key(doc_id, 'text')", "def get_optional_text(element, name, is_span=True):\n text = \"\"\n try:\n if is_span:\n text = get_span_text(element, name)\n else:\n text = element.find_element_by_css_selector(name).text.replace(\"–\", \"-\")\n except NoSuchElementException:\n pass\n\n return text", "def span_instance(self) -> Span:\n return self._span_instance", "def get_text(self):\n 
return self.text", "def read_value_from_span_id(html, span_id):\n html_span = read_text_from_span_id(html, span_id)\n return float(html_span)", "def _text_of(self, elem):\n if isinstance(elem, Tag):\n text = [ ]\n for sub_elem in elem:\n text.append(self._text_of(sub_elem))\n\n return \" \".join(text)\n else:\n return elem.string", "def get_doc_text(self, doc_id):\n cursor = self.connection.cursor()\n cursor.execute(\n \"SELECT text FROM documents WHERE id = ?\",\n (doc_id,)\n )\n result = cursor.fetchone()\n cursor.close()\n return result if result is None else result[0]" ]
[ "0.82889014", "0.7150462", "0.6169565", "0.59940624", "0.59328765", "0.5892914", "0.58556867", "0.583677", "0.5791252", "0.5784249", "0.57462", "0.5718208", "0.5718208", "0.5718208", "0.5718208", "0.5718208", "0.5707402", "0.564645", "0.56462574", "0.5600071", "0.5583564", "0.55530316", "0.5514736", "0.5500102", "0.5462196", "0.5448155", "0.5430468", "0.5414109", "0.5409519", "0.53854066" ]
0.77947813
1
Returns a text representation of the candidate at the given index.
def get_candidate_text(json_dict, idx): # No candidate at this index. if idx < 0 or idx >= len(json_dict["passage_answer_candidates"]): raise ValueError("Invalid index for passage candidate: {}".format(idx)) return get_text_span(json_dict, json_dict["passage_answer_candidates"][idx])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetString(self, index):\n \n return self.choices[index].GetLabel()", "def __getitem__(self, index):\n return str(self.cpf[index])", "def _printFromIndex(self, index):\n ret = str(self.arr[index])\n iterator = index + 1\n while iterator != index:\n ret += ' {}'.format(self.arr[iterator])\n iterator = iterator + 1\n iterator = iterator % self.size\n return ret", "def __getitem__(self, index):\n def _getTextByIndex(blockIndex):\n return self._doc.findBlockByNumber(blockIndex).text()\n\n if isinstance(index, int):\n index = self._checkAndConvertIndex(index)\n return _getTextByIndex(index)\n elif isinstance(index, slice):\n start, stop, step = index.indices(self._doc.blockCount())\n return [_getTextByIndex(blockIndex) \\\n for blockIndex in range(start, stop, step)]", "def get_index_text(self, name):\n raise NotImplementedError('must be implemented in subclasses')", "def output(index: int = 0) -> str:\n return outputs()[index]", "def StringFromMatchbox(index):\n\tboard = BoardFromIndex(index)\n\tmatchbox = GetMatchboxes()[index]\n\n\toutput = []\n\tfor row in range(3):\n\t\tsquares = []\n\t\tfor col in range(3):\n\t\t\tif board[row][col] == ' ':\n\t\t\t\tsquares.append('{:^3}'.format(matchbox[row*3 + col]))\n\t\t\telse:\n\t\t\t\tsquares.append('{:^3}'.format(board[row][col]))\n\t\toutput.append('|'.join(squares))\n\treturn '\\n-----------\\n'.join(output)", "def get_text_from_note (self,\r\n index):\r\n\r\n if self.using_database:\r\n aprint('GETTING TEXT DROM NOTE')\r\n value_tuple = (notebookname, str(index),)\r\n db_cursor.execute(\"SELECT note_body\"+\r\n \" FROM notes WHERE notebook=?\"+\r\n \" AND note_index=?;\",value_tuple)\r\n try:\r\n text = db_cursor.fetchone()[0].replace(\"''\",\"'\")\r\n except:\r\n text = ''\r\n\r\n return text\r\n\r\n if str(index) in self.note_dict:\r\n return self.note_dict[str(index)].text\r\n return ''", "def __getitem__(self, index):\n return str(self.cnpj[index])", "def get_index_text(self, crate, module, impl, name):\n raise NotImplementedError", "def index_as_string(self):\n return self.index().to_string() if self.index() else None", "def index_letter_string(self, index):\n\t\treturn \"(\" + ALPHABET[index] + \")\"", "def display_for_index(self, index):\n obj = index.data(self.ObjectRole)\n cb = self.DISPLAY_CALLBACKS.get(index.column())\n if not cb:\n return \"\"\n return cb(obj)", "def decode(self, text_index):\n texts = []\n for idx, text_idx in enumerate(text_index):\n text = ''\n text_idx = text_idx.numpy()\n for i in range(len(text_idx)):\n if text_idx[i] == 1:\n break\n else:\n text += self.character[text_idx[i]]\n texts.append(text)\n return texts", "def report(self, index: Optional[int] = None) -> str:\n if self._passed is None:\n raise RuntimeError('Cannot report on incomplete test')\n\n report = '[FAIL] '\n if self._passed:\n report = '[PASS] '\n report += self._name\n\n if self._note is not None:\n report += '\\n ' + str(self._note)\n\n if index is not None:\n number = str(index)\n while len(number) < 2:\n number = ' ' + number\n\n report = '[' + number + '] ' + report\n\n return report", "def synthesize_text(self, h, ix, n):\n # The next input vector\n xnext = np.zeros((self.vocab_len, 1))\n # Use the index to set the net input vector\n xnext[ix] = 1 # 1-hot-encoding\n\n txt = ''\n for t in range(n):\n _, h, _, p = self.evaluate_classifier(h, xnext)\n # At each time step t when you generate a\n # vector of probabilities for the labels,\n # you then have to sample a label from this PMF\n ix = np.random.choice(range(self.vocab_len), 
p=p.flat)\n xnext = np.zeros((self.vocab_len, 1))\n xnext[ix] = 1 # Lecture 9, page 22\n txt += self.ind_to_char[ix]\n\n return txt", "def report_index(self, index):\n i = 0\n for k, data in self.matches[index].items():\n if i != 0:\n print\n print fmt(\"['%c': charset - chars]\" % k, MAGENTA)\n print fmt(sorted([x for x in data[\"charset\"]]), WHITE)\n print fmt(data[\"chars\"], WHITE)\n i = 1", "def index_to_string(index):\n if index:\n s = \"/\".join(index)\n return Quote(s)\n else:\n return \".\"", "def strIdx(idx):\n if not isinstance(idx, (int, np.integer)):\n raise ValueError(\"Index must be an integer.\")\n\n return str(idx) if idx >= 0 else str(-idx) + u'\\u0305'", "def _get_interleving(self, index):\n try:\n index = self._char_indexes[index - 1]\n except IndexError:\n return \"\"\n s = \"\"\n while True:\n index += 1\n if index in self._char_indexes:\n break\n elif index in self._code_indexes:\n s += self._raw_string[index]\n else:\n break\n return s", "def cause_of_death_index_to_string(index: int) -> str:\n\n if index == CauseOfDeath.STARVATION.value:\n return \"Starvation\"\n\n elif index == CauseOfDeath.DEHYDRATION.value:\n return \"Dehydration\"\n\n elif index == CauseOfDeath.EATEN.value:\n return \"Eaten\"\n\n else:\n raise ValueError(\"Did not recognize CauseOfDeath index!\")", "def __getitem__(self, index):\n sample, label = self.data[index], self.labels[index]\n\n # transform the sample and the label,\n # in order to feed them to the model\n vec_sample = vectorize(sample, self.word2idx, self.length)\n\n # PROSOXH EIXAME BUUUUUUUUUUUUUUUUUUUG ZHTAGAME index POU > MAX_LENGTH\n return vec_sample, label, min(len(self.data[index]), self.length)", "def solution_to_string(self):\n solution_vector_index_format = [index+1 if elem == 1 else -index-1 for index, elem in enumerate(self.solution_vector)]\n return \" \".join(map(str, solution_vector_index_format))", "def __getitem__(self, index):\n txt_seq = self.txt_seqs[index]\n word_id_seq = self._preprocess(txt_seq)\n return word_id_seq, txt_seq", "def _index_to_unicode(cls, index: int) -> str:\n return \"\".join(cls._unicode_subscripts[int(_)] for _ in str(index))", "def __getitem__(self, idx):\n return self.corpus[idx]", "def get_text(self, i: int = None) -> str:\n if i is None:\n i = self.index\n else:\n i = str(i)\n logging.info(f\"get text. {self.desc}\")\n js = f\"\"\"return document.querySelectorAll(\"{self.css}\")[{i}].textContent;\"\"\"\n return self._execute_javascript(js)", "def input(index: int = 0) -> str:\n return inputs()[index]", "def __str__(self):\n return \"{}_human\".format(self.index)", "def __getitem__(self, index):\n clef = self.clef.clef # pandas dataframe\n vocab_concepts = self.vocab_concept\n vocab_word = self.vocab_word\n ann_id = self.ids[index]\n concepts_whole = clef.loc[ann_id]['concepts']\n concepts_whole = concepts_whole.split(';')\n caption = clef.loc[ann_id]['caption']\n img_id = clef.loc[ann_id]['image_id']\n path = clef.loc[ann_id]['file_name'] + \".jpg\"\n image = Image.open(os.path.join(self.root, path)).convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n tokens = nltk.tokenize.word_tokenize(str(caption).lower())\n caption = []\n caption.append(vocab_word('<start>'))\n caption.extend([vocab_word(token) for token in tokens])\n caption.append(vocab_word('<end>'))\n target = torch.Tensor(caption)\n concepts_idx = [[0,vocab_concepts(concept)] for concept in concepts_whole]\n return image, concepts_idx, target" ]
[ "0.6410651", "0.6363077", "0.6317435", "0.6307074", "0.61769325", "0.6108662", "0.61011386", "0.6054563", "0.6041569", "0.5981198", "0.58085775", "0.576806", "0.57276076", "0.5684942", "0.5660255", "0.5657564", "0.56432194", "0.5622297", "0.56197083", "0.56084627", "0.5603121", "0.559912", "0.5573512", "0.5569066", "0.55673957", "0.5557956", "0.5556555", "0.5542793", "0.5526307", "0.55124336" ]
0.7112093
0
Converts a TyDi 'entry' from `create_entry_from_json` to `TyDiExample`.
def to_tydi_example(entry, is_training): if is_training: answer = make_tydi_answer(entry["contexts"], entry["answer"]) start_byte_offset = answer.offset end_byte_offset = answer.offset + byte_len(answer.text) else: answer = None start_byte_offset = None end_byte_offset = None return TyDiExample( example_id=int(entry["id"]), language_id=get_language_id(entry["language"]), question=entry["question"], contexts=entry["contexts"], plaintext=entry["plaintext"], context_to_plaintext_offset=entry["context_to_plaintext_offset"], answer=answer, start_byte_offset=start_byte_offset, end_byte_offset=end_byte_offset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_entry(entry):\n Entry.create(**entry)\n return entry", "def from_dict(cls, dikt) -> \"Todo\":\n return util.deserialize_model(dikt, cls)", "def example_json(example_json_file):\n return json.loads(example_json_file)", "def _json_to_instance(self, json_dict: JsonDict) -> Instance:\n premise_text = json_dict[\"premise\"]\n hypothesis_text = json_dict[\"hypothesis\"]\n same_sentence = json_dict[\"same_sentence\"]\n same_paragraph = json_dict[\"same_paragraph\"]\n\n return self._dataset_reader.text_to_instance(premise_text,\n hypothesis_text,\n label=None,\n same_sentence=same_sentence,\n same_paragraph=same_paragraph)", "def from_dict(cls, dikt) -> 'DayResult':\n return util.deserialize_model(dikt, cls)", "def decode_json_example(json_examples, name=None):\n return gen_parsing_ops.decode_json_example(json_examples, name=name)", "def parse_example(example):\n metadata, data = example.strip().split('\\n\\n')\n metadata = pytoml.loads(metadata)\n metadata['success'] = metadata['result'] == 'success'\n metadata['name'] = re.sub(r'[ -]', '_', metadata['name'].lower())\n del metadata['result']\n return Example(data=data.strip(), **metadata)", "def from_json(text, check_format=True, directory=None):\n return ExperimentListFactory.from_dict(\n json.loads(text, object_hook=_decode_dict),\n check_format=check_format,\n directory=directory,\n )", "def from_dict(cls, dikt) -> 'VultrExtra':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Story':\n return util.deserialize_model(dikt, cls)", "def create_example_train(row, vocab):\n context, utterance, label = row\n context_transformed = transform_sentence(context, vocab)\n utterance_transformed = transform_sentence(utterance, vocab)\n context_len = len(next(vocab._tokenizer([context])))\n utterance_len = len(next(vocab._tokenizer([utterance])))\n label = int(float(label))\n\n # New Example\n example = tf.train.Example()\n example.features.feature[\"context\"].int64_list.value.extend(context_transformed)\n example.features.feature[\"utterance\"].int64_list.value.extend(utterance_transformed)\n example.features.feature[\"context_len\"].int64_list.value.extend([context_len])\n example.features.feature[\"utterance_len\"].int64_list.value.extend([utterance_len])\n example.features.feature[\"label\"].int64_list.value.extend([label])\n return example", "def from_dict(cls, dikt) -> 'TemperatureZone':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'ExtraLink':\n return util.deserialize_model(dikt, cls)", "def convert_to_tf_example(\n patient_data: Tuple[str, Dict[str, object]]\n) -> tf.train.Example:\n try:\n data = patient_data[1]\n patient = data[\"patient\"][0]\n studies = data[\"studies\"][0]\n \n features = convert_patient_to_feature(patient)\n for study_id, study in studies:\n study_data = convert_study_to_feature(study)\n for feature in study_data:\n features.update(feature)\n return tf.train.Example(features=tf.train.Features(feature=features),)\n except Exception as e:\n _logger.error(\n f\"Error occurred when creating a TFRecord. patient_data: {data.get('patient', data)}. 
Error: {e}.\"\n )\n return tf.train.Example(features=tf.train.Features(feature={}),)", "def create_recipe(*, recipe_in: RecipeCreate) -> dict:\n new_entry_id = len(RECIPES) + 1\n recipe_entry = Recipe(\n id=new_entry_id,\n label=recipe_in.label,\n source=recipe_in.source,\n url=recipe_in.url,\n )\n RECIPES.append(recipe_entry.dict())\n\n return recipe_entry", "def dict_to_example(dictionary):\n features = {}\n for k, v in six.iteritems(dictionary):\n features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))\n return tf.train.Example(features=tf.train.Features(feature=features))", "def create_example_test(row, vocab):\n context, utterance = row[:2]\n distractors = row[2:]\n context_len = len(next(vocab._tokenizer([context])))\n utterance_len = len(next(vocab._tokenizer([utterance])))\n context_transformed = transform_sentence(context, vocab)\n utterance_transformed = transform_sentence(utterance, vocab)\n\n # New Example\n example = tf.train.Example()\n example.features.feature[\"context\"].int64_list.value.extend(context_transformed)\n example.features.feature[\"utterance\"].int64_list.value.extend(utterance_transformed)\n example.features.feature[\"context_len\"].int64_list.value.extend([context_len])\n example.features.feature[\"utterance_len\"].int64_list.value.extend([utterance_len])\n\n # Distractor sequences\n for i, distractor in enumerate(distractors):\n dis_key = \"distractor_{}\".format(i)\n dis_len_key = \"distractor_{}_len\".format(i)\n # Distractor Length Feature\n dis_len = len(next(vocab._tokenizer([distractor])))\n example.features.feature[dis_len_key].int64_list.value.extend([dis_len])\n # Distractor Text Feature\n dis_transformed = transform_sentence(distractor, vocab)\n example.features.feature[dis_key].int64_list.value.extend(dis_transformed)\n return example", "def convert_example(example, tokenizer):\n\n feature = tokenizer(\n text=example['question'],\n text_pair=example['answer'],\n max_seq_len=args.max_seq_length)\n feature['labels'] = example['labels']\n feature['id'] = example['id']\n\n return feature", "def convert_json_1(json):\n\n # TODO Add batch details to json format\n # TODO Get default direct entry batch details if not provided\n\n LOGGER.debug('convert json message:%s', json)\n direct_entry = {\n 'record_type': '1',\n 'reel_seq_num': '01',\n 'name_fin_inst': 'SUN',\n 'user_name': 'hello',\n 'user_num': '123456',\n 'file_desc': 'payroll',\n 'date_for_process': datetime.strptime(json['post_date'], '%Y-%m-%d').strftime('%d%m%y'),\n 'bsb_number': json['to_routing'],\n 'account_number': json['to_account'],\n 'indicator': ' ',\n 'tran_code': '13' if json['tran_type'] == 'db' else '53',\n 'amount': '{amount:010}'.format(amount=json['amount']), # $2.00\n 'account_title': json['to_name'],\n 'lodgement_ref': json['to_description'],\n 'trace_bsb_number': json['from_routing'],\n 'trace_account_number': json['from_account'],\n 'name_of_remitter': json['from_name'],\n 'withholding_tax_amount': '00000000',\n }\n\n return direct_entry", "def _convert_example(self, output_file, data_dict):\n print('Generating %s' % output_file)\n with tf.compat.v1.python_io.TFRecordWriter(output_file) as record_writer:\n data = data_dict['data'].astype(np.int8)\n labels = data_dict['label'].astype(np.int64)\n num_entries_in_batch = len(labels)\n for i in tqdm(range(num_entries_in_batch)):\n example = tf.train.Example(features=tf.train.Features(\n feature={\n 'data': _bytes_feature(data[i].tobytes()),\n 'label': _int_feature(labels[i]),\n }))\n 
record_writer.write(example.SerializeToString())", "def _convert_example(self, output_file, data_dict):\n print('Generating %s' % output_file)\n with tf.compat.v1.python_io.TFRecordWriter(output_file) as record_writer:\n data = data_dict['data'].astype(np.int8)\n labels = data_dict['label'].astype(np.int64)\n num_entries_in_batch = len(labels)\n for i in tqdm(range(num_entries_in_batch)):\n example = tf.train.Example(features=tf.train.Features(\n feature={\n 'data': _bytes_feature(data[i].tobytes()),\n 'label': _int_feature(labels[i]),\n }))\n record_writer.write(example.SerializeToString())", "def test_Entry_creation(self):\n test_entry = self.create_Entry()\n self.assertTrue(isinstance(test_entry, Entry))", "def from_json(cls, json_data, demo_name):\n\t\tcur_bm = []\n\t\tcur_ks = []\n\n\t\tfor k in json_data[\"events\"]:\n\t\t\tif k[\"name\"] == \"Killstreak\":\n\t\t\t\tcur_ks.append(DemoEvent(int(k[\"value\"]), int(k[\"tick\"]), None))\n\t\t\telif k[\"name\"] == \"Bookmark\":\n\t\t\t\tcur_bm.append(DemoEvent(k[\"value\"], int(k[\"tick\"]), None))\n\t\treturn cls(demo_name, cur_ks, cur_bm)", "def example_to_data(self, example):\n raise NotImplementedError", "def from_dict(cls, dikt) -> \"InlineResponse201\":\n return util.deserialize_model(dikt, cls)", "def _deserialize_example(example_proto, labeled=True):\n if labeled:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string),\n 'patient_id': tf.io.FixedLenFeature([], tf.int64),\n 'sex': tf.io.FixedLenFeature([], tf.int64),\n 'age_approx': tf.io.FixedLenFeature([], tf.int64),\n 'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64),\n 'diagnosis': tf.io.FixedLenFeature([], tf.int64),\n 'target': tf.io.FixedLenFeature([], tf.int64)\n }\n else:\n feature_description = {\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'image_name': tf.io.FixedLenFeature([], tf.string)\n }\n\n return tf.io.parse_single_example(example_proto, feature_description)", "def from_dict(cls, dikt) -> 'InlineResponse201':\n return util.deserialize_model(dikt, cls)", "def dict_to_feature(d):\n f = ee.Feature(None,ee.Dictionary(d))\n return f", "def create_entry(hass: HomeAssistant) -> MockConfigEntry:\n entry = MockConfigEntry(\n domain=DOMAIN,\n data={\n CONF_URL: URL,\n CONF_API_KEY: API_KEY,\n CONF_VERIFY_SSL: False,\n },\n )\n\n entry.add_to_hass(hass)\n return entry", "def test_addEntryByDict(self):\n self.g.entryFormat = ['term', 'tags', 'value']\n b = self.g.add_entry({'term': 'foo', 'tags': 'a', 'value': '1'})\n self.assertTrue(b)" ]
[ "0.58346426", "0.5392077", "0.5235372", "0.5232819", "0.52080363", "0.51707566", "0.5161617", "0.5157883", "0.5148814", "0.51232684", "0.51134694", "0.5111602", "0.5105579", "0.51049596", "0.5095554", "0.50900686", "0.5078553", "0.5064115", "0.5054117", "0.50366974", "0.50366974", "0.5019247", "0.5002299", "0.49901065", "0.4980904", "0.49704126", "0.4964462", "0.49475947", "0.4909763", "0.49039012" ]
0.74971884
0
String representation of the Parroquia class.
def __unicode__(self): return self.parroquia
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, nombre, cantidad, precio):\n\n # Atributos privados por convensión\n self._an = 15 # Ancho de columna nombre\n self._ac = 8 # Ancho de columna cantidad\n self._ap = 10 # Ancho de columna precio\n self._ast = 10 # Ancho de columna subtotal\n\n # Se inicializan los atributos de la instancia\n self.nombre = nombre\n self.cantidad = cantidad\n self.precio = precio", "def __init__(self):\n\n self._nb_vie = donnees.nb_essai\n\n self._mot_a_trouver = str()\n\n self._mot_en_cours = list() # Sera initilaiser par un nb de 0 = len(mot_a_trouver) sera modifier a chaque proposition juste\n # Il permettra de tester la victoire\n\n self._nom_joueur = str()", "def __init__(self):\n mi_parqueo = list()", "def __init__(self, nombre, cantidad, precio, marca, modelo):\n\n # Se ejecuta el constructor de la clase padre\n super().__init__(nombre, cantidad, precio)\n\n # Se modifica el valor de un atributo privado\n self._an = 25\n\n # Se inicializan los atributos de la clase hija\n self.marca = marca\n self.modelo = modelo", "def __init__(self, nom, prenom):\n \n self.nom = nom\n self.prenom = prenom\n self.age = 33\n self._lieu_residence = \"Paris\" # Notez le souligné _ devant le nom", "def __init__(self):\n self.nombre_roues = 4\n self.nombre_fauteils = 1\n self.moteur = False\n self.volant = True", "def __init__(self, diccionario):\n self.numero = diccionario['numero']\n self.nombre = diccionario['equipo_nombre']\n self.pokmov = lectores.pokemon_y_movimiento_a_tuplas(diccionario)", "def __init__(self, nombre, socios):\n self.__nombre = nombre\n self.__socios = socios\n self.__resultados = {'p1': '', 'p2': '', 'p3': '', 'p4': '', 'p5': '', 'p6': '', 'p7': ''}", "def __init__(self, nom, prenom):\r\n self.nom = nom\r\n self.prenom = prenom\r\n self.age = 33", "def __init__(self):\n {}\n #generate a monoid Q\n self.monoid_Q = self.generateQ()[0]\n self.relationOfElements_Q = self.generateQ()[1]\n self.p_Position = self.generateQ()[2]\n self.qOfPosition = self.generateQ()[3]\n #print(self.qOfPosition)", "def __repr__(self):\n return \"{0}({1})\".format(self.__class__.__name__,\n \", \".join(map(str, self.pars)))", "def __init__(self, nome, qtd_titulos):\n #super(nome, qtd_titulos)\n self.nome = nome\n self.qtd_titulos = qtd_titulos", "def p(self):\n return 'Plane'", "def __str__(self):\n return self.idBaixasPagar", "def __init__(self, parant):\n pass", "def Cima(self):\n if(self.Pila_Vacia()=='true'):\n return \"Pila Vacia\"\n else:\n return self.pila[self.puntero]", "def __init__(self, marqueur, allele, hauteur, informatif):\n\n self.marqueur = marqueur\n self.allele = allele\n self.hauteur = hauteur\n self.informatif = informatif", "def mezclar_bolsa(self):", "def __init__(self, altura, peso, edad):\n\t\tself.altura = altura # OJO TODAS LAS VARIABLES SON PUBLICAS \n\t\tself.peso = peso \n\t\tself.edad = edad\n\t\tself.profesion = \"\" # esta la inicializamos nosotros\n\t\tself.lista_tareas = []\n\t\tself.__privado = 1 # este atributo es privado no podemos acceder a el desde fuera", "def __init__(self):\n self.aeropuertos = {}", "def __init__(self, espec_izquierda, espec_derecha):\n self._izquierda = espec_izquierda\n self._derecha = espec_derecha\n return", "def __repr__(self):\n return \"<PID_onject P: %s I: %s D: %s>\"\\\n % (self.K[0], self.K[1], self.K[2])", "def __init__(self, p1_proba=0.5):\n self.p1_proba = p1_proba", "def __init__():\n self.placa = placa", "def __str__(self):\n\t\tif self.__valide:\n\t\t\treturn str(self.__tete)\n\t\telse:\n\t\t\treturn \"(polynome invalide)\"", 
"def __init__(self, nom, prenom):\r\n self.nom = nom\r\n self.prenom = prenom\r\n self.age = 33\r\n self.lieu_residence = \"Paris\"\r\n print(\"helllo amine nta m9awwad\")", "def __str__(self) -> str:\n return (\n f\"offre de {self.beneficiaire} sur {self.proposition} \"\n \"(cagnotte {self.proposition.cagnotte})\"\n )", "def __init__(self, e, p, kn, parool):\n self.eesnimi = e\n self.perenimi = p\n self.kasutaja_nimi = kn\n self.parool = parool\n self.roll = \"tavakasutaja\"", "def __str__(self):\n return \"p(\" + \",\".join([str(round(c, digits)) for c in self.components]) + \")\"", "def parameters(self):" ]
[ "0.62735045", "0.6233848", "0.62092257", "0.6143086", "0.5984596", "0.59630907", "0.59224004", "0.58816874", "0.58564144", "0.5777072", "0.5774827", "0.5763267", "0.57328683", "0.5721564", "0.5687393", "0.5685108", "0.5682803", "0.5677757", "0.56428087", "0.5642351", "0.56404096", "0.5624022", "0.56179386", "0.5608335", "0.55860543", "0.5575255", "0.5574087", "0.5547742", "0.5540685", "0.5531553" ]
0.7457928
0
The URL of the main administration view for Parroquia.
def get_absolute_url(self): return ('listar_parroquia', [self.id, ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_orion_admin_url(self, obj):\n return obj.orion_admin_url", "def get_admin_url_to_add_run(self, request):\n base_url = reverse(\"admin:courses_courserun_add\")\n return f\"{base_url:s}?direct_course={self.id:d}\"", "def getAdmin():", "def url(self):\n return url_for('/admin/groups/{}'.format(self.key))", "def url(self):\n ...", "def Url(self) -> str:", "def get_absolute_url(self):\n return reverse('dettaglio_paziente', args=[str(self.idPaziente.id)])", "def admin():\n return redirect(url_for(\"user\", name=\"Admin!\"))", "def barbican_url(self):", "def page_url(self):\n url = '/plaque/%s' % self.key.urlsafe()\n return url", "def get_absolute_url(self):\n return reverse('product-detail', args=[str(self.id_produto)])", "def administrarpermiso():\n if not current_user.is_authenticated():\n flash('Debe loguearse primeramente!!!!', 'loggin')\n return render_template('index.html')\n \n permission = UserRol('ADMINISTRADOR')\n if permission.can():\n isAdmin = request.args.get('value')\n rol = request.args.get('idrol')\n permisos = db_session.query(Permiso).order_by(Permiso.id)\n lista = []\n if not isAdmin :\n #=======================================================================\n # en esta lista se inserta los permisos que estan asignados a un rol\n #=======================================================================\n yourPermiso=getPermisosByRol(rol)\n for p in yourPermiso:\n lista.append(p)\n return render_template('permiso/administrarpermiso.html', permisos = permisos, isAdministrar = isAdmin, idrol = rol, asignados = lista)\n else:\n flash('Sin permisos para administrar proyectos', 'permiso')\n return render_template('index.html')", "def dashboardUrl(self):\n\t\turl = \"http://127.0.0.1/horizon\"\n\t\tx = urlparse(self.url)\n\t\turl = \"%s://%s/horizon\" % (x.scheme , x.netloc.split(\":\")[0] )\n\t\treturn url", "def get_absolute_url(self):\n # return reverse('tutor-detail', args=[str(self.email_address)])\n return reverse('tutor-detail', args=[str(self.username)])", "def show_admin_edit_users():\n return render_admin_page(\"admin-eu.html\")", "def get(self):\n self.redirect('/admin')", "def get(self):\n self.redirect('/admin')", "def url (self):\n return Links.createURL('/')", "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def goToAdmin(request):\n\n\ttemplate = '../admin'\n\treturn HttpResponseRedirect(template)", "def admin_con():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n admins_query = Admins.query(ancestor = admin_base).order(-Admins.date)\n admins = admins_query.fetch()\n output = template('admin', name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname(), admins = admins)\n return output\n else:\n redirect('/')\n else:\n redirect('/')", "def url(self):\r\n return BASE_URL + \"/courses/\" + self.course_id + \"/\" + self.url_path", "def __command_url(self):\n return \"http://\" + self._host + \\\n \"/cgi-bin/hi3510/{}&-usr=\" + \\\n self._username + \"&-pwd=\" + self._password", "def _get(self, url ='/atributo/administraratributo'):\n return self.client.get(url, follow_redirects=True)", "def _get(self, url ='/administrarrol'):\n return self.client.get(url, follow_redirects=True)", "def resource_url(self):\n return self.portal_url + \"/\" + 
\"++resource++plonecommunity.app\"", "def admin_index():\n return 'Super-seekrit admin page.'", "def admin_only():\n return 'Super-seekrit admin page.'", "def url(self):\n return app.settings.cherrypy.url()", "def post(self) :\n self.redirect('/admin')" ]
[ "0.6939429", "0.6376486", "0.62612", "0.6213472", "0.6176542", "0.61681587", "0.6094484", "0.6040345", "0.5983753", "0.5958032", "0.59322804", "0.5912582", "0.584376", "0.58436394", "0.58342546", "0.58238477", "0.58238477", "0.5822934", "0.581415", "0.5813586", "0.58131236", "0.5793744", "0.5768274", "0.5767544", "0.5763545", "0.5748703", "0.57409084", "0.5730958", "0.5727738", "0.57016754" ]
0.66241646
1
Return True if this element is an instance of the given subclass. If a category string is specified, then both subclass and category matches are required.
def _isA(self, elementClass, category = ''): if not isinstance(self, elementClass): return False if category and self.getCategory() != category: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subclassof(c, b):\n try:\n return issubclass(c, b)\n except TypeError:\n return False", "def is_subclass(parent_class, child_class_name):\n for child_class in parent_class.__subclasses__():\n if child_class.__name__ == child_class_name:\n return True\n return False", "def __subclasscheck__(self, subclass):\n\n if isinstance(subclass, ObjCClass):\n return bool(subclass.isSubclassOfClass(self))\n else:\n raise TypeError(\n f\"issubclass(X, {self!r}) arg 1 must be an ObjCClass, \"\n f\"not {type(subclass).__module__}.{type(subclass).__qualname__}\"\n )", "def is_proper_subclass(cls, maybe_proper_superclass):\n return cls is not maybe_proper_superclass and issubclass(cls, maybe_proper_superclass)", "def isinstance(self, class_or_string):\n if class_or_string is None:\n return False\n import inspect\n if inspect.isclass(class_or_string):\n return isinstance(self, class_or_string)\n else:\n return self.__class__.__name__.lower() == class_or_string.lower()", "def issubclass_(arg1, arg2):\n try:\n return issubclass(arg1, arg2)\n except TypeError:\n return False", "def isClass(self, className):\n return self.characterClass == className or self.baseClass == className", "def InheritsFrom(self,base_class,child_class):\n if self.CleanName(base_class) in child_class.split(\"(\")[-1]:\n return True\n else:\n return False", "def __subclasscheck__(self, subclass):\n\n if isinstance(subclass, ObjCClass):\n return bool(subclass.conformsToProtocol(self))\n elif isinstance(subclass, ObjCProtocol):\n return bool(libobjc.protocol_conformsToProtocol(subclass, self))\n else:\n raise TypeError(\n f\"issubclass(X, {self!r}) arg 1 must be an ObjCClass or ObjCProtocol, \"\n f\"not {type(subclass).__module__}.{type(subclass).__qualname__}\"\n )", "def __subclasshook__(cls, subclass: Type[Any]) -> bool:\n return (subclass in cls.__subclasses__() \n or denovo.unit.has_methods(\n item = subclass,\n methods = [\n 'add', 'subset', '__add__', '__iadd__', '__iter__', \n '__len__']))", "def match(self, cls):\n return isinstance(self, cls)", "def issubclass_safe(value, type_):\n try:\n return issubclass(value, type_)\n except (TypeError, AttributeError):\n # Cannot perform issubclass on some types\n return False", "def isA(citem, testCategory):\n try:\n return testCategory.lower().strip() in citem.category\n except:\n for tc in testCategory:\n if tc.lower().strip() in citem.category:\n return True\n return False", "def isA(citem, testCategory):\n try:\n return testCategory.lower().strip() in citem.category\n except:\n for tc in testCategory:\n if tc.lower().strip() in citem.category:\n return True\n return False", "def class_is_type(cls, *seg_type: str) -> bool:\n # Use set intersection\n if cls._class_types.intersection(seg_type):\n return True\n return False", "def is_not_subclass(self, cls, seconds=60):\n st = '('+') & ('.join(cls.axioms)+')'\n m = prover9(self.axioms, [st], seconds, 1, options=self.options)\n if type(m)==list:\n return True, m[0]\n else:\n return False, m", "def is_subclass(self, cls, seconds=60):\n proofs = []\n for ax in cls.axioms:\n p = pr9(self.axioms, [ax], seconds, self.options)\n if type(p)==list:\n print ax, \"proved\"\n else:\n print ax, p\n return False, 'No conclusions'\n proofs.append(p)\n return True, proofs", "def __subclasscheck__(cls, subclass):\r\n # Check cache\r\n if subclass in cls._abc_cache:\r\n return True\r\n # Check negative cache; may have to invalidate\r\n if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:\r\n # Invalidate the negative cache\r\n 
cls._abc_negative_cache = set()\r\n cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter\r\n elif subclass in cls._abc_negative_cache:\r\n return False\r\n # Check the subclass hook\r\n ok = cls.__subclasshook__(subclass)\r\n if ok is not NotImplemented:\r\n assert isinstance(ok, bool)\r\n if ok:\r\n cls._abc_cache.add(subclass)\r\n else:\r\n cls._abc_negative_cache.add(subclass)\r\n return ok\r\n # Check if it's a direct subclass\r\n if cls in getattr(subclass, '__mro__', ()):\r\n cls._abc_cache.add(subclass)\r\n return True\r\n # Check if it's a subclass of a registered class (recursive)\r\n for rcls in cls._abc_registry:\r\n if _is_subclass(subclass, rcls):\r\n cls._abc_cache.add(subclass)\r\n return True\r\n # Check if it's a subclass of a subclass (recursive)\r\n for scls in cls.__subclasses__():\r\n if _is_subclass(subclass, scls):\r\n cls._abc_cache.add(subclass)\r\n return True\r\n # No dice; update negative cache\r\n cls._abc_negative_cache.add(subclass)\r\n return False", "def is_Fit_subclass(cls: Type[Fit]) -> bool:\n try:\n if issubclass(cls, Fit) and (cls is not Fit):\n return True\n else:\n return False\n except TypeError:\n return False", "def issubclass_(type_, dtype):\n if not isinstance(type_, typing.Type):\n return False\n return typing.is_subclass(type_, dtype)", "def isa(self, type_id, supertype_id):\n return supertype_id in self.ancestors(type_id)", "def _issubclass_Generic(subclass, superclass, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check):\n # this function is partly based on code from typing module 3.5.2.2\n if subclass is None:\n return False\n subclass = _extra_inv(subclass)\n origin = _origin(superclass)\n if is_Tuple(subclass):\n tpl_prms = get_Tuple_params(subclass)\n if not tpl_prms is None and len(tpl_prms) == 0:\n # (This section is required because Empty shall not be\n # used on Tuples.)\n # an empty Tuple is any Sequence, regardless of type\n # note that we needn't consider superclass being a tuple,\n # because that should have been checked in _issubclass_Tuple\n sup = superclass if origin is None else origin\n sup = _extra_inv(sup)\n return issubclass(typing.Sequence, sup)\n subclass = Sequence[Union[tpl_prms]]\n if is_Generic(subclass):\n # For a class C(Generic[T]) where T is co-variant,\n # C[X] is a subclass of C[Y] iff X is a subclass of Y.\n suborigin = _origin(subclass)\n if suborigin is None:\n orig_bases = _bases(subclass)\n for scls in orig_bases:\n if is_Generic(scls):\n if _issubclass_Generic(scls, superclass, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs,\n _recursion_check):\n return True\n #Formerly: if origin is not None and origin is subclass.__origin__:\n elif origin is not None and \\\n _issubclass(_origin(subclass), origin, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check):\n prms = _parameters(origin)\n assert len(superclass.__args__) == len(prms)\n if len(subclass.__args__) == len(prms):\n sub_args = subclass.__args__\n else:\n # We select the relevant subset of args by TypeVar-matching\n sub_args = _select_Generic_superclass_parameters(subclass, origin)\n assert len(sub_args) == len(prms)\n for p_self, p_cls, p_origin in zip(superclass.__args__,\n sub_args,\n prms):\n if isinstance(p_origin, TypeVar):\n if p_origin.__covariant__:\n # Covariant -- p_cls must be a subclass of p_self.\n if not _issubclass(p_cls, p_self, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs,\n 
_recursion_check):\n break\n elif p_origin.__contravariant__:\n # Contravariant. I think it's the opposite. :-)\n if not _issubclass(p_self, p_cls, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs,\n _recursion_check):\n break\n else:\n # Invariant -- p_cls and p_self must equal.\n if p_self != p_cls:\n if not _issubclass(p_cls, p_self, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs,\n _recursion_check):\n break\n if not _issubclass(p_self, p_cls, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs,\n _recursion_check):\n break\n else:\n # If the origin's parameter is not a typevar,\n # insist on invariance.\n if p_self != p_cls:\n if not _issubclass(p_cls, p_self, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs,\n _recursion_check):\n break\n if not _issubclass(p_self, p_cls, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs,\n _recursion_check):\n break\n else:\n return True\n # If we break out of the loop, the superclass gets a chance.\n\n # I.e.: origin is None or not _issubclass(suborigin, origin)\n # In this case we must consider origin or suborigin to be None\n # We treat param-values as unknown in the following sense:\n # for covariant params: treat unknown more-or-equal specific than Any\n # for contravariant param: Any more-or-equal specific than Unknown\n # for invariant param: unknown never passes\n # if both are unknown:\n # return False (?) (or NotImplemented? Or let a flag decide behavior?)\n if origin is None:\n if not pytypes.check_unbound_types:\n raise TypeError(\"Attempted to check unbound type(superclass): \"+str(superclass))\n if not suborigin is None:\n if not type.__subclasscheck__(superclass, suborigin):\n return False\n prms = _find_Generic_super_origin(suborigin, superclass)\n args = _select_Generic_superclass_parameters(subclass, superclass)\n for i in range(len(prms)):\n if prms[i].__covariant__:\n if pytypes.strict_unknown_check:\n return False\n elif prms[i].__contravariant__:\n # Subclass-value must be wider than or equal to Any, i.e. must be Any:\n if not args[i] is Any:\n return False\n else:\n return False\n return True\n #else:\n # nothing to do here... 
(?)\n elif suborigin is None:\n if not pytypes.check_unbound_types:\n raise TypeError(\"Attempted to check unbound type (subclass): \"+str(subclass))\n if not type.__subclasscheck__(origin, subclass):\n return False\n prms = _parameters(origin)\n for i in range(len(prms)):\n if prms[i].__covariant__:\n # subclass-arg here is unknown, so in superclass only Any can pass:\n if not superclass.__args__[i] is Any:\n return False\n elif prms[i].__contravariant__:\n if pytypes.strict_unknown_check:\n return False\n else:\n return False\n return True\n# Formerly: if super(GenericMeta, superclass).__subclasscheck__(subclass):\n try:\n if type.__subclasscheck__(superclass, subclass):\n return True\n except TypeError: pass\n if _extra(superclass) is None or is_Generic(subclass):\n return False\n return _issubclass_2(subclass, _extra(superclass), bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check)", "def is_applicable_to(cls, device_type: str,\n device_class: Type[gdm_test_base.DeviceType],\n device_name: str) -> bool:\n return issubclass(device_class, gazoo_device_base.GazooDeviceBase)", "def inherits_from(obj, a_class):\n if type(obj) is not a_class:\n return(issubclass(type(obj), a_class))\n else:\n return False", "def obj_is_in_class(obj: unrealsdk.UObject, in_class: str) -> bool:\n return bool(obj.Class == unrealsdk.FindClass(in_class))", "def is_my_case(self, type_):\n return (\n isinstance(self.__apply_sequence(type_), self.declaration_class)\n )", "def is_child_class(obj, classinfo):\n try:\n return issubclass(obj, classinfo)\n except TypeError:\n return None", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class) is True:\n return True\n else:\n return False", "def is_subclass(self, left: TypeInfo, right: TypeInfo) -> bool:\n return nx.has_path(self._graph, right, left)", "def _issubclass_Tuple(subclass, superclass, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check):\n # this function is partly based on code from typing module 3.5.2.2\n subclass = _extra_inv(subclass)\n if not is_Type(subclass):\n # To TypeError.\n return False\n if not is_Tuple(subclass):\n if is_Generic(subclass):\n try:\n return _issubclass_Generic(subclass, superclass,\n bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs,\n _recursion_check)\n except:\n pass\n elif is_Union(subclass):\n return all(_issubclass_Tuple(t, superclass, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check)\n for t in get_Union_params(subclass))\n else:\n return False\n super_args = get_Tuple_params(superclass)\n if super_args is None:\n return True\n sub_args = get_Tuple_params(subclass)\n if sub_args is None:\n return False # ???\n # Covariance.\n # For now we check ellipsis in most explicit manner.\n # Todo: Compactify and Pythonify ellipsis branches (tests required before this).\n if is_Tuple_ellipsis(subclass):\n if is_Tuple_ellipsis(superclass):\n # both are ellipsis, so no length check\n common = min(len(super_args), len(sub_args))\n for i in range(common):\n if not _issubclass(sub_args[i], super_args[i], bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check):\n return False\n if len(super_args) < len(sub_args):\n for i in range(len(super_args), len(sub_args)):\n # Check remaining super args against the ellipsis type\n if not _issubclass(sub_args[i], super_args[-1], bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, 
_recursion_check):\n return False\n elif len(super_args) > len(sub_args):\n for i in range(len(sub_args), len(super_args)):\n # Check remaining super args against the ellipsis type\n if not _issubclass(sub_args[-1], super_args[i], bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check):\n return False\n return True\n else:\n # only subclass has ellipsis\n if len(super_args) < len(sub_args)-1:\n return False\n for i in range(len(sub_args)-1):\n if not _issubclass(sub_args[i], super_args[i], bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check):\n return False\n for i in range(len(sub_args), len(super_args)):\n # Check remaining super args against the ellipsis type\n if not _issubclass(sub_args[-1], super_args[i], bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check):\n return False\n return True\n elif is_Tuple_ellipsis(superclass):\n # only superclass has ellipsis\n if len(super_args)-1 > len(sub_args):\n return False\n for i in range(len(super_args)-1):\n if not _issubclass(sub_args[i], super_args[i], bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check):\n return False\n for i in range(len(super_args), len(sub_args)):\n # Check remaining sub args against the ellipsis type\n if not _issubclass(sub_args[i], super_args[-1], bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check):\n return False\n return True\n else:\n # none has ellipsis, so strict length check\n return (len(super_args) == len(sub_args) and\n all(_issubclass(x, p, bound_Generic, bound_typevars,\n bound_typevars_readonly, follow_fwd_refs, _recursion_check)\n for x, p in zip(sub_args, super_args)))" ]
[ "0.6687288", "0.6670129", "0.66446286", "0.6316864", "0.62128353", "0.610718", "0.6080735", "0.60664964", "0.60402757", "0.5992532", "0.59696436", "0.5941159", "0.58874506", "0.58874506", "0.5886609", "0.58573806", "0.58487344", "0.58478147", "0.57916117", "0.57669014", "0.5609203", "0.55877143", "0.55873746", "0.5577639", "0.5552411", "0.55394125", "0.5538546", "0.55033845", "0.5451642", "0.54360485" ]
0.7002873
0
Set the typed value of an input by its name, creating a child element to hold the input if needed.
def _setInputValue(self, name, value, typeString = ''): method = getattr(self.__class__, "_setInputValue" + getTypeString(value)) return method(self, name, value, typeString)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_field(self, name, value):\n locator = self._get_input_field_locator(name)\n self._populate_field(locator, value)", "def insert_by_name(name, val):\n try:\n name = driver.find_element_by_name(name)\n except Exception as e:\n raise(e)\n else:\n name.send_keys(val)", "def input(self, name, default=0):\n try:\n inp = int(input(\"Input value for {}:\".format(name)))\n except ValueError:\n if isinstance(default, str):\n default = self._variables[default]\n inp = default\n\n self._variables[name] = inp", "def simpleInputItem(name,value=None,itemtype=None,**kargs):\n kargs['name'] = name\n if value is not None:\n kargs['value'] = value\n if itemtype is not None:\n kargs['itemtype'] = itemtype\n return kargs", "def set_value(name, value):\n\n # Get existing named value\n named_value = get_named_value_raw(name)\n\n if named_value is None:\n # Create new named value\n named_value = NamedValue()\n named_value.name = name\n\n # Edit value\n named_value.value = value\n\n # Save\n named_value.put()\n\n # Finished\n return named_value", "def update_simple(parent, name, value):\n element = parent.find('./' + name) \n\n if element is None:\n element = ET.SubElement(parent, name)\n element.text = value\n else:\n element.text = value", "def setValue(self, name: unicode, value: object) -> None:\n ...", "def set_val(self, input):\n return", "def set_input(self, input):\n pass", "def set_input(self, input):\n pass", "def fill_input_field(self, by, locator, value=\"\"):\n field = self.wait_until_visible(locator_type=by, locator=locator)\n field.clear()\n field.send_keys(value)", "def __init__(self,name,value,*args,**kargs):\n \n kargs['text'] = '' # Force no label\n self.input = value\n InputItem.__init__(self,name,*args,**kargs)\n self.layout().insertWidget(1,self.input)", "def __init__(self, type_=\"text\", name=\"\"):\n super().__init__(\"input\")\n self.type = type_\n self.name = name", "def html5_field(name, base):\n return type(str(\"\"), (base,), {\"input_type\": name})", "def add_hidden_input(\n self,\n name: str,\n value: str,\n ) -> None:\n self._client.results[name] = value", "def set_input(self, input):\r\n\r\n self.reset()\r\n self.input = input", "def set_input_type(self, input_type):\n if input_type is not None: self._input_type.value = input_type\n return self", "def set(self, name, value):\n pass", "def input_user_name(self, user_name):\n self.locate_element_by_css_selector(USERNAME_SELECTOR).send_keys(user_name)", "def input_first_name(self, name):\n self.send_keys_to_element(self.firstname_textbox_selector, name)", "def add_input(self,form,prefix,**item):\n #print item\n item['name'] = prefix + item.get('name',self.autoname.next())\n if not 'value' in item:\n # no value: try to find one\n if 'choices' in item:\n item['value'] = item['choices'][0]\n # DO NOT USE A TEST if self.store: HERE\n # THAT DOES NOT SEEM TO WORK: ALWAYS RETURNS FALSE\n try:\n item['value'] = self.store[item['name']]\n except:\n pass\n\n # we should have a value now, or we can't continue!\n if not 'value' in item:\n raise ValueError,\"No value specified for item '%s'\" % item['name']\n \n if not 'itemtype' in item or item['itemtype'] is None:\n item['itemtype'] = defaultItemType(item)\n\n itemtype = item['itemtype']\n\n if type(itemtype) is str:\n if itemtype.endswith('radio') or itemtype.endswith('push'):\n if itemtype[0] in 'hv':\n item['direction'] = itemtype[0]\n item['itemtype'] = itemtype[1:]\n else:\n # default horizontal\n item['direction'] = 'h'\n \n\n if itemtype == 'slider':\n value = 
item['value']\n if type(value) == int:\n pass\n elif type(value) == float:\n item['itemtype'] = 'fslider'\n else:\n raise ValueError,\"Invalid value type for slider: %s\" % value\n\n item['parent'] = self\n\n field = inputAny(**item)\n self.fields.append(field)\n form.addWidget(field)", "def __init__(self,name,value,*args,**kargs):\n self.input = QtGui.QLineEdit(str(value))\n self.input.setReadOnly(True)\n InputItem.__init__(self,name,*args,**kargs)\n self._value_ = value\n if self._value_ is not None:\n self.layout().insertWidget(1,self.input)", "def set(self, name, found, type, value, valid, stringValue) :\n self.name_ = name\n self.found_ = found\n self.type_ = type\n self.value_ = value\n self.valid_ = valid\n self.stringValue_ = stringValue", "def __init__(self,name,value,*args,**kargs):\n self.input = CoordsBox()\n InputItem.__init__(self,name,*args,**kargs)\n self.layout().insertWidget(1,self.input)\n self.setValue(value)", "def _enter_value(self, element_id: str, value: str) -> None:\n element = self._driver.find_element_by_id('{}0'.format(element_id))\n element.send_keys(value)", "def name(self, value):\n\t\tself.form.setObjectName(value)", "def __init__(self,name,value,*args,**kargs):\n self._is_string_ = type(value) == str\n self._plain = kargs.get('plain',False)\n self.input = QtGui.QTextEdit()\n InputItem.__init__(self,name,*args,**kargs)\n self.setValue(value)\n self.layout().insertWidget(1,self.input)", "def __setattr__(self, name: str, value: Any) -> None:\n if self._initialized:\n for widget in self._list:\n if name == widget.name:\n raise AttributeError(\n \"Cannot set attribute with same name as a widget\\n\"\n \"If you are trying to change the value of a widget, use: \"\n f\"`{self.__class__.__name__}.{name}.value = {value}`\",\n )\n object.__setattr__(self, name, value)", "def render(self, name, value, attrs=None):\n append = self.attrs.pop(\"append\", self.appended_text)\n if value is None:\n value = ''\n final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)\n if value != '':\n # Only add the 'value' attribute if a value is non-empty.\n final_attrs['value'] = force_text(self._format_value(value))\n return format_html('<div class=\"inputs\"><input {}/><span>{}</span></div>',\n \" \".join(\"{}={}\".format(*attr) for attr in final_attrs.items()),\n append)", "def create_input_element(self, **kwargs):\r\n return None" ]
[ "0.6696381", "0.60982335", "0.6062318", "0.58775914", "0.58670473", "0.5854426", "0.5842993", "0.583215", "0.57965046", "0.57965046", "0.5707237", "0.56931925", "0.5665919", "0.56397855", "0.56089824", "0.55845267", "0.5571821", "0.5554291", "0.552724", "0.5509192", "0.5493371", "0.5489259", "0.54838157", "0.5474503", "0.54219157", "0.5415886", "0.5394411", "0.5383711", "0.53824586", "0.5381687" ]
0.64479256
1
(Deprecated) Return a vector of all Parameter elements.
def _getParameters(self): warnings.warn("This function is deprecated; parameters have been replaced with uniform inputs in 1.38.", DeprecationWarning, stacklevel = 2) return list()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameters_to_vector(self) -> np.ndarray:\n return nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def parameters(self) -> List[Parameter]:\n return self._parameters", "def params(self):\n params = []\n\n for item in self._definition.get('params', []):\n params.append(Parameter(**item))\n\n return params", "def param(self):\n return []", "def param(self):\n return []", "def _get_parameters(self) -> list:\n return self.parameters", "def param(self):\r\n\r\n return []", "def parameters(self):\n return [i for i in self.variables if has_roles(i, Parameter)]", "def param(self):\r\n return []", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def parameters(self):\n return []", "def get_params(self):\n return deepcopy(np.hstack([to_numpy(v).flatten() for v in\n self.parameters()]))", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def _getActiveParameters(self):\n warnings.warn(\"This function is deprecated; parameters have been replaced with uniform inputs in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def parameter_values(self) -> List[Tuple[str, Any]]:\n pvs = [(param, getattr(self, variable))\n for variable, param in self.variable_name_to_query_param.items()]\n return [(p, v) for p, v in pvs if v is not None]", "def param_values(self, pnames=None):\n l = self.get_params(pnames)\n v = [p.__get__(self)() for p in l]\n return np.array(v)", "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def generate_parameters(self):\n id = len(self.param_values)\n\n vector = []\n \n for idx in range(len(self.param_names)):\n name = self.param_names[idx]\n value = None\n settings = self.param_settings[idx]\n new_value = self.get_next_parameter(name, value, settings, id=id)\n vector.append(self.clamp(new_value, settings['min'], settings['max']))\n\n self.param_values.append(vector)\n self.result.append(None)\n return id, vector", "def parameters(self):\n return self._params", "def __parameters__(self) -> tuple[TypeVar, ...]:\n return super().__getattribute__(\"_parameters\")", "def get_parameters(self):\n result = self.generate_parameters()\n id, vector = result\n if not vector:\n return id, vector\n\n params = {}\n for idx in range(len(self.param_names)):\n name = self.param_names[idx]\n settings = self.param_settings[idx]\n value = vector[idx]\n if settings['type'] == \"int\":\n value = int(round(value))\n elif settings['type'] == \"float\":\n value = float(value)\n params[name] = value\n return id, params", "def lfParams2paramsVec(params):\n paramsDict = params.valuesdict()\n paramsVec = [value for value in paramsDict.itervalues()]\n return paramsVec", "def parameters(self):\n return [term.parameter for term in self.terms]", "def get_params(self):\n return list(self.params.values())", "def params(self) -> Tuple[Parameter, ...]:\n raise NotImplementedError()", "def parameter_names(self) -> List[str]:", "def get_all_param_values(layer):\n params = get_all_params(layer)\n return [p.get_value() for p in params]", "def batch_grads_to_vec(parameters):\n\tN = parameters[0].shape[0]\n\tvec = []\n\tfor param in parameters:\n\t\tvec.append(param.view(N,-1))\n\treturn torch.cat(vec, dim=1)", "def parameters(self):\n return self.vars" ]
[ "0.70164126", "0.69106805", "0.6909647", "0.6856145", "0.68113655", "0.68113655", "0.67872435", "0.6787241", "0.6785588", "0.6761088", "0.67584455", "0.6725566", "0.67151064", "0.66764146", "0.65993285", "0.6595009", "0.6587269", "0.6568346", "0.6563564", "0.65622544", "0.65112627", "0.6510747", "0.64927226", "0.6489423", "0.6487368", "0.64746726", "0.64656514", "0.63871", "0.63810676", "0.6377598" ]
0.697155
1
(Deprecated) Return the value string of a parameter by its name.
def _getParameterValueString(self, name): warnings.warn("This function is deprecated; parameters have been replaced with uniform inputs in 1.38.", DeprecationWarning, stacklevel = 2) return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getParam(self, params, name):\n return params.get(name)", "def get_param_with_name(self, param_name):\n return self.params[param_name]", "def getParameter(self, name):", "def getSSMParam(name):\n return ssm_client.get_parameter(\n Name=name,\n WithDecryption=True\n )['Parameter']['Value']", "def get_parameter_value(self, parameter_name):\n if parameter_name in self.description[\"config\"][\"values\"].keys():\n return self.description[\"config\"][\"values\"][parameter_name][\"value\"]\n else:\n return \"No such parameter\"", "def getStrParam(self, paramkey, default=None):\n value = self.request.getParameter(paramkey)\n if value is None: return default\n return value", "def get_name(self) -> str:\n # read the original value passed by the command\n name = self.raw_param.get(\"name\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return name", "def _getParameterValue(self, name, target = ''):\n warnings.warn(\"This function is deprecated; parameters have been replaced with uniform inputs in 1.38.\", DeprecationWarning, stacklevel = 2)\n return None", "def param_name(self):\n return self._param_name", "def getName(self):\n return _libsbml.Parameter_getName(self)", "def gui_get_param(self,param_name):\n return self._tkvars[param_name].get()", "def get_parameter(self, name):\n if name not in self._parameters.keys():\n raise ValueError(\"Component ({}) has no Parameter name ({})\".format(self.name, name))\n\n return self._parameters[name]", "def __make_description(self, param_name):\n value = self._params.get_value(param_name)\n return \"%s (Currently %s)\" % (param_name, str(value))", "def __repr_parameter__(self, name: str, value: Any) -> str:\n return f\"{name}={value!r}\"", "def parameterName(self):\n return self.name()", "def _get_one_param(self, param_name):\n return getattr(self, '__' + param_name)", "def get_param(self, param_name, memo=None):\n # Cast param_name to str once, for convenience:\n # (This is needed because Parameter members are Enum objects,\n # which can't be used in place of string-valued indexes)\n param_name = str(param_name)\n explicit_attr = getattr(self, param_name)\n if explicit_attr is not None:\n return explicit_attr\n else:\n return self.build_param(param_name, memo=memo)", "def get_value(name):\n\n named_value = get_named_value_raw(name)\n if named_value is not None:\n return named_value.value", "def _get_ssm_param(self, parameter_name):\n response = self.ssm_client.get_parameter(Name=parameter_name)\n res = response.get(\"Parameter\", {})\n cwa_parameter = res.get(\"Value\", {})\n return cwa_parameter", "def get_arg(self, name):\n return getattr(self.args, f\"{self.key}_{self.alias}_{name}\")", "def param_value(self):\n if self.string:\n return self.string\n if self.token:\n return self.token\n if self.number:\n return self.number\n if self.date:\n return self.date\n if self.quantity:\n return self.quantity\n if self.reference:\n return self.reference\n return ''", "def _getArgStr(self):\n return \"name=%r\" % (self.name)", "def get_param_as_string(self):\n\t\treturn call_sdk_function('PrlResult_GetParamAsString', self.handle)", "def GetValueByName(self, name):", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, 
param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def get_name(self, ):\n return self.get_parameter('name')" ]
[ "0.70329046", "0.687252", "0.6827756", "0.65972966", "0.65388006", "0.6500877", "0.64673096", "0.6371999", "0.6315599", "0.6314213", "0.62858284", "0.6270459", "0.62613076", "0.6259734", "0.62386006", "0.61926997", "0.6128117", "0.6123776", "0.6123158", "0.6096087", "0.6062657", "0.604602", "0.6040587", "0.6040159", "0.6026916", "0.6026916", "0.6026916", "0.6026916", "0.6026916", "0.6025714" ]
0.7922667
0
(Deprecated) Add a BindInput to this shader reference.
def _addBindInput(self, name, type = DEFAULT_TYPE_STRING): warnings.warn("This function is deprecated; shader references have been replaced with shader nodes in 1.38.", DeprecationWarning, stacklevel = 2) return self.addInput(name, type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _addBindParam(self, name, type = DEFAULT_TYPE_STRING):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.addInput(name, type)", "def _getBindInputs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.getInputs()", "def _getBindParams(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def add_input(self, sinput):\r\n self.sinputs.append(sinput)\r\n self.variables.append(sinput.variable)", "def _addParameter(self, name):\n warnings.warn(\"This function is deprecated; parameters have been replaced with uniform inputs in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.addInput(name)", "def add_binding(self, variable, value):\n # If there's already a binding, update it rather than add a new one.\n for binding in self.bindings:\n if binding.variable.name == variable:\n return self.update_binding(variable, value)\n variable = Variable(self.canvas, self, variable)\n binding = Binding(self.canvas, variable, value)\n self.bindings.append(binding)\n x, y = self.pos\n variable.set_pos(x + 10, y + len(self.bindings) * 20)\n if value.moves_with_binding:\n value.set_pos(x + 140, y + len(self.bindings) * 20)\n self.update()", "def mark_as_bound(self, name: str) -> None:\n name = sys.intern(name)\n if not self.has_input(name):\n return\n if self.bound_inputs is None:\n self.bound_inputs = {name}\n else:\n self.bound_inputs.add(name)", "def AddIamPolicyBinding(asset_ref, member, role):\n policy = GetIamPolicy(asset_ref)\n iam_util.AddBindingToIamPolicy(\n dataplex_api.GetMessageModule().GoogleIamV1Binding, policy, member, role)\n return SetIamPolicy(asset_ref, policy)", "def _getBindTokens(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def add_ip_to_input(self, ip, loggly_input, device_name=None):\n\n return self.add_device_to_input(LogglyDevice({'ip': ip}), loggly_input, device_name)", "def add_virtual_input(self, input_dataset_index):\n self.raw_virtual_inputs.append({\"index\": input_dataset_index})", "def add_input(self, var):\n raise NotImplementedError", "def addInput(self, input):\n\t\tself.config._WITH_ACTIONS = True\n\t\tself.config.ACTIONS.append((\"input\", input))", "def __new__(cls, loc=None, name=None):\n assert ((loc is None and isinstance(name, str)) or\n (name is None and 0 <= loc))\n return super(Bind, cls).__new__(cls, loc, name)", "def geomBind(*args, bindMethod: int=0, falloff: Union[float, bool]=0.0, geodesicVoxelParams:\n List[int, bool]=None, maxInfluences: Union[int, bool]=0, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def use(self):\r\n opengles.glUseProgram(self.program)", "def bind(self, *args):\r\n return self._fd.bind(*args)", "def addShader(self, QOpenGLShader): # real signature unknown; restored from __doc__\n return False", "def _bind(self, scope, referanceable):\n\t\tif (isinstance(referanceable, list) or isinstance(referanceable, tuple)):\n\t\t\tfor _ in referanceable:\n\t\t\t\tself._bind(scope, _)\n\t\t\treturn scope\n\t\telif isinstance(referanceable, interfaces.IReferencable):\n\t\t\tscope.setSlot(referanceable.getName(), 
referanceable)\n\t\t\treturn scope\n\t\telif True:\n\t\t\treturn scope", "def add_inputs(self, inputs):\n self.inputs += inputs", "def register_input(self, arg_):\n self.input_placeholder_ids += (self._store_placeholders(arg_).value,)", "def bind(self, sequence=None, func=None, add=None):\n return self._widget_bind(sequence, func, add, internal=False)", "def connect(self, binding):\n\n # Check whether the binding setting is correct or not.\n if self.io_owner == binding.io_owner:\n raise RuntimeError(\"Can not bind itself.\")\n\n if self.io_type == \"param\" and not self.is_pipeline_executor_interface():\n raise RuntimeError(\n 'The \"param\" binding can only be used by a pipeline executor interface!'\n )\n\n if not self.is_pipeline_executor_interface() and self.io_type == \"input\":\n raise RuntimeError(\"Module can only bind from output interface!\")\n\n if self.io_type == \"param\" and binding.io_type != \"param\":\n raise RuntimeError(\n 'A global \"param\" interface can only be bind with a module \"param\" interface!'\n )\n\n if (\n not self.is_pipeline_executor_interface()\n and not binding.is_pipeline_executor_interface()\n and binding.io_type == \"output\"\n ):\n raise RuntimeError(\"Can not bind module output with another module output!\")\n\n if (\n not self.is_pipeline_executor_interface()\n and binding.is_pipeline_executor_interface()\n and binding.io_type == \"input\"\n ):\n raise RuntimeError(\"Can not bind module output with pipeline input!\")\n\n if self.is_pipeline_executor_interface() and self.io_type == \"output\":\n raise RuntimeError(\"Global output can not be used as binding start point.\")\n\n if (\n self.is_pipeline_executor_interface()\n and self.io_type == \"input\"\n and binding.io_type != \"input\"\n ):\n raise RuntimeError(\"Global input can only bind with module input.\")\n\n self.bindings.append(binding)\n if not self.is_pipeline_executor_interface():\n # Check whether the data types of the source and destination are the same.\n if (\n isinstance(binding.io_owner, PipelineConfig.ModuleWrapper)\n and self.data_type != binding.data_type\n ):\n raise RuntimeError(\n f\"Illegal type (%s vs. 
%s): binding type is not same!\"\n % (self.data_type, binding.data_type)\n )\n\n binding.parents.append(self)\n\n # Do acyclic check after increasing the in-degree of child node by setting\n # current interface as a parent of the child node.\n\n if not self.check_dag_acyclic(\n binding.io_owner, self.io_owner.input_bindings.bindings\n ):\n raise RuntimeError(\"Illegal connection: Cause a cycle!\")", "def add(self):\n self.inp.inputs.add(self)\n self.out.outputs.add(self)", "def __add_solid_input(solid_inputs, input_name, value, is_kwargs=False):\n if is_kwargs:\n solid_inputs[input_name] = value\n else:\n solid_inputs[input_name] = {'value': value}", "def visit_AttributeBinding(self, node):\n obj = self.stack[-1]\n py_ast = node.binding.expr.py_ast\n op = node.binding.op\n op_compiler = COMPILE_OP_MAP[op]\n code = op_compiler(py_ast, self.filename)\n binding = {\n 'operator': op,\n 'code': code,\n 'name': node.name,\n 'lineno': node.binding.lineno,\n 'filename': self.filename,\n 'block': self.block,\n }\n obj['bindings'].append(binding)", "def add_ip_to_input_by_name(self, ip, input_name, device_name=None):\n\n return self.add_device_to_input(LogglyDevice({'ip': ip}), self.get_input_by_name(input_name), device_name)", "def bind_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bind_name\")", "def leaky_relu(input, negative_slope=0.01, inplace=False):\n return FunctionLib.apply(\n 'Relu', input.device, [input],\n outputs=[input if inplace else None], alpha=float(negative_slope))", "def add_input(self, var):\n\n if var.name not in [i.name for i in self.inputs]:\n logger.debug(f'Adding input {var.name}')\n self.inputs.append(var)\n return\n\n logger.debug(f'Merging input {var.name}')\n self[var.name].merge(var)" ]
[ "0.7632342", "0.65786767", "0.573658", "0.5379098", "0.5296212", "0.5178297", "0.5159998", "0.5151385", "0.5066717", "0.49228904", "0.4916024", "0.48976418", "0.48940352", "0.48545158", "0.47804457", "0.47647017", "0.4753692", "0.47506797", "0.4747506", "0.47449464", "0.4733389", "0.47302887", "0.47113398", "0.4697407", "0.46971804", "0.46968064", "0.46955577", "0.4693433", "0.46852553", "0.46776873" ]
0.84288436
0
(Deprecated) Return a vector of all BindInput elements in this shader reference.
def _getBindInputs(self): warnings.warn("This function is deprecated; shader references have been replaced with shader nodes in 1.38.", DeprecationWarning, stacklevel = 2) return self.getInputs()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getBindParams(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def _getBindTokens(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def inputs(self):\n return self._inputs", "def pc_input_buffers_full_var(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.doaesprit_sptr_pc_input_buffers_full_var(self, *args)", "def _addBindInput(self, name, type = DEFAULT_TYPE_STRING):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.addInput(name, type)", "def inputs(self):\n return self._inputs", "def inputs(self):\n return self._inputs", "def inputs(self):\n return self._inputs", "def pc_input_buffers_full_var(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.beamformer_sptr_pc_input_buffers_full_var(self, *args)", "def inputs(self):\n return self.inputs", "def pc_input_buffers_full_var(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.randomsampler_sptr_pc_input_buffers_full_var(self, *args)", "def inputs(self):\n\n inputs = []\n for arg in self.arguments:\n if arg.IN:\n inputs.append(arg)\n\n return inputs", "def pc_input_buffers_full(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.doaesprit_sptr_pc_input_buffers_full(self, *args)", "def _get_inputs(self):\n return self.__inputs", "def _get_inputs(self):\n return self.__inputs", "def _get_inputs(self):\n return self.__inputs", "def get_inputs(self):\n inputs = Interaction.get_inputs(self)\n inputs.update(np.atleast_1d(self._demands))\n return inputs", "def get_inputs(self):\n return self.inputs", "def pc_input_buffers_full(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.beamformer_sptr_pc_input_buffers_full(self, *args)", "def inputs(self) -> List[str]:\n return self._model.inputs", "def get_inputs(self):\n return self.attributes[\"inputs\"]", "def get_inputs(self):\n inputs = Interaction.get_inputs(self)\n inputs.update(np.atleast_1d(self._consumes))\n return inputs", "def get_flat_input_refs(self):\n ret = []\n for role_key, role_obj in self.get_recipe_inputs().items():\n for item in role_obj[\"items\"]:\n ret.append(item[\"ref\"])\n return ret", "def pc_input_buffers_full_var(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.phasedarray_sptr_pc_input_buffers_full_var(self, *args)", "def getFullInputs(self):\n res = [self.identifier if self.originalId is None else self.originalId]\n nn = 0\n while nn < len(res):\n _node = res[nn]\n if self.model.existNode(_node) and self.model.getNode(_node).ioEngine.inputs:\n for _inputId in self.model.getNode(_node).ioEngine.inputs:\n if not _inputId in res:\n res.append(_inputId)\n nn += 1\n return res", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def inputs(self):\n return super().inputs", "def inputs_b_v(self):\n return self._inputs_b_v" ]
[ "0.7248788", "0.68201584", "0.61971897", "0.6057106", "0.6041571", "0.60374856", "0.60374856", "0.60374856", "0.6018457", "0.59155166", "0.5823089", "0.57687944", "0.5762386", "0.5726699", "0.5726699", "0.5726699", "0.5711171", "0.5709276", "0.5706087", "0.5700484", "0.5682703", "0.5669003", "0.56503266", "0.5644516", "0.56411475", "0.5632093", "0.5632093", "0.5632093", "0.5632093", "0.5631548" ]
0.83538413
0
(Deprecated) Add a BindParam to this shader reference.
def _addBindParam(self, name, type = DEFAULT_TYPE_STRING): warnings.warn("This function is deprecated; shader references have been replaced with shader nodes in 1.38.", DeprecationWarning, stacklevel = 2) return self.addInput(name, type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _addBindInput(self, name, type = DEFAULT_TYPE_STRING):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.addInput(name, type)", "def _getBindParams(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def add_param(self, param):\n self.params.append(param)\n return self", "def add_param(self, param):\n self._params.append(param)\n self.add_decompostion(param)", "def _addParameter(self, name):\n warnings.warn(\"This function is deprecated; parameters have been replaced with uniform inputs in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.addInput(name)", "def addLocalParameter(self, *args):\n return _libsbml.KineticLaw_addLocalParameter(self, *args)", "def addParam(self, var: IRVariable):\n self.params[var.name] = var", "def addParameter(self, *args):\n return _libsbml.Model_addParameter(self, *args)", "def addParameter(self, *args):\n return _libsbml.KineticLaw_addParameter(self, *args)", "def add_param(self, paraminfo):\n self.params[paraminfo.name] = paraminfo", "def AddIamPolicyBinding(asset_ref, member, role):\n policy = GetIamPolicy(asset_ref)\n iam_util.AddBindingToIamPolicy(\n dataplex_api.GetMessageModule().GoogleIamV1Binding, policy, member, role)\n return SetIamPolicy(asset_ref, policy)", "def _add_param(self, name):\n param = ParameterInfo()\n param._name = name\n self._parameters.append(param)\n return param", "def bind(self, bindname, sqltype, value=None):\n datatype = _TYPES[sqltype.upper()]\n var = self.cursor.var(datatype)\n\n if value is not None:\n var.setvalue(0,value)\n\n self.bindparams[bindname.upper()] = var", "def _setParameterValue(self, name, value, typeString = ''):\n warnings.warn(\"This function is deprecated; parameters have been replaced with uniform inputs in 1.38.\", DeprecationWarning, stacklevel = 2)", "def add_binding(self, variable, value):\n # If there's already a binding, update it rather than add a new one.\n for binding in self.bindings:\n if binding.variable.name == variable:\n return self.update_binding(variable, value)\n variable = Variable(self.canvas, self, variable)\n binding = Binding(self.canvas, variable, value)\n self.bindings.append(binding)\n x, y = self.pos\n variable.set_pos(x + 10, y + len(self.bindings) * 20)\n if value.moves_with_binding:\n value.set_pos(x + 140, y + len(self.bindings) * 20)\n self.update()", "def geomBind(*args, bindMethod: int=0, falloff: Union[float, bool]=0.0, geodesicVoxelParams:\n List[int, bool]=None, maxInfluences: Union[int, bool]=0, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def add_parameter(self, param_id, value, flags=0):\n param = bytearray()\n param.extend(param_id)\n param.extend(flags)\n param.extend(binary.pack_le32(value))\n self.parameters.append(param)\n raise PyedbglibNotSupportedError(\"Parameters are not yet supported!\")", "def _getBindInputs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.getInputs()", "def _add_argument(hparams, key, value, update=True):\n if hasattr(hparams, key):\n if update:\n setattr(hparams, key, value)\n else:\n hparams.add_hparam(key, value)", "def _add_query_param(self, route_path, name, type_, default=None):\n route = self._find_route(route_path)\n # 
logging.info(\"Before:\", route.dependant.query_params)\n query_param = create_query_param(name, type_, default)\n route.dependant.query_params.append(query_param)\n # logging.info(\"After:\", route.dependant.query_params)", "def param_binding(self, gate_param_name):\n return self.param_bind.get(gate_param_name)", "def add(self, param):\n self._data.add(param)", "def AddIamPolicyBinding(task_ref, member, role):\n policy = GetIamPolicy(task_ref)\n iam_util.AddBindingToIamPolicy(\n dataplex_api.GetMessageModule().GoogleIamV1Binding, policy, member, role\n )\n return SetIamPolicy(task_ref, policy)", "def add_argument(self, *args: Any, **kwargs: Any) -> None:\n self._arguments.append((args, kwargs))", "def add_parameter(self, name, freqs, values, **kwargs):\n if name in self._parameters.keys():\n raise ValueError(\"Parameter name ({}) already exists in Component ({})\".format(name, self.name))\n if not isinstance(freqs, list):\n freqs = list(freqs)\n\n if not isinstance(values, list):\n values = list(values)\n\n if len(freqs) != len(values):\n raise ValueError(\"Length of parameter freqs ({}) does not equal length of values ({})\"\n .format(freqs, values))\n\n param = Parameter(name, freqs, values, **kwargs)\n self._parameters[name] = param", "def addParameter(cTag, name, value): #@NoSelf", "def register_parameter(self, name, param, bounds, prior=None):\n if '_parameters' not in self.__dict__:\n raise AttributeError(\n \"cannot assign parameter before Module.__init__() call\")\n super(Module, self).register_parameter(name, param)\n kwargs = {}\n kwargs[name] = bounds\n self.set_bounds(**kwargs)", "def add_or_replace_parameter(url, name, new_value):\n return _add_or_replace_parameters(url, {name: new_value})", "def add_parameter(self, param_type, param_name, this_class_name=''):\n self._set_instance_data('parameters',\n ' '.join([self.add_dependency(param_type, this_class_name),\n param_name]))", "def _getBindTokens(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()" ]
[ "0.7179398", "0.6434004", "0.60884535", "0.6070172", "0.59735316", "0.5896598", "0.5733029", "0.5710626", "0.5708676", "0.5646252", "0.5533429", "0.55079865", "0.5417713", "0.53833014", "0.5378985", "0.5373941", "0.5350034", "0.5324678", "0.5298662", "0.52942353", "0.52673304", "0.51965976", "0.5182273", "0.51668483", "0.5160122", "0.5154487", "0.51376337", "0.5135758", "0.51221883", "0.50912505" ]
0.8272173
0
(Deprecated) Return a vector of all BindParam elements in this shader reference.
def _getBindParams(self): warnings.warn("This function is deprecated; shader references have been replaced with shader nodes in 1.38.", DeprecationWarning, stacklevel = 2) return list()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getBindInputs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.getInputs()", "def _getBindTokens(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def _getActiveParameters(self):\n warnings.warn(\"This function is deprecated; parameters have been replaced with uniform inputs in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def _getParameters(self):\n warnings.warn(\"This function is deprecated; parameters have been replaced with uniform inputs in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def bind_params(self, param_list):\n b_list = None\n if param_list:\n b_list = []\n for param in param_list:\n bound_param = self.param_binding(param)\n # DEBUG\n # print(\"******bound_param {} param {} b_list {}\".format(bound_param, param, b_list)) # pylint: disable-msg=line-too-long\n # END-DEBUG\n b_list.append(bound_param if bound_param else param)\n return b_list", "def param(self):\r\n paramlist = []\r\n gradlist = []\r\n\r\n for layer in self.layers:\r\n try:\r\n layer_param, layer_grad = layer.param()\r\n paramlist = paramlist + layer_param\r\n gradlist = gradlist + layer_grad\r\n except ValueError:\r\n continue\r\n return paramlist, gradlist", "def get_params(self):\n return deepcopy(np.hstack([to_numpy(v).flatten() for v in\n self.parameters()]))", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def get_params(self):\n return list(self.params.values())", "def param_values(self):\n return self._param_values", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def params_with_grad(self) -> List[Parameter]:\n return [p for p in self.parameters() if p.grad is not None]", "def parameter_values(self) -> List[Tuple[str, Any]]:\n pvs = [(param, getattr(self, variable))\n for variable, param in self.variable_name_to_query_param.items()]\n return [(p, v) for p, v in pvs if v is not None]", "def params_with_grad(self) -> List[Parameter]:\n return [p for p in self.parameters() if p.grad is not None]", "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def _addBindParam(self, name, type = DEFAULT_TYPE_STRING):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.addInput(name, type)", "def parameters(self, requires_grad_only=True):\n filter_cond = lambda param: param.requires_grad if requires_grad_only else True\n return (param for param in super().parameters() if filter_cond(param))", "def params(self):\n params = []\n\n for item in self._definition.get('params', []):\n params.append(Parameter(**item))\n\n return params", "def parameters(self):\n return self._params", "def params(self):\n return tuple(self._params)", "def get_param_names(self):\n return list(self.params.keys())", "def get_forward_parameter_list(self):\n parameterlist = []\n parameterlist.append(self.weights)\n if self.bias is not None:\n parameterlist.append(self.bias)\n return parameterlist", "def params(self):\n params = []\n\n for v in vars(self).values():\n params.extend(self.__computeParams(v))\n\n if isinstance(v, list):\n for p in v:\n params.extend(self.__computeParams(p))\n\n return params", "def 
parameters(self) -> List[Parameter]:\n return self._parameters", "def param(self):\n return []", "def param(self):\n return []", "def get_bindable_vars(self):\n return self.local_vars.keys() + self.parent.get_bindable_vars()", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def parameters(self):\n return self.vars" ]
[ "0.73175657", "0.697158", "0.6510387", "0.6437608", "0.6387305", "0.63770574", "0.63090634", "0.62420833", "0.6186843", "0.61844945", "0.61839473", "0.6061543", "0.60109484", "0.59970754", "0.59630865", "0.5951487", "0.59331584", "0.5922793", "0.58891183", "0.5887415", "0.5868754", "0.58602244", "0.58564746", "0.58530265", "0.5844446", "0.5844446", "0.5828331", "0.58121836", "0.5805243", "0.5802222" ]
0.8280635
0
(Deprecated) Return a vector of all BindToken elements in this shader reference.
def _getBindTokens(self): warnings.warn("This function is deprecated; shader references have been replaced with shader nodes in 1.38.", DeprecationWarning, stacklevel = 2) return list()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getBindParams(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def _getBindInputs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.getInputs()", "def _getShaderRefs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return getShaderNodes(self)", "def _getActiveShaderRefs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return getShaderNodes(self)", "def tokens(self):\n # type: () -> List[Token]\n return self._tokens", "def tokens(self):\n return self.__tokens", "def __get_references(self):\n named_references = []\n for usage in self.xml_cache.get_xml_tree(\"usagemodel\"):\n variable_usages = usage.findall(\".//namedReference__VariableUsage\")\n for name in variable_usages:\n named_references.append(name.get(\"referenceName\"))\n return named_references", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def tokens(self):\n return self._tokens", "def _get_active_uniforms(self):\n\n count = gl.glGetProgramiv(self.handle, gl.GL_ACTIVE_UNIFORMS)\n\n # This match a name of the form \"name[size]\" (= array)\n regex = re.compile(\"\"\"(?P<name>\\w+)\\s*(\\[(?P<size>\\d+)\\])\\s*\"\"\")\n uniforms = []\n for i in range(count):\n name, size, gtype = gl.glGetActiveUniform(self.handle, i)\n # This checks if the uniform is an array\n # Name will be something like xxx[0] instead of xxx\n m = regex.match(name)\n # When uniform is an array, size corresponds to the highest used index\n if m:\n name = m.group('name')\n if size >= 1:\n for i in range(size):\n name = '%s[%d]' % (m.group('name'),i)\n uniforms.append((name, gtype))\n else:\n uniforms.append((name, gtype))\n\n return uniforms", "def free_symbols(self) -> Iterable[sympy.Symbol]:\n return get_free_symbols(self.params)", "def free_symbols(self) -> Iterable[sympy.Symbol]:\n return get_free_symbols(self.params)", "def token_values(self):\n return self._token_values", "def free_symbols(self):\n return ({j for i in self.args for j in i.free_symbols\n .difference(self.variables)})", "def bindings(self):\n return self.__bindings", "def keyrefs(self):\n return list(self.data)", "def get_tokens(self) -> List[str]:\n return self.tokens", "def _getActiveParameters(self):\n warnings.warn(\"This function is deprecated; parameters have been replaced with uniform inputs in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def argument_list(self):\n answer = self._call('argument_list')\n return answer.names", "def references(self):\n return tuple(self.__references)", "def get_light_list(self):\n return self.light_array", "def valve_name_list(self):\n return list(self._link_reg.valve_names)", "def listglobal(self):\n return list(self.attributes.keys())", "def references_list( self, theWeaver ):\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]", "def _getParameters(self):\n warnings.warn(\"This function is deprecated; parameters have been replaced with uniform inputs in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def tokens(self):\n return 
self.rpc.call(MsfRpcMethod.AuthTokenList)['tokens']", "def keyrefs(self):\n return [ref(key) for key in self.iterkeys()]", "def tokens(self):\n tokens = [k for k in self.tok2ind.keys()\n if k not in {'<NULL>', '<UNK>'}]\n return tokens", "def arg_to_str_list(self) -> list:\n arg_list = []\n for arg in [*self.args]:\n if hasattr(arg, \"_ref\"):\n arg_list.append(arg.ref)\n else:\n arg_list.append(arg)\n return arg_list" ]
[ "0.7346781", "0.71642834", "0.6264189", "0.6206269", "0.5697967", "0.5595624", "0.55848074", "0.5566121", "0.5566121", "0.5566121", "0.55017257", "0.54983807", "0.54983807", "0.5487443", "0.5385787", "0.53534365", "0.53367513", "0.5262191", "0.52297187", "0.5215819", "0.52116686", "0.52098155", "0.52079576", "0.5203989", "0.5203154", "0.5200567", "0.5186552", "0.51767033", "0.5151994", "0.514772" ]
0.8395629
0
(Deprecated) Return a vector of all shader references in this material element.
def _getShaderRefs(self): warnings.warn("This function is deprecated; shader references have been replaced with shader nodes in 1.38.", DeprecationWarning, stacklevel = 2) return getShaderNodes(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getActiveShaderRefs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return getShaderNodes(self)", "def shaders(self):\n\n shaders = []\n shaders.extend(self._verts)\n shaders.extend(self._frags)\n shaders.extend(self._geoms)\n return shaders", "def _getBindTokens(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def _getBindParams(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def _getBindInputs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.getInputs()", "def _getMaterials(self):\n warnings.warn(\"This function is deprecated; call Document.getMaterialNodes() instead.\", DeprecationWarning, stacklevel = 2)\n return self.getMaterialNodes()", "def get_shaders(self, nodes):\n shaders = []\n # Fill the assigned shader list\n for node in nodes:\n shader = mc.listConnections(\"{0}.instObjGroups[0]\".format(node))\n if shader is not None:\n shaders.append(shader)\n else:\n shaders.append([])\n return shaders", "def references_list( self, theWeaver ):\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]", "def getConnectedShaders(self):\n self.logger.debug(\"Connected Shaders\")\n\n connected = []\n for connections in pm.listConnections(self.data['shapeNode'], plugs=True, connections=True):\n if cmds.getClassification(connections[-1].nodeType(), satisfies=\"shader\"):\n self.logger.debug(\"Connected shader : %s\" % connections[-1].node())\n connected.append(connections[-1].node())\n return connected", "def _get_all_uniforms(self):\n\n uniforms = []\n for shader in self._verts:\n uniforms.extend(shader.uniforms)\n for shader in self._frags:\n uniforms.extend(shader.uniforms)\n for shader in self._geoms:\n uniforms.extend(shader.uniforms)\n uniforms = list(set(uniforms))\n return uniforms", "def bs_getShaders(obj):\n pm.select(obj)\n pm.windows.hyperShade(shaderNetworksSelectMaterialNodes=True)\n return pm.ls(sl=True) # Returns all shaders associated with the object (shape, face etc)", "def refmags(self):\n return self.__ref_mags", "def _get_all_attributes(self):\n\n attributes= []\n for shader in self._verts:\n attributes.extend(shader.attributes)\n # No attribute in fragment shaders\n attributes = list(set(attributes))\n return attributes", "def references(self):\n return tuple(self.__references)", "def get_material_features(self):\n return self.material_features", "def __get_references(self):\n named_references = []\n for usage in self.xml_cache.get_xml_tree(\"usagemodel\"):\n variable_usages = usage.findall(\".//namedReference__VariableUsage\")\n for name in variable_usages:\n named_references.append(name.get(\"referenceName\"))\n return named_references", "def references(self) -> \"IterableList[Reference]\":\n return Reference.list_items(self)", "def getFragmentShader(self):\n return self.fshader", "def _get_active_uniforms(self):\n\n count = gl.glGetProgramiv(self.handle, gl.GL_ACTIVE_UNIFORMS)\n\n # This match a name of the form \"name[size]\" (= array)\n regex = re.compile(\"\"\"(?P<name>\\w+)\\s*(\\[(?P<size>\\d+)\\])\\s*\"\"\")\n uniforms = []\n for i in 
range(count):\n name, size, gtype = gl.glGetActiveUniform(self.handle, i)\n # This checks if the uniform is an array\n # Name will be something like xxx[0] instead of xxx\n m = regex.match(name)\n # When uniform is an array, size corresponds to the highest used index\n if m:\n name = m.group('name')\n if size >= 1:\n for i in range(size):\n name = '%s[%d]' % (m.group('name'),i)\n uniforms.append((name, gtype))\n else:\n uniforms.append((name, gtype))\n\n return uniforms", "def dataShader(self):\n\t\treturn self._shader", "def extractUniforms(constants, refMatrix):\n uvOffsetScale = constants['$Globals']['webgl_fa7f624db8ab37d1']\n mdata = constants['$Globals']['webgl_3c7b7f37a9bd4c1d']\n matrix = Matrix([\n mdata[0:4],\n mdata[4:8],\n mdata[8:12],\n [0, 0, 0, 1],\n ])\n if refMatrix is None:\n # Rotate around Y because Google Maps uses X as up axis\n refMatrix = Matrix.Rotation(-pi/2, 4, 'Y') @ matrix.inverted()\n matrix = refMatrix @ matrix\n \n matrix[0][3] *= .0039\n matrix[1][3] *= .0039\n matrix[2][3] *= .0039\n\n return uvOffsetScale, matrix, refMatrix", "def get_light_list(self):\n return self.light_array", "def get_references(self):\n\n return self._refs", "def convert_shaders(self):\n raise NotImplementedError()", "def get_references(self):\n return self._references", "def _get_all_vertices(self, ref_frame='WORLD') -> np.ndarray:\n\n\t\tdepsgraph = bpy.context.evaluated_depsgraph_get() # to account for deformations\n\n\t\tif ref_frame not in {'LOCAL', 'WORLD'}:\n\t\t\traise ValueError(f\"Invalid ref_frame: {ref_frame}. Must be one of ['LOCAL', 'WORLD']\")\n\n\t\tverts = []\n\n\t\tfor mesh in self._meshes:\n\n\t\t\t# use bmesh to get vertices - this accounts for deformations in depsgraph\n\t\t\tbm = bmesh.new()\n\t\t\tbm.from_object(mesh, depsgraph)\n\t\t\tbm.verts.ensure_lookup_table()\n\t\t\tmesh_verts = np.array([x.co for x in bm.verts])\n\t\t\tbm.free()\n\n\t\t\tif ref_frame == 'WORLD':\n\t\t\t\tmesh_verts = np.dot(mesh.matrix_world, np.vstack((mesh_verts.T, np.ones(mesh_verts.shape[0]))))\n\n\t\t\tverts.append(mesh_verts)\n\n\t\tverts = np.concatenate(verts, axis=1)\n\t\tverts /= verts[3] # convert from homogeneous coordinates\n\t\treturn verts[:3].T", "def refs(self):\n return self._refs", "def compileShaders(self):\n raise NotImplementedError('compileShaders must be implemented by '\n '{} subclasses'.format(type(self).__name__))", "def get_grads(self):\n return deepcopy(np.hstack([to_numpy(v.grad).flatten() for v in self.parameters()]))", "def get_flat_output_refs(self):\n ret = []\n for role_key, role_obj in self.get_recipe_outputs().items():\n for item in role_obj[\"items\"]:\n ret.append(item[\"ref\"])\n return ret" ]
[ "0.7740527", "0.6775367", "0.6466401", "0.6452211", "0.6437122", "0.6314355", "0.62679857", "0.62556607", "0.6210403", "0.6105485", "0.596709", "0.5940758", "0.59146404", "0.5878404", "0.5871508", "0.5811813", "0.5789575", "0.57884926", "0.5763394", "0.57063866", "0.566444", "0.55803376", "0.55391747", "0.5518208", "0.5489697", "0.5423572", "0.5322535", "0.5321836", "0.5321757", "0.53204215" ]
0.7934999
0
(Deprecated) Return a vector of all shader references in this material element, taking material inheritance into account.
def _getActiveShaderRefs(self): warnings.warn("This function is deprecated; shader references have been replaced with shader nodes in 1.38.", DeprecationWarning, stacklevel = 2) return getShaderNodes(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getShaderRefs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return getShaderNodes(self)", "def shaders(self):\n\n shaders = []\n shaders.extend(self._verts)\n shaders.extend(self._frags)\n shaders.extend(self._geoms)\n return shaders", "def _getMaterials(self):\n warnings.warn(\"This function is deprecated; call Document.getMaterialNodes() instead.\", DeprecationWarning, stacklevel = 2)\n return self.getMaterialNodes()", "def _get_all_uniforms(self):\n\n uniforms = []\n for shader in self._verts:\n uniforms.extend(shader.uniforms)\n for shader in self._frags:\n uniforms.extend(shader.uniforms)\n for shader in self._geoms:\n uniforms.extend(shader.uniforms)\n uniforms = list(set(uniforms))\n return uniforms", "def get_shaders(self, nodes):\n shaders = []\n # Fill the assigned shader list\n for node in nodes:\n shader = mc.listConnections(\"{0}.instObjGroups[0]\".format(node))\n if shader is not None:\n shaders.append(shader)\n else:\n shaders.append([])\n return shaders", "def _getBindInputs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return self.getInputs()", "def references_list( self, theWeaver ):\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]", "def _getBindParams(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def getConnectedShaders(self):\n self.logger.debug(\"Connected Shaders\")\n\n connected = []\n for connections in pm.listConnections(self.data['shapeNode'], plugs=True, connections=True):\n if cmds.getClassification(connections[-1].nodeType(), satisfies=\"shader\"):\n self.logger.debug(\"Connected shader : %s\" % connections[-1].node())\n connected.append(connections[-1].node())\n return connected", "def _getBindTokens(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def bs_getShaders(obj):\n pm.select(obj)\n pm.windows.hyperShade(shaderNetworksSelectMaterialNodes=True)\n return pm.ls(sl=True) # Returns all shaders associated with the object (shape, face etc)", "def _get_all_attributes(self):\n\n attributes= []\n for shader in self._verts:\n attributes.extend(shader.attributes)\n # No attribute in fragment shaders\n attributes = list(set(attributes))\n return attributes", "def refmags(self):\n return self.__ref_mags", "def references(self) -> \"IterableList[Reference]\":\n return Reference.list_items(self)", "def get_material_features(self):\n return self.material_features", "def convert_shaders(self):\n raise NotImplementedError()", "def _get_active_uniforms(self):\n\n count = gl.glGetProgramiv(self.handle, gl.GL_ACTIVE_UNIFORMS)\n\n # This match a name of the form \"name[size]\" (= array)\n regex = re.compile(\"\"\"(?P<name>\\w+)\\s*(\\[(?P<size>\\d+)\\])\\s*\"\"\")\n uniforms = []\n for i in range(count):\n name, size, gtype = gl.glGetActiveUniform(self.handle, i)\n # This checks if the uniform is an array\n # Name will be something like xxx[0] instead of xxx\n m = regex.match(name)\n # When uniform is an array, size corresponds to the highest used index\n if m:\n name = m.group('name')\n if size >= 1:\n for i in range(size):\n name = '%s[%d]' % 
(m.group('name'),i)\n uniforms.append((name, gtype))\n else:\n uniforms.append((name, gtype))\n\n return uniforms", "def references(self):\n return tuple(self.__references)", "def dataShader(self):\n\t\treturn self._shader", "def compileShaders(self):\n raise NotImplementedError('compileShaders must be implemented by '\n '{} subclasses'.format(type(self).__name__))", "def getFragmentShader(self):\n return self.fshader", "def __get_references(self):\n named_references = []\n for usage in self.xml_cache.get_xml_tree(\"usagemodel\"):\n variable_usages = usage.findall(\".//namedReference__VariableUsage\")\n for name in variable_usages:\n named_references.append(name.get(\"referenceName\"))\n return named_references", "def extractUniforms(constants, refMatrix):\n uvOffsetScale = constants['$Globals']['webgl_fa7f624db8ab37d1']\n mdata = constants['$Globals']['webgl_3c7b7f37a9bd4c1d']\n matrix = Matrix([\n mdata[0:4],\n mdata[4:8],\n mdata[8:12],\n [0, 0, 0, 1],\n ])\n if refMatrix is None:\n # Rotate around Y because Google Maps uses X as up axis\n refMatrix = Matrix.Rotation(-pi/2, 4, 'Y') @ matrix.inverted()\n matrix = refMatrix @ matrix\n \n matrix[0][3] *= .0039\n matrix[1][3] *= .0039\n matrix[2][3] *= .0039\n\n return uvOffsetScale, matrix, refMatrix", "def get_light_list(self):\n return self.light_array", "def info_materials_polymer_get():\n materials = _material_by_group(974) # 974 == intermediate group\n return materials, 200", "def find_references(self):\n cls = self.__class__\n nodes = []\n for sobj in self._std.FindDependances(self.get_sobj()):\n nodes.append(cls(self._std, self._bld, sobj.GetID()))\n return nodes", "def surfaceShaderList(*args, add: name=None, remove: name=None, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def get_flat_output_refs(self):\n ret = []\n for role_key, role_obj in self.get_recipe_outputs().items():\n for item in role_obj[\"items\"]:\n ret.append(item[\"ref\"])\n return ret", "def get_references(self):\n\n return self._refs", "def product_size_materials_rel(self):\n return self._product_size_materials_rel" ]
[ "0.757915", "0.66297746", "0.62730986", "0.61996865", "0.61774623", "0.6176948", "0.60849196", "0.60422945", "0.5975865", "0.5874039", "0.5874017", "0.5842542", "0.58101964", "0.57584727", "0.57190543", "0.56604165", "0.56526685", "0.5576169", "0.5570131", "0.55684435", "0.5549801", "0.5496513", "0.5488584", "0.5394361", "0.5338422", "0.52731055", "0.52557397", "0.52161074", "0.5173057", "0.51585436" ]
0.7411408
1
Set the value of a geomprop by its name, creating a child element to hold the geomprop if needed.
def _setGeomPropValue(self, name, value, typeString = ''):
    method = getattr(self.__class__, "_setGeomPropValue" + getTypeString(value))
    return method(self, name, value, typeString)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_simple(parent, name, value):\n element = parent.find('./' + name) \n\n if element is None:\n element = ET.SubElement(parent, name)\n element.text = value\n else:\n element.text = value", "def add_geomean_to_product_data(product, prop_name, geomean_val):\n\tfor prop_data_list in product['data']:\n\t\tif prop_data_list[0] == prop_name:\n\t\t\t# prop_data_list.append(geomean_val)\n\t\t\tprop_data_list.insert(len(prop_data_list)-1, roundData(prop_name, geomean_val)) # inserts geomean before Measured column\n\treturn product", "def set_value(name, value):\n\n # Get existing named value\n named_value = get_named_value_raw(name)\n\n if named_value is None:\n # Create new named value\n named_value = NamedValue()\n named_value.name = name\n\n # Edit value\n named_value.value = value\n\n # Save\n named_value.put()\n\n # Finished\n return named_value", "def setGeoInfo(self, value):\n\n if isinstance(value, ElementTree.Element):\n gi_key = self._child_xml_ns_key.get('GeoInfos', self._xml_ns_key)\n value = GeoInfoType.from_node(value, self._xml_ns, ns_key=gi_key)\n elif isinstance(value, dict):\n value = GeoInfoType.from_dict(value)\n\n if isinstance(value, GeoInfoType):\n self._GeoInfos.append(value)\n else:\n raise TypeError('Trying to set GeoInfo element with unexpected type {}'.format(type(value)))", "def EditGizmoName(rig, gizmo_name):\n\n hierarchy_mod = rig.get_hierarchy_modifier()\n selection = hierarchy_mod.get_selection()\n\n rig_elements = control_rig_utils.get_elements_by_rig_type(rig, selection, unreal.RigControl)\n\n for rig_element in rig_elements:\n \n rig_element.set_editor_property(\"gizmo_name\", gizmo_name)\n hierarchy_mod.set_control(rig_element)", "def __setitem__(self, name, value):\n self.gattrs[name] = value", "def set_geometry(self, selection_name, geometry):", "def _set_guc(self, name, value):\n if value.lower() != 'default':\n value = util.quote_string(self, value)\n else:\n value = b'default'\n self._execute_command(ascii_to_bytes('SET %s TO ' % name) + value)", "def insert_in_tree(self, pic_name, pic_num, crop_num, is_crop=False):\n \n crop = self.communicator.image_store.get_crop(pic_num, crop_num)\n \n # insert the picture/crop name in column 0\n if (is_crop == False):\n myiter = self.tree_store.append(None, None)\n if crop.available == True:\n self.tree_store.set_value(myiter, \\\n 0, '<span foreground=\"#000000\"><b>' + pic_name + '</b></span>')\n else:\n self.tree_store.set_value(myiter, \\\n 0, '<span foreground=\"#A0A0A0\"><b>' + pic_name + '</b></span>')\n elif (is_crop == True):\n #determine iter that points to row containing pic_num\n # in column 1\n parent = None\n for i in range(0, len(self.tree_store)):\n if (pic_num == self.tree_store[i][1]):\n #found the parent, insert the child\n parent = self.tree_store[i].iter\n myiter = self.tree_store.append(parent, None)\n self.tree_store.set_value(myiter, 0, '<span foreground=\"#000000\"><b>' + pic_name + '</b></span>')\n break\n # expand the row to show the crop\n self.image_tree.expand_row(self.tree_store.get_path(parent), True)\n\n # fill in the remaining columns\n self.tree_store.set_value(myiter, 1, pic_num)\n self.tree_store.set_value(myiter, 2, crop_num)\n self.tree_store.set_value(myiter, 3, \"0%\")\n \n return myiter", "def namePush(ctxt, value):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n ret = libxml2mod.namePush(ctxt__o, value)\n return ret", "def name(self, name):\n\n self.container['name'] = name", "def name(self, name):\n\n self.container['name'] = name", "def 
_makeLocationElement(self, locationObject, name=None):\n\n locElement = ET.Element(\"location\")\n if name is not None:\n locElement.attrib['name'] = name\n for dimensionName, dimensionValue in locationObject.items():\n dimElement = ET.Element('dimension')\n dimElement.attrib['name'] = dimensionName\n if type(dimensionValue)==tuple:\n dimElement.attrib['xvalue'] = \"%f\"%dimensionValue[0]\n dimElement.attrib['yvalue'] = \"%f\"%dimensionValue[1]\n else:\n dimElement.attrib['xvalue'] = \"%f\"%dimensionValue\n locElement.append(dimElement)\n return locElement", "def create_and_assign_lot(self, cr, uid, id, name, context=None):\n obj = self.browse(cr, uid, id, context)\n product_id = obj.product_id.id\n val = {'product_id': product_id}\n new_lot_id = False\n if name:\n lots = self.pool.get('stock.production.lot').search(\n cr,\n uid,\n ['&', ('name', '=', name), ('product_id', '=', product_id)],\n context=context\n )\n if lots:\n new_lot_id = lots[0]\n val.update({'name': name})\n\n if not new_lot_id:\n new_lot_id = self.pool.get('stock.production.lot').create(cr, uid, val, context=context)\n self.write(cr, uid, id, {'lot_id': new_lot_id}, context=context)", "def set(self, name, value):\n pass", "def setparentname(self, imagename):\n return _coordsys.coordsys_setparentname(self, imagename)", "def set_property(wellorcontainer, property_name, value):\n wells = convert_to_wellgroup(wellorcontainer)\n \n if not isinstance(value, str):\n value = str(value)\n \n for well in wells:\n assert isinstance(well, Well)\n well.properties[property_name] = value", "def __setitem__(self, name, value):\r\n return self.set(name=value)", "def populate_field(self, name, value):\n locator = self._get_input_field_locator(name)\n self._populate_field(locator, value)", "def setName(self,value):\n assert value == None or type(value) == str, repr(value)+' is not a valid name'\n self._name = value", "def setElementName(self, *args):\n return _libsbml.Point_setElementName(self, *args)", "def setName(self, name: str, /) -> Any:\n ...", "def propset(self, name, value, *args):\r\n d = py.path.local.mkdtemp() \r\n try: \r\n p = d.join('value') \r\n p.write(value) \r\n self._svn('propset', name, '--file', str(p), *args)\r\n finally: \r\n d.remove()", "def name(self, value):\n self._name = value", "def name(self, value):\n self._name = value", "def set(self, name, value):\n self.__getitem__(name).clear()\n self.add(name, value)", "def on_name_change(self, txt):\n self.mdl.cmp.part_name = txt \n self.refresh_svg_canvas()", "def set(self, name1, natl, item):\n if name1 not in self.data: self.data[name1] = {}\n self.data[name1][natl] = item", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def setValue(self, name: unicode, value: object) -> None:\n ..." ]
[ "0.56408846", "0.53583366", "0.5214419", "0.5200524", "0.5169286", "0.5159845", "0.5102088", "0.50392735", "0.50278354", "0.5018636", "0.49757773", "0.49757773", "0.49658227", "0.49325004", "0.48699766", "0.4865401", "0.48650134", "0.4847803", "0.48443377", "0.48255238", "0.48240253", "0.48201823", "0.48181736", "0.47977087", "0.47977087", "0.47783193", "0.47779742", "0.47728842", "0.47699195", "0.47552994" ]
0.5464608
1
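The record above dispatches to a type-specific setter by building the method name from the value's type. Below is a self-contained toy sketch of that dispatch pattern; the `Holder` class and its setters are illustrative assumptions, and only the `getattr`-based routing mirrors the documented `_setGeomPropValue`.

```python
# Toy illustration of the getattr-based type dispatch used in the record above.
class Holder:
    def __init__(self):
        self.props = {}

    def set_value(self, name, value):
        # Route to a type-specific setter such as _set_value_int or _set_value_str.
        method = getattr(self.__class__, "_set_value_" + type(value).__name__)
        return method(self, name, value)

    def _set_value_int(self, name, value):
        self.props[name] = ("integer", value)

    def _set_value_str(self, name, value):
        self.props[name] = ("string", value)


h = Holder()
h.set_value("index", 3)
h.set_value("label", "uv0")
print(h.props)  # {'index': ('integer', 3), 'label': ('string', 'uv0')}
```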
(Deprecated) Add a material element to the document.
def _addMaterial(self, name):
    warnings.warn("This function is deprecated; call Document.addMaterialNode() instead.", DeprecationWarning, stacklevel = 2)
    return self.addMaterialNode(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AddMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_AddMaterial(self, *args)", "def append_material(self, material):\n # First check if asset attribute exists; if not, define the asset attribute\n if not hasattr(self, \"asset\"):\n self.asset = ET.Element(\"asset\")\n # If the material name is not in shared materials, add this to our assets\n if material.name not in self.shared_materials:\n self.asset.append(ET.Element(\"texture\", attrib=material.tex_attrib))\n self.asset.append(ET.Element(\"material\", attrib=material.mat_attrib))\n # Add this material name to shared materials if it should be shared\n if material.shared:\n self.shared_materials.add(material.name)\n self.shared_textures.add(material.tex_attrib[\"name\"])\n # Update prefix for assets\n add_prefix(root=self.asset, prefix=self.naming_prefix, exclude=self.exclude_from_prefixing)", "def link_material(obj, mat):\n if not has_material(obj, mat.name):\n obj.data.materials.append(mat)", "def SetMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_SetMaterial(self, *args)", "def add_material_page(wiz, title, params):\n add_grid_page(wiz, u\"Material properties\", title, params)", "def setMaterial(obj=None,mat=None):\n\n\tif obj is None:\n\t\tobj = bpy.context.object\n\tif obj.data.materials:\n\t\t# assign to 1st material slot\n\t\tobj.data.materials[0] = mat\n\telse:\n\t\t# no slots\n\t\tobj.data.materials.append(mat)", "def set_material(self, material):\r\n for b in self.buf:\r\n b.set_material(material)", "def MaterialTool(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_MaterialTool(*args)", "def create_material(name, diffuse, alpha):\n mat = bpy.data.materials.new(name)\n mat.diffuse_color = diffuse\n mat.diffuse_intensity = 1.0\n mat.alpha = alpha\n if alpha:\n mat.use_transparency = True\n return mat", "def create_material(self, name, pbmr=None, emissive=None):\n new_material = self._build_material(pbmr, emissive)\n\n self.materials.append(new_material)\n\n if name:\n self.materials_map[name] = self._last_index(self.materials)\n\n return self._last_index(self.materials)", "def XCAFDoc_DocumentTool_MaterialTool(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_MaterialTool(*args)", "def AddDispersionMaterial(GeometryName,RGBData):\n\n r,g,b=RGBData\n onlyR = tuple([r,0,0,1])\n onlyG = tuple([0,g,0,1])\n onlyB = tuple([0,0,b,1])\n\n\n currentMaterial = bpy.data.materials.new(name='TypeA'+GeometryName)\n currentMaterial.use_nodes = True\n nodes = currentMaterial.node_tree.nodes\n\n math01 = nodes.new(\"ShaderNodeMath\")\n math01.operation = \"POWER\"\n\n glassBSDF01 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF01.inputs[0].default_value = onlyR\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF01.inputs[1])\n\n glassBSDF02 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF02.inputs[0].default_value = onlyG\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF02.inputs[1])\n\n glassBSDF03 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF03.inputs[0].default_value = onlyB\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF03.inputs[1])\n\n math02 = nodes.new(\"ShaderNodeMath\")\n currentMaterial.node_tree.links.new(math02.outputs[0],glassBSDF02.inputs[2])\n\n math03 = nodes.new(\"ShaderNodeMath\")\n currentMaterial.node_tree.links.new(math02.outputs[0],math03.inputs[1])\n currentMaterial.node_tree.links.new(math03.outputs[0],glassBSDF01.inputs[2])\n\n addShader01 = nodes.new(\"ShaderNodeAddShader\")\n 
currentMaterial.node_tree.links.new(glassBSDF01.outputs[0],addShader01.inputs[0])\n currentMaterial.node_tree.links.new(glassBSDF02.outputs[0],addShader01.inputs[1])\n\n addShader02 = nodes.new(\"ShaderNodeAddShader\")\n currentMaterial.node_tree.links.new(addShader01.outputs[0],addShader02.inputs[0])\n currentMaterial.node_tree.links.new(glassBSDF03.outputs[0],addShader02.inputs[1])\n\n volumeAbs = nodes.new(\"ShaderNodeVolumeAbsorption\")\n\n materialOutput=nodes.get(\"Material Output\")\n currentMaterial.node_tree.links.new(addShader02.outputs[0],materialOutput.inputs[0])\n currentMaterial.node_tree.links.new(volumeAbs.outputs[0],materialOutput.inputs[1])\n\n bpy.data.objects[GeometryName].data.materials.append(currentMaterial)", "def XCAFDoc_MaterialTool_Set(*args):\n return _XCAFDoc.XCAFDoc_MaterialTool_Set(*args)", "def _append_mag_scaling_rel(element, mag_scale_rel):\n msr = etree.Element(NRML04_MAG_SCALE_REL)\n msr.text = mag_scale_rel\n element.append(msr)", "def setMaterial(self,massFraction,polymer):\n M = Materials()\n num = self.material['Detector']['mt']\n if polymer == 'PS':\n self.material['Detector']['matString'] = M.GetPSLiF(massFraction,num)\n elif polymer == 'PEN':\n self.material['Detector']['matString'] = M.GetPENLiF(massFraction,num)\n else:\n raise ValueError('Polymer {} is not in the material database'.format(polymer))", "def _getMaterials(self):\n warnings.warn(\"This function is deprecated; call Document.getMaterialNodes() instead.\", DeprecationWarning, stacklevel = 2)\n return self.getMaterialNodes()", "def material_library_reference(self, material_library_reference):\n\n self._material_library_reference = material_library_reference", "def define_materials():\n global robot\n robot.add_material(ur.Material('Black', ur.Color(0.1, 0.1, 0.1, 1)))\n robot.add_material(ur.Material('LightGrey', ur.Color(0.9, 0.9, 0.9, 1)))\n robot.add_material(ur.Material('Grey', ur.Color(0.6, 0.6, 0.6, 1)))\n robot.add_material(ur.Material('DarkGrey', ur.Color(0.3, 0.3, 0.3, 1)))", "def add_molecule(self, name, cell=None):\n print 'DATA.add_molecule is deprecated. 
Please use DATA.give_molecule'\n self[name] = MOLECULE(name=name, cell=cell)", "def materials_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n view = context.space_data\r\n thumbnails_path = get_directory('icons')\r\n library_path = get_library_path()\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n if AM.as_mat_scene:\r\n thumb_list = thumb + [\"AM_Cloth\", \"AM_Sphere\"]\r\n else: \r\n thumb_list = thumb\r\n\r\n cam_is_valid = False\r\n obj_is_valid = False\r\n \r\n \r\n if not AM.as_mat_scene and not bpy.context.object:\r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n box.label(\"No active_object in the scene\", icon='ERROR')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n elif not AM.as_mat_scene and not bpy.context.active_object.active_material:\r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n box.label(\"The object have no material\", icon='ERROR')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n if AM.as_mat_scene and not isdir(join(library_path, 'materials', \"Render Scenes\")):\r\n box.operator(\"object.create_rder_scn_lib\", text = \"Create render scene library\", icon = 'FILESEL')\r\n box.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n \r\n if AM.as_mat_scene:\r\n asset_name = AM.scene_name\r\n else:\r\n active_mat = context.active_object.active_material\r\n asset_name = active_mat.name\r\n \r\n if len(bpy.context.active_object.material_slots) == 1:\r\n AM.multi_materials = False\r\n \r\n if AM.as_mat_scene and (not asset_name in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace') or\\\r\n not AM.as_mat_scene and (AM.multi_materials and get_valid_materials() or not AM.multi_materials and asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace'): \r\n if not AM.multi_materials:\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2 and AM.replace_rename == 'rename':\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if AM.as_mat_scene:\r\n box.prop(AM, \"scene_name\", text = \"\")\r\n else:\r\n box.prop(AM, \"rename_mat\", text=\"\")\r\n \r\n box.prop(AM, \"as_mat_scene\", text = \"Save as material scene\")\r\n if not AM.as_mat_scene and len(bpy.context.active_object.material_slots) >= 2:\r\n if len(get_valid_materials()) != len(bpy.context.active_object.material_slots) and AM.multi_materials:\r\n box.label(\"Some materials wont be added\", icon = 'ERROR')\r\n box.label(\" because there already exist\")\r\n row = box.row()\r\n row.prop(AM, \"multi_materials\", text = \"All materials\")\r\n if AM.as_mat_scene:\r\n row = box.row(align = True)\r\n row.label(\"Scene name:\")\r\n row.prop(AM, \"scene_name\", text = \"\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n 
\r\n if AM.as_mat_scene:\r\n for obj in context.scene.objects:\r\n if obj.type == 'CAMERA':\r\n cam_is_valid = True\r\n \r\n if len([obj for obj in context.selected_objects if obj.type != 'CAMERA' and bpy.context.active_object == obj]) == 1:\r\n obj_is_valid = True\r\n \r\n row = box.row()\r\n row.label(\"Selected object rendering\", icon = 'FILE_TICK' if obj_is_valid else 'CANCEL')\r\n row = box.row()\r\n row.label(\"Camera in the scene\", icon = 'FILE_TICK' if cam_is_valid else 'CANCEL')\r\n if not cam_is_valid:\r\n row = box.row()\r\n row.operator(\"object.camera_add\", text = \"Add camera\", icon = 'OUTLINER_OB_CAMERA')\r\n \r\n if not AM.as_mat_scene:\r\n # --------------------- # \r\n # RENDER THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'render':\r\n row = box.row(align = True)\r\n row.label(\"Thumbnail:\")\r\n row.prop(AM, \"mat_thumb_type\", text = \"\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n if AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n\r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if (AM.as_mat_scene and AM.scene_name and cam_is_valid and obj_is_valid or not AM.as_mat_scene) and (AM.render_type == 'render' or (asset_name not in thumb_list or AM.replace_rename == 'replace') and AM.render_type == 'opengl' or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n if AM.as_mat_scene:\r\n row.operator(\"object.add_scene_in_library\", text=\"OK\", icon='FILE_TICK')\r\n else:\r\n row.operator(\"object.add_material_in_library\", text=\"OK\", icon='FILE_TICK')\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n if AM.multi_materials and not get_valid_materials():\r\n box.label(\"All materials already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2:\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n if len(bpy.context.active_object.material_slots) >= 2:\r\n box.prop(AM, \"multi_materials\", text = \"All materials\")\r\n else:\r\n AM.multi_materials = False\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if AM.as_mat_scene:\r\n box.prop(AM, \"scene_name\", text = \"\")\r\n else:\r\n box.prop(AM, \"rename_mat\", text=\"\")\r\n \r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", 
text=\"Cancel\", icon='X')", "def built_in_material(self, built_in_material):\n\n self._built_in_material = built_in_material", "def add_material(self, name, b_coeff, c_coeff):\n \"\"\"\n\n :return:\n \"\"\"\n l_mat = np.linspace(200e-9, 2000e-9, 5000)\n c = 299792458.0\n w_mat = 2 * np.pi * c / l_mat\n l2_mat = (l_mat * 1e6) ** 2\n n_tmp = 0.0\n for ind, b in enumerate(b_coeff):\n n_tmp += b*l2_mat / (l2_mat - c_coeff[ind])\n n = np.sqrt(1 + n_tmp)\n n_ip = interp1d(w_mat, n, bounds_error=False, fill_value=np.nan, kind=\"quadratic\")\n self.materials[name] = n_ip", "def define_material(self):\n\n # Check which class should be called.\n const_eqn = self.config['material']['const_eqn']\n if isclass(const_eqn):\n mat_class = self.config['material']['const_eqn']\n elif const_eqn == 'lin_elastic':\n mat_class = materials.solid_materials.LinearIsoMaterial\n elif const_eqn == 'neo_hookean':\n mat_class = materials.solid_materials.NeoHookeMaterial\n elif const_eqn == 'demiray':\n mat_class = materials.solid_materials.DemirayMaterial\n elif const_eqn == 'fung':\n mat_class = materials.solid_materials.FungMaterial\n elif const_eqn == 'guccione':\n mat_class = materials.solid_materials.GuccioneMaterial\n elif const_eqn == 'holzapfel_ogden':\n mat_class = materials.solid_materials.HolzapfelOgdenMaterial\n elif const_eqn == 'newtonian' or const_eqn == 'stokes':\n mat_class = materials.fluids.NewtonianFluid\n else:\n raise NotImplementedError(\"Shouldn't be in here...\")\n\n # Create an instance of the material class and store\n # as member data.\n try:\n inverse = self.config['formulation']['inverse']\n except KeyError:\n inverse = False\n self._material = mat_class(inverse=inverse,\n **self.config['material'])\n\n return None", "def Set(*args):\n return _XCAFDoc.XCAFDoc_MaterialTool_Set(*args)", "def create_object_material(obj, mat_name):\n if not has_material(obj, mat_name):\n if bpy.data.materials.get(mat_name, None):\n # XXX if material with this name already exists in another object\n # append the object name to this material name\n mat_name += \".{}\".format(obj.name)\n\n mat = bpy.data.materials.new(mat_name)\n link_material(obj, mat)\n return mat\n return obj.data.materials.get(mat_name)", "def GetMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_GetMaterial(self, *args)", "def addElement(name, defaultUri=None, content=None):", "def append_material_code(df, v, attr):\n mapping_file = pd.read_csv(v['append_material_codes'])\n\n # if material is identified in the activity set, use that material to\n # append the abbreviation, if not, then merge the mapping file to the df\n if attr.get('material') is not None:\n mapping_dict = mapping_file.set_index('Material').to_dict()['Abbr']\n abbr = mapping_dict.get(attr.get('material'))\n for s in ['SectorProducedBy', 'SectorConsumedBy']:\n df[s] = np.where((df[s] is not None) and (df[s] != ''),\n df[s] + abbr, df[s])\n else:\n # add materials\n df = df.merge(mapping_file, left_on='Flowable', right_on='Material')\n for s in ['SectorProducedBy', 'SectorConsumedBy']:\n df[s] = np.where((df[s] is not None) and (df[s] != ''),\n df[s] + df['Abbr'], df[s])\n # drop cols from mapping file\n df = df.drop(columns=['Material', 'Abbr'])\n\n return df", "def _material_delete(sender, instance, using, **kwargs):\n Booking.objects.filter(material=instance).update(\n material=None, custom_material=instance.name\n )", "def create_blender_material(self, ogremat, mat, meshId, matIdx):\n logger.debug(\"create_blender_material\")\n textures = ogremat.textures\n bmat 
= None\n idx = 0\n mat_name = mat[\"name\"].split(\"/\")[0]\n try:\n bmat = bpy.data.materials[mat_name]\n if bversion == 3:\n bmat.name = \"tobedeleted\"\n bmat = bpy.data.materials.new(mat_name)\n except:\n bmat = bpy.data.materials.new(mat_name)\n self.set_uuid(bmat, ogremat.uuid)\n # material base properties\n if ogremat.doambient:\n if bversion == 2:\n bmat.setAmb(ogremat.ambient)\n else:\n bmat.ambient = ogremat.ambient\n if ogremat.specular:\n if bversion == 2:\n bmat.setSpec(1.0)\n bmat.setSpecCol(ogremat.specular[:3])\n bmat.setHardness(int(ogremat.specular[3]*4.0))\n else:\n bmat.specular_intensity = 1.0\n ogremat.specular[:3]\n bmat.specular_color = ogremat.specular[:3]\n bmat.specular_hardness = int(ogremat.specular[3]*4.0)\n if ogremat.alpha < 1.0:\n bmat.alpha = ogremat.alpha\n # specular\n for layerName, textureId in ogremat.layers.items():\n if layerName == 'shadowMap':\n if bversion == 2:\n bmat.setMode(Blender.Material.Modes['SHADOWBUF'] & bmat.getMode())\n else:\n bmat.use_cast_buffer_shadows = True\n if textureId:\n textureId = textureId\n pars = (bmat, layerName, mat[\"name\"], ogremat, idx, meshId,\n matIdx)\n if textureId in self._imported_assets:\n btex = self._imported_assets[textureId]\n self.layer_ready(btex, *pars)\n elif self.simrt:\n pars = (textureId,) + pars\n if not self.Asset.downloadAsset(textureId, 0,\n self.texture_downloaded, \n pars,\n main=self.doTextureDownloadTranscode):\n self.add_texture_callback(textureId, self.layer_ready, pars[1:])\n idx += 1\n self._imported_materials[mat[\"name\"]] = bmat\n return bmat" ]
[ "0.7026196", "0.6792848", "0.667132", "0.59274834", "0.5922771", "0.5812614", "0.57446635", "0.5629134", "0.5546216", "0.55392534", "0.5464084", "0.5419899", "0.53916496", "0.53648096", "0.53525424", "0.53359", "0.53133327", "0.5275986", "0.51976895", "0.51882774", "0.5178728", "0.5116233", "0.5108306", "0.50630844", "0.505187", "0.50364995", "0.50244355", "0.49948886", "0.49836716", "0.49805593" ]
0.8210709
0
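A hedged usage sketch of the non-deprecated path named in the record above (`Document.addMaterialNode()`); it assumes the MaterialX Python bindings are installed and importable as `MaterialX`.

```python
# Assumes the MaterialX Python bindings; addMaterialNode() is the replacement
# that the deprecation warning in the record above points to.
import MaterialX as mx

doc = mx.createDocument()
material = doc.addMaterialNode("M_example")
print(material.getName())  # "M_example"
```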
(Deprecated) Return a vector of all materials in the document.
def _getMaterials(self):
    warnings.warn("This function is deprecated; call Document.getMaterialNodes() instead.", DeprecationWarning, stacklevel = 2)
    return self.getMaterialNodes()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def info_materials_polymer_get():\n materials = _material_by_group(974) # 974 == intermediate group\n return materials, 200", "def info_materials_get():\n materials = _material_by_group() # empty means all groups\n return materials, 200", "def info_materials_raw_get():\n materials = _material_by_group(427) # 427 == intermediate group\n return materials, 200", "def get_material_features(self):\n return self.material_features", "def materials(self):\n return MaterialManager(session=self._session)", "def info_materials_gas_get():\n materials = _material_by_group(711) # 711 == intermediate group\n return materials, 200", "def GetMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_GetMaterial(self, *args)", "def get_materials(dbpath):\n odb = openOdb(path=dbpath)\n _materials = []\n for _name in odb.materials.items():\n _materials.append(_name)\n odb.close()\n return _materials", "def read_all():\n # Query the database for all the materials\n materials = Material.query.order_by(Material.family_id, Material.material_name).all()\n\n # Serialize the list of materials from our data\n material_schema = MaterialSchema(many=True, exclude=[\"family.materials\"])\n data = material_schema.dump(materials).data\n return data", "def show_materials(self):\n print('\\nThe materials with known dielectric properties are:\\n')\n pprint.pprint(mats.Electrical.props)\n# pprint.pprint(mats.Electrical.DIELECTRIC)\n print('\\nThe materials with known loss tangents are:\\n')\n pprint.pprint(mats.Electrical.props)\n# pprint.pprint(mats.Electrical.LOSS_TAN)\n return", "def info_materials_booster_get():\n materials = _material_by_group(712) # 712 == intermediate group\n return materials, 200", "def get_material_set(**kw):\n mat_ids = set()\n volumes = get_volume_list()\n for v in volumes:\n d = volume_metadata( v )\n if( kw.get('with_rho') is True ):\n # rho is undefined for the void material and dagmc may return anything.\n if d['material'] == 0:\n mat_ids.add( (d['material'], 0.0) )\n else:\n mat_ids.add( (d['material'], d['rho']) )\n else:\n mat_ids.add( d['material'] )\n return mat_ids", "def info_materials_composites_get():\n materials = _material_by_group(429) # 429 == intermediate group\n return materials, 200", "def info_materials_intermediates_get():\n materials = _material_by_group(428) # 428 == intermediate group\n return materials, 200", "def materials(cls) -> MaterialSelector:\n selector: MaterialSelector = cls._materials\n return selector", "def product_size_materials(self):\n return self._product_size_materials", "def get_materials_properties(dbpath): #<un-named>nook\n odb = openOdb(path=dbpath)\n data = []\n for _name,_mat in odb.materials.items():\n _elastic_mod = _mat.elastic.table[0][0]\n _poisson = _mat.elastic.table[0][1]\n if hasattr(_mat,\"plastic\"):\n _plastic = _mat.plastic.table\n else:\n _plastic = []\n data.append((_name,_elastic_mod,_poisson,_plastic))\n odb.close()\n return data", "def _addMaterial(self, name):\n warnings.warn(\"This function is deprecated; call Document.addMaterialNode() instead.\", DeprecationWarning, stacklevel = 2)\n return self.addMaterialNode(name)", "def XCAFDoc_DocumentTool_MaterialTool(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_MaterialTool(*args)", "def MaterialTool(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_MaterialTool(*args)", "def MaterialsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_MaterialsLabel(*args)", "def material(self):\n return self._F_Mstr", "def get_material_mapping(self):\n return {name: self.get_material(name) for name 
in self.parts.keys()}", "def get_materials_from_blender_objects(blender_objects):\n materials = set()\n meshes = {ob.data for ob in blender_objects if ob.type == 'MESH'}\n for ob in meshes:\n if not ob.materials:\n continue\n materials.add(ob.materials[0])\n return sorted(materials, key=lambda m: m.name)", "def GetMaterialLabels(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_GetMaterialLabels(self, *args)", "def get_all_assets(self):\n return c4d.documents.GetAllAssets(self._document, False, '')", "def XCAFDoc_DocumentTool_MaterialsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_MaterialsLabel(*args)", "def load_materials(self):\n # Create material objects\n for meta_mat in self.gltf.materials:\n mat = Material(meta_mat.name)\n mat.color = meta_mat.baseColorFactor or [1.0, 1.0, 1.0, 1.0]\n mat.double_sided = meta_mat.doubleSided\n\n if meta_mat.baseColorTexture is not None:\n mat.mat_texture = self.textures[meta_mat.baseColorTexture[\"index\"]]\n\n self.materials.append(mat)\n self.scene.materials.append(mat)", "def create_materials_from_data(textures):\n\n materials = []\n\n #Set colour to incremenet from 0 - 8\n colour_inc = 1.0 / len(textures)\n colour = 0\n\n for current_material in textures:\n mat = bpy.data.materials.new(current_material[0])\n mat.diffuse_color = (0, colour, 0,)\n mat.diffuse_shader = 'LAMBERT' \n mat.diffuse_intensity = 1.0 \n mat.specular_color = (1, 1, 1,)\n mat.specular_shader = 'COOKTORR'\n mat.specular_intensity = 0.5\n mat.alpha = 1\n mat.ambient = 1\n mat.use_shadeless = True\n\n mtex = mat.texture_slots.add()\n mtex.texture = current_material[1]\n mtex.texture_coords = 'UV'\n mtex.use_map_color_diffuse = True \n\n materials.append(mat)\n colour += colour_inc\n \n return materials", "def XCAFDoc_MaterialTool_Set(*args):\n return _XCAFDoc.XCAFDoc_MaterialTool_Set(*args)" ]
[ "0.72720134", "0.7060891", "0.67700535", "0.6652309", "0.6613427", "0.64064366", "0.638908", "0.63752085", "0.63551617", "0.633049", "0.62788475", "0.6175576", "0.6156727", "0.6118103", "0.61163384", "0.601052", "0.59336454", "0.57874507", "0.5771847", "0.57317346", "0.57198334", "0.57037646", "0.5695987", "0.5662841", "0.562117", "0.55636215", "0.55608225", "0.54762733", "0.5475506", "0.5431953" ]
0.7945748
0
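Likewise, a minimal sketch of the replacement API named in the record above (`getMaterialNodes()`), again assuming the MaterialX Python bindings.

```python
# Assumes the MaterialX Python bindings; iterates the material nodes of a document.
import MaterialX as mx

doc = mx.createDocument()
doc.addMaterialNode("M_a")
doc.addMaterialNode("M_b")
for material in doc.getMaterialNodes():
    print(material.getName())
```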
Return the default data search path.
def getDefaultDataSearchPath():
    return FileSearchPath(os.path.dirname(__file__))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'vehicles_dataset_v{}'.format(self._version))", "def default_data_dir(self):\n return self._default_data_dir", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'visual_genome')", "def _get_default_path(self):\n return os.path.join(cfg.ROOT_DIR, 'data', 'KITTI')", "def get_default_data_dir(self):\n data_dir_path = os.path.join(self.comicsite.short_name,self.folder_prefix,self.cleantitle)\n return data_dir_path", "def get_data_path():\n\treturn _paths[_DATA_DIRECTORY_KEY]", "def _get_default_path(self):\n # return os.path.join(datasets.ROOT_DIR, 'data', 'MSRC21')\n # set local path\n return u'/Users/danilonunes/workspace/datasets/msrc21/'", "def get_default_paths():\n DATA_ROOT = os.environ.get(\"DATA_ROOT\", \"data\")\n defaults = {\n \"TOKENIZE_DATA_DIR\": DATA_ROOT + \"/tokenize\",\n \"MWT_DATA_DIR\": DATA_ROOT + \"/mwt\",\n \"LEMMA_DATA_DIR\": DATA_ROOT + \"/lemma\",\n \"POS_DATA_DIR\": DATA_ROOT + \"/pos\",\n \"DEPPARSE_DATA_DIR\": DATA_ROOT + \"/depparse\",\n \"ETE_DATA_DIR\": DATA_ROOT + \"/ete\",\n \"NER_DATA_DIR\": DATA_ROOT + \"/ner\",\n \"CHARLM_DATA_DIR\": DATA_ROOT + \"/charlm\",\n \"SENTIMENT_DATA_DIR\": DATA_ROOT + \"/sentiment\",\n \"CONSTITUENCY_DATA_DIR\": DATA_ROOT + \"/constituency\",\n\n # Set directories to store external word vector data\n \"WORDVEC_DIR\": \"extern_data/wordvec\",\n\n # TODO: not sure what other people actually have\n # TODO: also, could make this automatically update to the latest\n \"UDBASE\": \"extern_data/ud2/ud-treebanks-v2.11\",\n \"UDBASE_GIT\": \"extern_data/ud2/git\",\n\n \"NERBASE\": \"extern_data/ner\",\n \"CONSTITUENCY_BASE\": \"extern_data/constituency\",\n \"SENTIMENT_BASE\": \"extern_data/sentiment\",\n\n # there's a stanford github, stanfordnlp/handparsed-treebank,\n # with some data for different languages\n \"HANDPARSED_DIR\": \"extern_data/handparsed-treebank\",\n\n # directory with the contents of https://nlp.stanford.edu/projects/stanza/bio/\n # on the cluster, for example, /u/nlp/software/stanza/bio_ud\n \"BIO_UD_DIR\": \"extern_data/bio\",\n\n # data root for other general input files, such as VI_VLSP\n \"EXTERN_DIR\": \"extern_data\",\n }\n\n paths = { \"DATA_ROOT\" : DATA_ROOT }\n for k, v in defaults.items():\n paths[k] = os.environ.get(k, v)\n\n return paths", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)", "def get_data_path():\n return os.getcwd() + \"/data/\"", "def _get_default_path(self):\n return os.path.join(action_datasets.ROOT_DIR, 'data', 'Actions')", "def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep", "def _get_default_path(self):\n\n raise NotImplementedError()", "def dataPath(self):\n return ''", "def DefaultPath(self) -> str:\n return self.m_def_path", "def data_directory(self):\n\n return self.get_raw(\"data_directory\")", "def get_data_dir():\n return Path(current_app.config[\"USER_DIR\"]) / \"data\"", "def data_path(self):\n raise NotImplementedError", "def data_dir(self) -> Path:\n return self._data_dir", "def get_data_dir(self):\n return self.data_dir", "def get_data_path():\n\n # Get pathname absolute or relative.\n path = os.path.join(\n os.path.dirname(__file__), __malstor_data_directory__)\n\n abs_data_path = os.path.abspath(path)\n if not os.path.exists(abs_data_path):\n raise project_path_not_found\n\n return abs_data_path", "def defaultDirectory(self):\n return 
self.__defaultDirectory", "def data_dir(self):\r\n return self._data_dir", "def data_dir():\n return _config.datadir", "def data_dir(self):\n return self._data_dir", "def data_dir(self):\n return self._data_dir", "def get_default_data_image_path():\n misc_path = pkg_resources.resource_filename('sst', 'misc/')\n return os.path.abspath(os.path.join(misc_path, \"um_000000.png\"))", "def datadir():\n return '../data/'", "def data_dir(cls) -> Union[str, Path]:\n if cls._data_dir is None:\n msg = \"data_dir not supplied, defaulting to working_dir\"\n run_log.warning(msg)\n return cls.working_dir\n else:\n return cls._data_dir" ]
[ "0.8129479", "0.8007878", "0.79695016", "0.79296505", "0.7858564", "0.7470226", "0.743667", "0.7417918", "0.74098426", "0.74098426", "0.7362305", "0.7316143", "0.72809714", "0.72342837", "0.72125745", "0.7184736", "0.7136621", "0.70106024", "0.69430655", "0.6912006", "0.68595666", "0.683762", "0.68349016", "0.6828683", "0.68005913", "0.67608595", "0.67608595", "0.67582464", "0.6710663", "0.66990995" ]
0.8906264
0
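The helper above anchors a search path at the directory of the defining module. A self-contained sketch of that pattern follows; the `SearchPath` class is a stand-in assumption, not the MaterialX `FileSearchPath` type.

```python
import os


class SearchPath:
    """Stand-in for a file search path rooted at the current module's folder."""

    def __init__(self, root):
        self.roots = [root]

    def find(self, relative):
        # Return the first existing match under any registered root.
        for root in self.roots:
            candidate = os.path.join(root, relative)
            if os.path.exists(candidate):
                return candidate
        return None


default_path = SearchPath(os.path.dirname(__file__))
print(default_path.roots)
```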
Return a list of default data library folders.
def getDefaultDataLibraryFolders():
    return [ 'libraries' ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_default_paths():\n DATA_ROOT = os.environ.get(\"DATA_ROOT\", \"data\")\n defaults = {\n \"TOKENIZE_DATA_DIR\": DATA_ROOT + \"/tokenize\",\n \"MWT_DATA_DIR\": DATA_ROOT + \"/mwt\",\n \"LEMMA_DATA_DIR\": DATA_ROOT + \"/lemma\",\n \"POS_DATA_DIR\": DATA_ROOT + \"/pos\",\n \"DEPPARSE_DATA_DIR\": DATA_ROOT + \"/depparse\",\n \"ETE_DATA_DIR\": DATA_ROOT + \"/ete\",\n \"NER_DATA_DIR\": DATA_ROOT + \"/ner\",\n \"CHARLM_DATA_DIR\": DATA_ROOT + \"/charlm\",\n \"SENTIMENT_DATA_DIR\": DATA_ROOT + \"/sentiment\",\n \"CONSTITUENCY_DATA_DIR\": DATA_ROOT + \"/constituency\",\n\n # Set directories to store external word vector data\n \"WORDVEC_DIR\": \"extern_data/wordvec\",\n\n # TODO: not sure what other people actually have\n # TODO: also, could make this automatically update to the latest\n \"UDBASE\": \"extern_data/ud2/ud-treebanks-v2.11\",\n \"UDBASE_GIT\": \"extern_data/ud2/git\",\n\n \"NERBASE\": \"extern_data/ner\",\n \"CONSTITUENCY_BASE\": \"extern_data/constituency\",\n \"SENTIMENT_BASE\": \"extern_data/sentiment\",\n\n # there's a stanford github, stanfordnlp/handparsed-treebank,\n # with some data for different languages\n \"HANDPARSED_DIR\": \"extern_data/handparsed-treebank\",\n\n # directory with the contents of https://nlp.stanford.edu/projects/stanza/bio/\n # on the cluster, for example, /u/nlp/software/stanza/bio_ud\n \"BIO_UD_DIR\": \"extern_data/bio\",\n\n # data root for other general input files, such as VI_VLSP\n \"EXTERN_DIR\": \"extern_data\",\n }\n\n paths = { \"DATA_ROOT\" : DATA_ROOT }\n for k, v in defaults.items():\n paths[k] = os.environ.get(k, v)\n\n return paths", "def getDefaultDataSearchPath():\n return FileSearchPath(os.path.dirname(__file__))", "def default_data_dir(self):\n return self._default_data_dir", "def datadir():\n return '../data/'", "def library_dirs(self):", "def default_search_folders(app_name):\n return [\n \"%s/cdis/%s\" % (XDG_DATA_HOME, app_name),\n \"/usr/share/cdis/%s\" % app_name,\n \"%s/gen3/%s\" % (XDG_DATA_HOME, app_name),\n \"/usr/share/gen3/%s\" % app_name,\n \"/var/www/%s\" % app_name,\n \"/etc/gen3/%s\" % app_name,\n ]", "def get_default_data_dir(self):\n data_dir_path = os.path.join(self.comicsite.short_name,self.folder_prefix,self.cleantitle)\n return data_dir_path", "def data_dir():\n return _config.datadir", "def find_data(self):\n data_list = []\n for root, dirs, files in os.walk(pathfinder.data_path()):\n for name in files:\n data_list.append(os.path.join(root, name))\n return data_list", "def get_library_dirs():\n if DAALTK_HOME_ENV_VAR not in os.environ:\n raise Exception(\"Required environment variable %s not set\" % DAALTK_HOME_ENV_VAR)\n\n daaltk_home = os.environ[DAALTK_HOME_ENV_VAR]\n return [daaltk_home, os.path.join(daaltk_home, LIB_DIR)]", "def get_data_directory():\n return gdc19.DATA_DIRECTORY", "def list_all():\n if os.path.exists(DATA_DIR):\n return os.listdir(DATA_DIR)\n return []", "def data_directories(self):\n\n return [simulation.outdir for simulation in self.simulations]", "def get_data_files():\n\n data_files = []\n for d, dirs, filenames in os.walk(share_jupyterhub):\n rel_d = os.path.relpath(d, here)\n data_files.append((rel_d, [os.path.join(rel_d, f) for f in filenames]))\n return data_files", "def data_directory(self):\n\n return self.get_raw(\"data_directory\")", "def get_setup_data_files(self):\n data_files = CMakeBuilder.default_setup_data_files\n if gitutil.has_submodules():\n data_files.append(\".gitmodules\")\n data_files.append(\n os.path.join(self.dist_dir, gitutil.gitmodules_status_name)\n 
)\n return [(\"\", data_files)]", "def get_data_dir():\n return Path(current_app.config[\"USER_DIR\"]) / \"data\"", "def get_data_dir():\n rootdir = os.path.dirname(__file__)\n libdir = rootdir + os.sep + \"data\"\n return libdir", "def root_directory_list(self) -> str:\n return self.__root_directory_list", "def getDataFiles(directoryName):\r\n \r\n return listdir(directoryName)", "def valid_datastores(cls):\n\n dblist = os.listdir(DATASTORE_DIR)\n return dblist", "def get_data_files():\n data_files = []\n\n # Walk through the data directory, adding all files\n data_generator = os.walk('pypeit/data')\n for path, directories, files in data_generator:\n for f in files:\n data_path = '/'.join(path.split('/')[1:])\n data_files.append(os.path.join(data_path, f))\n\n # Add pipeline and spectrograph settings\n settings = glob.glob('pypeit/settings/settings.*')\n settings = ['/'.join(path.split('/')[1:]) for path in settings]\n data_files.extend(settings)\n\n return data_files", "def get_data_dir() -> str:\n os.makedirs(DEFAULT_OUTPUT_DIR, exist_ok=True)\n return DEFAULT_OUTPUT_DIR", "def avail(self):\n\n return os.listdir(self.datadir)", "def fetch_dset_dirs(dset_name=None):\n assert (dset_name is None) or (dset_name in DATASET_DIRS), \"invalid name\"\n\n dset_name = \"default\" if dset_name is None else dset_name\n\n home = os.path.expanduser(\"~\")\n\n return list(os.path.join(home, d) for d in DATASET_DIRS[dset_name])", "def library_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-L'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-L for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def data_dir():\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')", "def _set_dirs(self, datafolder):\n self.List_of_dir = []\n self.List_of_files = dict()\n folders = os.listdir(datafolder)\n folders.sort()\n for i in folders:\n if os.path.isdir(os.path.join(datafolder,i)) and i != '.ipynb_checkpoints': # ignore .ipynb_checkpoints, allowing the generator to work in Amazon\n self.List_of_dir.append(os.path.join(datafolder,i))\n self.List_of_files[os.path.join(datafolder,i)]=[]\n for file in os.listdir(os.path.join(datafolder, i, 'Input')):\n if file.split('.')[-1] == 'hdf5':\n self.List_of_files[os.path.join(datafolder,i)].append(file.split('.')[-2])\n self._nb_dir = len(self.List_of_dir)", "def GetDataDir():\n sp = wx.StandardPaths.Get()\n return sp.GetUserDataDir()", "def lib_directories(self):\n if self._lib_directories is None:\n self._lib_directories = []\n app_path = os.getcwd()\n contents = os.listdir(app_path)\n for c in contents:\n # ensure content starts with lib, is directory, and is readable\n if c.startswith('lib') and os.path.isdir(c) and (os.access(c, os.R_OK)):\n self._lib_directories.append(c)\n return sorted(self._lib_directories, reverse=True)" ]
[ "0.717137", "0.70716625", "0.6955009", "0.6915999", "0.68801606", "0.68446493", "0.6835322", "0.6751219", "0.66922414", "0.6663725", "0.66411805", "0.66054195", "0.6547148", "0.65449494", "0.65448755", "0.6523286", "0.649509", "0.64830816", "0.6471197", "0.6460034", "0.6445672", "0.64269006", "0.6372019", "0.636878", "0.6362273", "0.63423216", "0.63382125", "0.63333446", "0.63317317", "0.6301699" ]
0.89339906
0
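A hedged sketch combining the two helpers from the last two records to load the standard data libraries into a document; it assumes the MaterialX Python bindings expose these helpers and `loadLibraries()` at module level.

```python
# Assumed MaterialX module-level helpers; loadLibraries() signature is an assumption.
import MaterialX as mx

doc = mx.createDocument()
search_path = mx.getDefaultDataSearchPath()
library_folders = mx.getDefaultDataLibraryFolders()
mx.loadLibraries(library_folders, search_path, doc)
print(len(doc.getNodeDefs()), "node definitions loaded")
```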
Get a list of objects labeled as frame guides in the current 3D scene.
def get_guides(data):
    return data.groups["Frames"].objects
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getBrailleRegionsForAnimation(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForAnimation\", obj)\n\n text = \"\"\n text = self._script.appendString(\n text, self._script.getDisplayedLabel(obj))\n text = self._script.appendString(\n text, self._script.getDisplayedText(obj))\n text = self._script.appendString(text, self._getTextForRole(obj))\n text = self._script.appendString(text, obj.description, \": \")\n\n regions = []\n componentRegion = braille.Component(obj, text)\n regions.append(componentRegion)\n\n return [regions, componentRegion]", "def get_static_objects_in_frame(self, frame: int) -> np.ndarray:\n obj_types = np.array(['boxAIK', 'cylinderAIK'])\n return self._get_objects_in_frame(frame, obj_types, 'oid', self.objects)", "def extract_labels_xyz(scene: \"Scenemaker\") -> List[Tuple[int, np.ndarray]]:\r\n objects = utils.select_collection(scene.target_collection)\r\n boxes_list = []\r\n\r\n for obj in objects:\r\n objclass = obj.name.split(\".\")[0]\r\n xyz = np.array(obj.dimensions)\r\n boxes_list.append((scene.name2num[objclass], xyz))\r\n\r\n return boxes_list", "def _getBrailleRegionsForFrame(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForFrame\", obj)\n\n regions = []\n\n text = \"\"\n text = self._script.appendString(\n text, self._script.getDisplayedLabel(obj))\n text = self._script.appendString(\n text, self._script.getDisplayedText(obj))\n text = self._script.appendString(text,\n self._script.getTextForValue(obj))\n text = self._script.appendString(text, self._getTextForRole(obj))\n\n # If this application has more than one unfocused alert or\n # dialog window, then add '(<m> dialogs)' to the braille context,\n # to let the user know.\n #\n alertAndDialogCount = \\\n self._script.getUnfocusedAlertAndDialogCount(obj)\n if alertAndDialogCount > 0:\n # Translators: this tells the user how many unfocused\n # alert and dialog windows plus the total number of\n # windows that this application has.\n #\n line = ngettext(\"(%d dialog)\",\n \"(%d dialogs)\",\n alertAndDialogCount) % alertAndDialogCount\n text = self._script.appendString(text, line)\n\n regions = []\n componentRegion = braille.Component(obj, text)\n regions.append(componentRegion)\n\n return [regions, componentRegion]", "def get_objects(self):\n return \\\n self,\\\n self.label,\\\n self.frame_controls, \\\n (\n self.button_decrease,\n self.scale_volume,\n self.button_increase\n )", "def extract_labels_full(scene: \"Scenemaker\") -> List[Tuple[int, np.ndarray]]:\r\n objects = utils.select_collection(scene.target_collection)\r\n boxes_list = []\r\n\r\n for obj in objects:\r\n objclass = obj.name.split(\".\")[0]\r\n dim = obj.dimensions\r\n rot = normalize_rotations(np.array(obj.rotation_euler))\r\n loc = change_to_spawnbox_coords(np.array(obj.location))\r\n boxes_list.append((scene.name2num[objclass], np.concatenate((loc, dim, rot))))\r\n\r\n return boxes_list", "def items(self):\n return _osgAnimation.mapVertexInfluence_items(self)", "def get_selected_objects (context):\n return [obj for obj in context.selected_objects if obj.type == 'MESH']", "def references_list( self, theWeaver ):\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]", "def targets(self):\n self.renderer.begin_rendering(\"targets\")\n for target in self.targets:\n self.renderer.draw_rect_3d(target, 10, 10, True, self.renderer.blue())\n self.renderer.end_rendering()", "def get_all_drawables(self): \n drawables = []\n if len(self.component_list) > 0:\n for c in 
self.component_list:\n drawables.append(c.get_drawables())\n return drawables", "def plot_3d_object(object_):\n \n # Initialize renderer instance\n r = Renderer()\n\n # Add surfaces and goal regions to the renderer instance\n for surf in object_:\n r.add((object_[surf][0],'b',1))\n if len(object_[surf])>2:\n r.add((object_[surf][2],'r',1))\n r.add((gPoint(-15,-15,-15),'k',1))\n r.show()", "def graphicsItems(self):\n return self.ctrl.getGraphicsItems()", "def extract_labels_cps(scene: \"Scenemaker\") -> List[Tuple[int, np.ndarray]]:\r\n objects = utils.select_collection(scene.target_collection)\r\n boxes_list = []\r\n\r\n for obj in objects:\r\n objclass = obj.name.split(\".\")[0]\r\n cornermatrix = np.empty((8, 3))\r\n for j, corner in enumerate(obj.bound_box):\r\n cornermatrix[j] = corner\r\n\r\n boxes_list.append((scene.name2num[objclass], cornermatrix))\r\n\r\n return boxes_list", "def _getBrailleRegionsForSlider(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForSlider\", obj)\n\n regions = []\n\n text = \"\"\n text = self._script.appendString(\n text, self._script.getDisplayedLabel(obj))\n # Ignore the text on the slider.\n #text = self._script.appendString(\n # text, self._script.getDisplayedText(obj))\n text = self._script.appendString(text,\n self._script.getTextForValue(obj))\n text = self._script.appendString(text, self._getTextForRole(obj))\n text = self._script.appendString(text,\n self._getTextForRequiredObject(obj))\n\n regions = []\n componentRegion = braille.Component(obj, text)\n regions.append(componentRegion)\n\n return [regions, componentRegion]", "def _getBrailleRegionsForPanel(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForPanel\", obj)\n\n regions = []\n\n text = \"\"\n text = self._script.appendString(\n text, self._script.getDisplayedLabel(obj))\n\n # If there was no label for the panel, but it has a name, we'll\n # use the name.\n #\n if len(text) == 0:\n text = self._script.appendString(\n text, self._script.getDisplayedText(obj))\n\n text = self._script.appendString(text, self._getTextForRole(obj))\n\n regions = []\n componentRegion = braille.Component(obj, text)\n regions.append(componentRegion)\n\n return [regions, componentRegion]", "def extract_labels_std(self, scene: \"Scenemaker\") -> List[Tuple[int, np.ndarray]]:\r\n objects = utils.select_collection(scene.target_collection)\r\n camera = self.stdbboxcam\r\n boxes_list = []\r\n for obj in objects:\r\n objclass = obj.name.split(\".\")[0] # eg mackerel.002 -> mackerel\r\n box = camera_view_bounds_2d(scene=bpy.context.scene, cam_ob=camera, me_ob=obj)\r\n boxes_list.append((scene.name2num[objclass], np.array(box)))\r\n\r\n return boxes_list", "def bs_getShaders(obj):\n pm.select(obj)\n pm.windows.hyperShade(shaderNetworksSelectMaterialNodes=True)\n return pm.ls(sl=True) # Returns all shaders associated with the object (shape, face etc)", "def _getBrailleRegionsForPushButton(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForPushButton\", obj)\n\n regions = []\n\n text = \"\"\n text = self._script.appendString(\n text, self._script.getDisplayedLabel(obj))\n text = self._script.appendString(\n text, self._script.getDisplayedText(obj))\n\n # In Java, some push buttons don't have label and text.\n # In this case, we'll add to presentation the object description,\n # if exists.\n #\n if (not text) and (obj.description):\n text = self._script.appendString(text, obj.description)\n\n text = self._script.appendString(text, self._getTextForRole(obj))\n\n regions = []\n componentRegion = 
braille.Component(obj, text)\n regions.append(componentRegion)\n\n return [regions, componentRegion]", "def get_objects(self):\n objs = self.scene.get_objects()\n objs_attached = self.scene.get_attached_objects()\n return objs, objs_attached", "def components(self):\r\n return list(self._components)", "def getListOfAdditionalGraphicalObjects(self):\n return _libsbml.Layout_getListOfAdditionalGraphicalObjects(self)", "def get_components_drawables(self):\n # print self.component_list\n print len(self.component_list)\n for c in self.component_list:\n return c.get_drawables()", "def tools(self):\n tool1 = TranslatedShape(shape_in=\n RotatedShape(shape_in=\n Cylinder(radius=\n self.wheels_properties[\n 1] + 10.,\n height=400.,\n position=self.position),\n rotation_point=self.position,\n vector=Vector(1, 0, 0),\n angle=radians(90)),\n displacement=\n Vector(self.wheels_properties[0],\n 299.,\n -self.positions[1][0]))\n tool2 = TranslatedShape(shape_in=tool1,\n displacement=Vector(0.,\n 0.,\n self.positions[1][0]\n - self.positions[1][1]))\n tool3 = MirroredShape(shape_in=tool1,\n reference_point=translate(self.position,\n \"y\",\n self.width_car / 2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n tool4 = MirroredShape(shape_in=tool2,\n reference_point=translate(self.position,\n \"y\",\n self.width_car / 2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n return [tool1, tool2, tool3, tool4]", "def _getBrailleRegionsForEmbedded(self, obj, role=None):\n\n self._debugGenerator(\"_getBrailleRegionsForEmbedded\", obj)\n\n regions = []\n\n text = \"\"\n text = self._script.appendString(\n text, self._script.getDisplayedLabel(obj))\n text = self._script.appendString(\n text, self._script.getDisplayedText(obj))\n if not text:\n try:\n text = obj.getApplication().name\n except:\n pass\n\n regions = []\n componentRegion = braille.Component(obj, text)\n regions.append(componentRegion)\n\n return [regions, componentRegion]", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def getBroaderPlotList(self, iCiv, bBorder = False):\n\t\t\n\t\treturn self.getRegionPlotList(self.getCoreRegions(iCiv) + self.getNormalRegions(iCiv) + self.getBroaderRegions(iCiv), bBorder)", "def items(self):\n return _osgAnimation.BoneMap_items(self)", "def _getBrailleRegionsForPageTab(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForPageTab\", obj)\n\n text = \"\"\n text = self._script.appendString(\n text, self._script.getDisplayedLabel(obj))\n text = self._script.appendString(\n text, self._script.getDisplayedText(obj))\n text = self._script.appendString(text, self._getTextForRole(obj))\n\n if obj == orca_state.locusOfFocus:\n text = self._script.appendString(\n text, self._getTextForAvailability(obj))\n text = self._script.appendString(text,\n self._getTextForAccelerator(obj),\n \"\")\n\n regions = []\n componentRegion = braille.Component(obj, text)\n regions.append(componentRegion)\n\n return [regions, componentRegion]", "def get_object_detections(self):\n detections = self.__get_cropped_detections(self.image)\n return detections" ]
[ "0.56232345", "0.5550209", "0.5408272", "0.5292358", "0.52791625", "0.5228657", "0.5212502", "0.5169032", "0.5049105", "0.50468457", "0.5031301", "0.50303346", "0.49700224", "0.49568957", "0.49561343", "0.49337798", "0.4922755", "0.49168405", "0.48798805", "0.48694423", "0.4840078", "0.4839188", "0.48354247", "0.4834989", "0.48142886", "0.48036492", "0.47963768", "0.47742823", "0.47720823", "0.47553393" ]
0.639492
0
Get the reference frame type corresponding to a particular guide.
def get_guide_type(guide):
    # Maintained by naming convention in the Blender files. Sub-optimal.
    try:
        return guide.name[guide.name.rindex(".") + 1:]
    except:
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getComponentType(cls):\n\n return 'Guide'", "def getComponentType(cls):\n\n return 'Guide'", "def get_typ(self, refobj):\n enum = cmds.getAttr(\"%s.type\" % refobj)\n try:\n return JB_ReftrackNode.types[enum]\n except IndexError:\n raise ValueError(\"The type on the node %s could not be associated with an available type: %s\" %\n (refobj, JB_ReftrackNode.types))", "def get_by_ref(self, reference):\n try:\n self.__logger.debug(\"reference %s\" % reference)\n return self.__filetypeList[reference]\n\n except KeyError as e:\n self.__logger.debug(\"can't find file format object\"\n \" for reference %s\" % reference)\n self.__logger.debug(e)\n sys.exit(1)", "def _GetReferenceObject(type_name: str) -> SchemaReference:\n return {\n \"$ref\": f\"#/components/schemas/{type_name}\",\n }", "def get_object_type(self, ref):\n ws = Workspace(self.ws_url)\n info = ws.get_object_info3({\"objects\": [{\"ref\": ref}]})\n obj_info = info.get(\"infos\", [[]])[0]\n if len(obj_info) == 0:\n raise RuntimeError(\"An error occurred while fetching type info from the Workspace. \"\n \"No information returned for reference {}\".format(ref))\n return obj_info[2]", "def type(self) -> URIType:\n if self.study_instance_uid is None:\n return URIType.SERVICE\n elif self.series_instance_uid is None:\n return URIType.STUDY\n elif self.sop_instance_uid is None:\n return URIType.SERIES\n elif self.frames is None:\n return URIType.INSTANCE\n return URIType.FRAME", "def getTypeReference(self):\r\n return self.implementationTypeRef", "def _get_frame_class(frame):\n if isinstance(frame, str):\n frame_names = frame_transform_graph.get_names()\n if frame not in frame_names:\n raise ValueError(\n f'Coordinate frame name \"{frame}\" is not a known '\n f\"coordinate frame ({sorted(frame_names)})\"\n )\n frame_cls = frame_transform_graph.lookup_name(frame)\n\n elif isinstance(frame, type) and issubclass(frame, BaseCoordinateFrame):\n frame_cls = frame\n\n else:\n raise ValueError(\n \"Coordinate frame must be a frame name or frame class, not a\"\n f\" '{frame.__class__.__name__}'\"\n )\n\n return frame_cls", "def get_caller_frame() -> FrameType:\n return cast(FrameType, cast(FrameType, inspect.currentframe()).f_back)", "def getReferenceDataType(program: ghidra.program.model.listing.Program, referredToDataType: ghidra.program.model.data.DataType) -> ghidra.program.model.data.DataType:\n ...", "def getTypeCode(self):\n return _libsbml.ReferenceGlyph_getTypeCode(self)", "def reference(reference):\n if ffi.typeof(reference) not in _reference_types:\n raise TypeError(\"Can't cast %r to vx_reference\" % reference)\n return ffi.cast('vx_reference', reference)", "def getDescriptorType(self): # real signature unknown; restored from __doc__\n pass", "def get_type(self):\n\t\treturn call_sdk_function('PrlFsInfo_GetType', self.handle)", "def getTypeCode(self):\n return _libsbml.SBaseRef_getTypeCode(self)", "def type(self) -> global___Type:", "def type(self):\n return BipType.get_at(self.ea)", "def getDependenceTypeAt(self, pos):\n return self.sentence[pos].getDependenceType()", "def _get_ref_from_taxonomy(fides_key: str) -> FideslangDataCategory:\n for item in DEFAULT_TAXONOMY.data_category:\n if item.fides_key == fides_key:\n return item\n\n raise common_exceptions.DataCategoryNotSupported(\n f\"The data category {fides_key} has no Fideslang reference.\"\n )", "def XrefTypeName(typecode):\n assert typecode in _ref_types, \"unknown reference type %d\" % typecode\n return _ref_types[typecode]", "def _type(self):\n return self._id[1]", "def 
target_type(self) -> str:\n return pulumi.get(self, \"target_type\")", "def target_type(self) -> Optional[str]:\n return pulumi.get(self, \"target_type\")", "def frtype(self):\n return self._frtype", "def paypal_reference_id_type_enum(self) -> ReferenceIdType:\n return _REFERENCE_ID_MAPPINGS.get(self.paypal_reference_id_type)", "def getParentType(soup, refs, currentType, tagType='entitytype'):\n propSchema = soup.find( 'schema', attrs={'namespace': getNamespace(currentType)})\n \n if propSchema is None:\n return False, None, None, None\n propEntity = propSchema.find( tagType, attrs={'name': getType(currentType)})\n \n if propEntity is None:\n return False, None, None, None\n\n currentType = propEntity.get('basetype')\n if currentType is None:\n return False, None, None, None\n \n currentType = currentType.replace('#','')\n SchemaNamespace, SchemaType = getNamespace(currentType), getType(currentType)\n propSchema = soup.find( 'schema', attrs={'namespace': SchemaNamespace})\n\n if propSchema is None:\n success, innerSoup, uri = getSchemaDetails(\n *refs.get(SchemaNamespace, (None,None)))\n if not success:\n return False, None, None, None\n innerRefs = getReferenceDetails(innerSoup)\n propSchema = innerSoup.find(\n 'schema', attrs={'namespace': SchemaNamespace})\n if propSchema is None:\n return False, None, None, None\n else:\n innerSoup = soup\n innerRefs = refs\n\n return True, innerSoup, innerRefs, currentType", "def dialogue_reference(self) -> Tuple[str, str]:\n enforce(self.is_set(\"dialogue_reference\"), \"dialogue_reference is not set.\")\n return cast(Tuple[str, str], self.get(\"dialogue_reference\"))", "def AceType(self):\n raw_type = super(AceHeader, self).AceType\n return ACE_CLASS_TYPE_MAPPER[raw_type]", "def getType(self): #$NON-NLS-1$\r" ]
[ "0.5693369", "0.5693369", "0.5446423", "0.5337169", "0.53119755", "0.529872", "0.52709115", "0.5264755", "0.5190898", "0.5159653", "0.51567864", "0.5133397", "0.50048554", "0.49838355", "0.49712083", "0.49645618", "0.48794225", "0.48541024", "0.48476246", "0.4845618", "0.48423275", "0.47823843", "0.47781745", "0.47454697", "0.47421953", "0.4737893", "0.47308448", "0.47293863", "0.47256213", "0.47097415" ]
0.7534499
0
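For illustration, under the naming convention that get_guide_type's comment describes, a guide whose object name ends in ".functional" (the suffix prepare_scene later checks for) resolves to that suffix. The object name below is hypothetical and not taken from the dataset; only the .name attribute of a Blender object is assumed:

class FakeGuide:
    # Stand-in for a Blender object; get_guide_type only reads .name.
    name = "guide.table.functional"  # hypothetical name

print(get_guide_type(FakeGuide()))  # prints "functional"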
Randomize the position of an object `obj` along some linear guide path `guide`.
def randomize_position(obj, guide):
    p1, p2 = get_guide_endpoints(guide)
    t = random.random()
    target_point = p1 + t * (p2 - p1)

    # update X and Y coordinates.
    obj.location[0] = target_point[0]
    obj.location[1] = target_point[1]

    return t
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randomize_distance(obj, guide, scale_bounds=(-2, 0)):\n p1, p2 = get_guide_endpoints(guide)\n midpoint = p1 / 2 + p2 / 2\n\n # Get vector perpendicular to the guide.\n diff_rot = Matrix.Rotation(math.pi / 2, 3, 'Z') * (p2 - p1)\n\n scale_factor = scale_bounds[0] + random.random() * (scale_bounds[1] - scale_bounds[0])\n target_point = midpoint + scale_factor * diff_rot\n\n obj.location[0] = target_point[0]\n obj.location[1] = target_point[1]\n\n return scale_factor", "def point_at(obj, target, roll=0):\n\tif not isinstance(target, mathutils.Vector):\n\t\ttarget = mathutils.Vector(target)\n\tloc = obj.location\n\t# direction points from the object to the target\n\tdirection = target - loc\n\n\tquat = direction.to_track_quat('-Z', 'Y')\n\n\t# /usr/share/blender/scripts/addons/add_advanced_objects_menu/arrange_on_curve.py\n\tquat = quat.to_matrix().to_4x4()\n\trollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')\n\n\t# remember the current location, since assigning to obj.matrix_world changes it\n\tloc = loc.to_tuple()\n\tobj.matrix_world = quat * rollMatrix\n\tobj.location = loc", "def point_at(obj, target, roll=0):\n if not isinstance(target, mathutils.Vector):\n target = mathutils.Vector(target)\n loc = obj.location\n # direction points from the object to the target\n direction = target - loc\n\n quat = direction.to_track_quat('-Z', 'Y')\n\n # /usr/share/blender/scripts/addons/add_advanced_objects_menu/arrange_on_curve.py\n quat = quat.to_matrix().to_4x4()\n rollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')\n\n # remember the current location, since assigning to obj.matrix_world changes it\n loc = loc.to_tuple()\n obj.matrix_world = quat * rollMatrix\n obj.location = loc", "def guide_at(self, pos):\n rose = self._rose\n pos = rose.mapFromGlobal(pos)\n return rose.guideAt(pos)", "def random_pose_next_to_goal(self, goal_pose, spat_rel, env):\n goal_pose = utils.xyz_to_pix(goal_pose[0], self.bounds, self.pix_size)\n obj_size = (0.04, 0.04, 0.04)\n erode_size = self.get_erode_size(obj_size)\n\n _, hmap, obj_mask = self.get_true_image(env)\n free = self.compute_free_space(env, obj_mask)\n\n # Find valid pose\n compute_angle_wrt_goal = functools.partial(\n compute_angle, x2=goal_pose[0], y2=goal_pose[1])\n\n def compute_dist(i, j):\n dist = np.sqrt((goal_pose[0] - i)**2 + (j - goal_pose[1])**2)\n return dist\n\n angle_from_goal = np.fromfunction(compute_angle_wrt_goal, free.shape)\n dist_from_goal = np.fromfunction(compute_dist, free.shape)\n is_valid_dist = np.vectorize(lambda x: x < erode_size * 2)\n is_valid = self.find_valid_region(spat_rel)\n\n # For each occupied region, expand the region a little bit more to avoid\n # placing objects too close by.\n free = cv2.erode(free, np.ones((erode_size, erode_size), np.uint8))\n free[~is_valid(angle_from_goal)] = 0\n free[~is_valid_dist(dist_from_goal)] = 0\n (\n free[0:erode_size, :],\n free[:, 0:erode_size],\n free[-erode_size:, :],\n free[:, -erode_size:],\n ) = (0, 0, 0, 0)\n\n if np.sum(free) == 0:\n print(\"There is no free space!!\")\n return None, None\n\n pos, rot = self.sample_pos_in_free_space(free, hmap, obj_size)\n return pos, rot", "def fill(obj, prob = 1, collide_obj = None, collide_callback = None) :\n for x in range(int(Globals.instance.WIDTH/Globals.instance.GRID_SIZE)):\n for y in range(int(Globals.instance.HEIGHT/Globals.instance.GRID_SIZE)):\n if random.uniform(0, 1) > prob:\n continue\n if at((x,y)) is None:\n o = obj(pos=(x,y))\n if collide_obj and collide_callback:\n if isinstance(collide_obj, (list, tuple)):\n for 
obj in collide_obj:\n o.collides(obj, collide_callback)\n else:\n o.collides(collide_obj, collide_callback)", "def place_obj(self,\n obj,\n top=None,\n size=None,\n reject_fn=None,\n max_tries=math.inf\n ):\n\n if top is None:\n top = (0, 0)\n\n if size is None:\n size = (self.grid.width, self.grid.height)\n\n num_tries = 0\n\n while True:\n # This is to handle with rare cases where rejection sampling\n # gets stuck in an infinite loop\n if num_tries > max_tries:\n raise RecursionError('rejection sampling failed in place_obj')\n\n num_tries += 1\n\n pos = np.array((\n self._rand_int(top[0], top[0] + size[0]),\n self._rand_int(top[1], top[1] + size[1])\n ))\n\n # Don't place the object on top of another object\n if self.grid.get(*pos) != None:\n continue\n\n # Don't place the object where the agent is\n if np.array_equal(pos, self.start_pos):\n continue\n\n if np.array_equal(pos, self.start_dpos):\n continue\n\n # Check if there is a filtering criterion\n if reject_fn and reject_fn(self, pos):\n continue\n\n break\n\n self.grid.set(*pos, obj)\n\n if obj is not None:\n obj.init_pos = pos\n obj.cur_pos = pos\n\n return pos", "def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "def move_to_object(self, obj_img_pos, img_shape, obj_col, des_img_pos, img_thres):\n def show_binary(img_bin, des_img_pos, new_img_pos, img_thres):\n \"\"\"\n Show intermediate binary image while refining position.\n \"\"\"\n img_bgr = cv2.cvtColor(img_bin, cv2.COLOR_GRAY2RGB)\n #draw tolerance lines\n #left/right vertical lines\n xl = des_img_pos[0] - img_thres\n xr = des_img_pos[0] + img_thres\n y1 = 0\n y2 = img_shape[1]\n cv2.line(img_bgr,(xl,y1),(xl,y2),(0,255,0),1)\n cv2.line(img_bgr,(xr,y1),(xr,y2),(0,255,0),1)\n #top/bottom horizontal lines\n yt = des_img_pos[1] - img_thres\n yb = des_img_pos[1] + img_thres\n x1 = 0\n x2 = img_shape[0]\n cv2.line(img_bgr,(x1,yt),(x2,yt),(0,255,0),1)\n cv2.line(img_bgr,(x1,yb),(x2,yb),(0,255,0),1)\n #draw circle at detected object\n cv2.circle(img_bgr,tuple(new_img_pos),6,(255,0,0),2)\n #show image\n cv2.imshow(window_name, img_bgr)\n cv2.waitKey(1000) & 0xFF\n \n cur_arm_pos = [self.x, self.y]\n move_inc = self.move_inc\n window_name = 'Refine position'\n col_thresh = self.close_col_thresh\n init_arm_pos = [self.init_x, self.init_y]\n scale = self.scale\n \n print(' Current obj img pos: '+str(obj_img_pos))\n \n #compute desired arm position\n des_arm_pos = self.world_pos_from_img_pos(obj_img_pos, \n img_shape, init_arm_pos, scale)\n print(' Desired arm position: '+str(des_arm_pos))\n \n #move arm to approximate position\n cur_arm_pos = self.move_to(des_arm_pos[0], des_arm_pos[1], \n self.move_to_height)\n new_img = self.update_img() #wait to update image\n \n #select new colour\n peg_col_close = self.choose_colours(new_img)\n \n #refine position\n new_img_pos, img_bin = self.find_colours(new_img, peg_col_close, \n num_objects=1, ab_dist_thresh=col_thresh)\n show_binary(img_bin, des_img_pos, new_img_pos, img_thres)\n while ( abs(new_img_pos[0] - des_img_pos[0]) > img_thres or \n abs(new_img_pos[1] - des_img_pos[1]) > img_thres ):\n #refine position\n cur_arm_pos = self.move_to_refine(des_img_pos, new_img_pos, \n cur_arm_pos, move_inc, img_thres)\n \n #update image\n new_img = self.update_img()\n \n #find new image position of peg\n new_img_pos, img_bin = self.find_colours(new_img, peg_col_close, \n num_objects=1, 
ab_dist_thresh=col_thresh)\n \n #show binary image\n show_binary(img_bin, des_img_pos, new_img_pos, img_thres)\n \n return cur_arm_pos", "def create_fixed_object(self):\n self.obj = self.img[self.y-self.rad:self.y+self.rad,\n self.x-self.rad:self.x+self.rad]", "def _get_random_pos_on_a_side(self):\n pass", "def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n self.move_to(newpos)", "def pickFittestRoad(obj, roads):\n fittestRid = -1\n accessPoint = Point(0, 0)\n\n PointA, PointB = objDiameter(obj)\n\n findRoad = False\n\n for road in roads:\n reference = road.geom.project(obj.centroid)\n tempAccessPoint = road.geom.interpolate(reference)\n PointC = (obj.centroid.x, obj.centroid.y)\n PointD = (tempAccessPoint.x, tempAccessPoint.y)\n deltaX = PointC[0] - PointA[0]\n deltaY = PointC[1] - PointA[1]\n PointE = (PointD[0] - deltaX, PointD[1] - deltaY)\n sideA = LineString((PointA, PointB)).length\n sideB = LineString((PointA, PointE)).length\n sideC = LineString((PointB, PointE)).length\n\n angle = getAngle(sideA, sideB, sideC)\n if angle > 90:\n angle = 180 - angle\n\n if angle > 30:\n # we think this angle is large enough\n # one more check, this straightLine CD had better not intersect another obj # NOQA\n lineCD = LineString((PointC, PointD))\n tid = obj.id\n cur.execute(\"select * from terraces \\\n where st_intersects(geom, st_geomfromtext('%s', 27700)) \\\n and tid != %d\" % (lineCD.wkt, tid)) # NOQA\n results = cur.fetchall()\n if not results:\n # which means no other terraces intersects lineCD\n findRoad = True\n fittestRid = road.id\n accessPoint = tempAccessPoint\n break\n\n if findRoad:\n return fittestRid, accessPoint\n\n else:\n # which means findRoad == False\n # we use the middle point of the roads[0] as access point\n terraceList[obj.id].projectType = 'special'\n road = roads[0]\n reference = road.geom.length * 0.5\n accessPoint = road.geom.interpolate(reference)\n fittestRid = road.id\n return fittestRid, accessPoint", "def move_objects(self):\n\n def get_new_obj_pose(curr_pos, curr_quat):\n angular_disp = 0.0\n delta_alpha = np.random.uniform(-angular_disp, angular_disp)\n delta_rot = Quaternion(axis=(0.0, 0.0, 1.0), radians=delta_alpha)\n curr_quat = Quaternion(curr_quat)\n newquat = delta_rot * curr_quat\n\n pos_ok = False\n while not pos_ok:\n const_dist = True\n if const_dist:\n alpha = np.random.uniform(-np.pi, np.pi, 1)\n d = 0.25\n delta_pos = np.array([d * np.cos(alpha), d * np.sin(alpha), 0.])\n else:\n pos_disp = 0.1\n delta_pos = np.concatenate([np.random.uniform(-pos_disp, pos_disp, 2), np.zeros([1])])\n newpos = curr_pos + delta_pos\n lift_object = False\n if lift_object:\n newpos[2] = 0.15\n if np.any(newpos[:2] > high_bound[:2]) or np.any(newpos[:2] < low_bound[:2]):\n pos_ok = False\n else:\n pos_ok = True\n\n return newpos, newquat\n\n for i in range(self.num_objects):\n curr_pos = self.sim.data.qpos[self._n_joints + i * 7: self._n_joints + 3 + i * 7]\n curr_quat = self.sim.data.qpos[self._n_joints + 3 + i * 7: self._n_joints + 7 + i * 7]\n obji_xyz, obji_quat = get_new_obj_pose(curr_pos, curr_quat)\n self.sim.data.qpos[self._n_joints + i * 7: self._n_joints + 3 + i * 7] = obji_xyz\n self.sim.data.qpos[self._n_joints + 3 + i * 7: self._n_joints + 7 + i * 7] = obji_quat.elements\n\n sim_state = self.sim.get_state()\n # sim_state.qpos[:] = sim_state.qpos\n sim_state.qvel[:] = np.zeros_like(sim_state.qvel)\n self.sim.set_state(sim_state)\n self.sim.forward()", "def 
set_random_pos(self, which):\n available = [[r, c] for r, row in enumerate(self.maze)\n for c, value in enumerate(row) if value == ' ']\n choice = random.choice(available)\n if which == 'starting':\n self.current_pos = choice\n elif which == 'finishing':\n self.finish_pos = choice", "def move_point_wline(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(1,max(1,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point", "def position_object(level, object_string, placeable_positions=None, ideal_position=None):\n if ideal_position:\n if level[ideal_position] == EMPTY:\n level[ideal_position] = object_string\n return\n\n if placeable_positions == set([]):\n raise ValueError(f\"There are no placeable positions for object {object_string} in {level}\")\n\n if placeable_positions is None:\n placeable_positions = get_placeable_positions(level)\n if not placeable_positions:\n raise ValueError(f\"The level has no placeable positions for the object {object_string}: {level}\")\n\n obj_position = random.choice(list(placeable_positions))\n placeable_positions.remove(obj_position)\n level[obj_position] = object_string", "def mutate_point_rect(mutated_genome):\n seed = random.randint(0,1)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_rect(mutated_genome,index)\n else: #seed == 1:\n shift_point_rect(mutated_genome,index)", "def MoveRandom(self):\n r = random.randint(0,3)\n if r == 0: self.x += 1\n elif r == 1: self.y += 1\n elif r == 2: self.x -= 1\n elif r == 3: self.y -= 1", "def computeOffsets_guided(\n self, coatPath: str, pantsPath: str, tarPara: list, \n subObj: Mesh = None, subTex: str = None, is_hres: bool = True):\n \n smpl = self.hresSMPL.copy() if is_hres else self.stdSMPL.copy()\n splt = self.num_separation[ 'hres' if is_hres else 'std' ]\n \n ## per-vertex offsets\n v_offsets_t = np.zeros_like(smpl.r)\n \n ## Pants\n offset_pants_t = compute_offset_tPose(\n smpl, pantsPath, self.thresholds['pants'], splt, self.verbose_on\n )\n mask = np.linalg.norm(offset_pants_t, axis=1) > np.linalg.norm(v_offsets_t, axis=1)\n v_offsets_t[mask] = offset_pants_t[mask]\n \n ## coat\n # None for sub84 and others subs without coat in their folder\n if coatPath is not None: \n offset_coat_t = compute_offset_tPose(\n smpl, coatPath, self.thresholds['coats'], splt, self.verbose_on\n )\n mask = np.linalg.norm(offset_coat_t, axis=1) > np.linalg.norm(v_offsets_t, axis=1)\n v_offsets_t[mask] = offset_coat_t[mask]\n \n ## Dress body\n if self.verbose_on and subObj is not None:\n print('show mesh in self.computeOffsets_guided().')\n smpl = smplFromParas(smpl, v_offsets_t, tarPara[0], tarPara[1], tarPara[2])\n dressed_body = Mesh(smpl.r, smpl.f)\n dressed_body.vt = subObj.vt\n dressed_body.ft = subObj.ft\n dressed_body.set_texture_image(subTex)\n \n mvs = MeshViewers((1, 1))\n mvs[0][0].set_static_meshes([dressed_body])\n \n return v_offsets_t", "def quickMirror(objArray=None, upVector=[0,0,1], axis='X'):\n if objArray is None:\n objArray=pm.ls(sl=1)\n for obj in objArray:\n nSplit=libName.nameSplit(obj)\n if nSplit[-1][0] == 'L':\n nSplit[-1][0]='R'\n elif nSplit[-1][0] == 'R':\n nSplit[-1][0]='L'\n else:\n print 'obj \"%s\" has been skipped cause prefix is neither \"L\" nor \"R\"'\n break\n\n mirrorObj=libName.nameRevertOriginal(splitName=nSplit)\n if pm.objExists(mirrorObj) == 0:\n print 'obj %s 
doesnt Exists. Mirrorring Skipped!!!!'%(mirrorObj)\n\n else:\n loc=pm.spaceLocator(n=obj+'_tmpLocQuickMirror')\n locUp=pm.spaceLocator(n=obj+'_tmpLocQuickMirrorAim')\n locAim=pm.spaceLocator(n=obj+'_tmpLocQuickMirrorUp')\n mloc=pm.spaceLocator(n=obj+'_tmpLocQuickMirrorMirror')\n\n snap(driver=obj, driven=loc)\n snap(driver=obj, driven=mloc)\n pm.parent(locUp, locAim, loc)\n locAim.attr('t').set([1,0,0])\n locUp.attr('t').set(upVector)\n grpIn('mirrorGrpTmp', loc)\n\n pm.setAttr('mirrorGrpTmp.scale'+axis, -1)\n\n mloc.attr('translate'+axis).set( mloc.attr('translate'+axis).get() * -1 )\n\n aimCon=pm.aimConstraint(locAim, mloc, aimVector=[1,0,0], upVector=upVector, worldUpObject=locUp, worldUpType='object', mo=0)\n snap(driver=mloc, driven=mirrorObj)\n\n pm.delete('mirrorGrpTmp', mloc)", "def move_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point", "def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5", "def rs_edit_upd(obj):\n verts = [x.co for x in obj.data.vertices]\n if verts[0] != Vector():\n fix = Vector(verts[0])\n for i in range(len(verts)):\n obj.data.vertices[i].co = obj.data.vertices[i].co - fix\n\n obj.data.update()\n obj.location += fix\n verts[1] = (verts[2] + verts[0]) / 2\n verts[3] = (verts[2] + verts[4]) / 2\n verts[5] = (verts[4] + verts[6]) / 2\n verts[7] = (verts[6] + verts[0]) / 2\n for i in range(len(verts)):\n obj.data.vertices[i].co = verts[i]\n\n obj.data.update()", "def reset_object_pose(self):\n roll = np.random.rand() * np.pi * 2\n pitch = np.random.rand() * np.pi * 2\n yaw = np.random.rand() * np.pi * 2\n pybullet.resetBasePositionAndOrientation(\n self.object_id,\n [0, 0, 0],\n pybullet.getQuaternionFromEuler([roll, pitch, yaw]))\n pos, rot = pybullet.getBasePositionAndOrientation(self.object_id)\n self.object_coords = coordinates.Coordinates(\n pos=pos, rot=coordinates.math.xyzw2wxyz(rot))", "def grasp_planning(object, object_pose1_world, object_pose2_world,\n palm_pose_l_object, palm_pose_r_object, N=200, init=True):\n primitive_name = 'grasping'\n # 0. get initial palm poses in world frame\n palm_poses_initial_world = planning_helper.palm_poses_from_object(\n object_pose=object_pose1_world,\n palm_pose_l_object=palm_pose_l_object,\n palm_pose_r_object=palm_pose_r_object)\n\n grasp_width = planning_helper.grasp_width_from_palm_poses(\n palm_pose_l_object, palm_pose_r_object)\n # grasp_width = grasp_width/1.025\n # grasp_width = 0.086 - 0.006\n # grasp_width = 0.086\n # grasp_width = 0.05206\n # grasp_width = 0.135\n # print(\"grasp width: \" + str(grasp_width))\n\n # 1. get lifted object poses\n object_pose_lifted_world = copy.deepcopy(object_pose1_world)\n\n if init:\n object_pose_lifted_world.pose.position.z += 0.05\n # object_pose_lifted_world.pose.position.z += 0.05\n object_pose2_world.pose.position.z += 0.0025\n\n # 2. get lifted palm poses\n palm_poses_lifted_world = planning_helper.palm_poses_from_object(\n object_pose=object_pose_lifted_world,\n palm_pose_l_object=palm_pose_l_object,\n palm_pose_r_object=palm_pose_r_object)\n\n # 3. 
get rotated object pose\n object_pose_rotated_world = copy.deepcopy(object_pose2_world)\n\n # if init:\n # object_pose_rotated_world.pose.position.z += 0.05\n object_pose_rotated_world.pose.position.z += 0.05\n\n palm_poses_rotated_world = planning_helper.palm_poses_from_object(\n object_pose=object_pose_rotated_world,\n palm_pose_l_object=palm_pose_l_object,\n palm_pose_r_object=palm_pose_r_object)\n\n # 4. get final configuration\n palm_poses_final_world = planning_helper.palm_poses_from_object(\n object_pose=object_pose2_world,\n palm_pose_l_object=palm_pose_l_object,\n palm_pose_r_object=palm_pose_r_object)\n\n # 3. generate pose plans\n # 3.1. initialize plan\n initial_plan = planning_helper.initialize_plan(\n palm_poses_initial=palm_poses_initial_world,\n object_pose_initial=object_pose1_world,\n primitive=primitive_name,\n plan_name='initial_config')\n\n # 3.2. lift the object\n lift_plan = planning_helper.move_cart_synchro(\n palm_poses_final=palm_poses_lifted_world,\n grasp_width=grasp_width,\n plan_previous=initial_plan,\n primitive=primitive_name,\n plan_name='lift_object',\n N=10)\n\n # 3.3. rotate the object\n rotate_plan = planning_helper.move_cart_synchro(\n palm_poses_final=palm_poses_rotated_world,\n grasp_width=grasp_width,\n plan_previous=lift_plan,\n primitive=primitive_name,\n plan_name='rotate_object_final',\n N=N/2,\n is_replan=True)\n\n # 3.4. place the object\n place_plan = planning_helper.move_cart_synchro(\n palm_poses_final=palm_poses_final_world,\n grasp_width=grasp_width,\n plan_previous=rotate_plan,\n primitive=primitive_name,\n plan_name='place_object',\n N=20)\n return [lift_plan] + [rotate_plan] + [place_plan]", "def random_pos(self, ):\n self.pos_item['needle'] = self.shuffle_pos()\n self.pos_item['ether'] = self.shuffle_pos()\n self.pos_item['tube'] = self.shuffle_pos()", "def randLoc(this):\n from temp_aber import randperc, trapch\n\n if randperc() > 50:\n this.locId = -5\n else:\n this.locId = -183\n\n trapch(this.locId)", "def mutate_point_wline(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 1: seed = 0\n if seed == 0:\n insert_point_wline(mutated_genome,index)\n elif seed == 1:\n remove_point_wline(mutated_genome,index)\n elif seed == 2:\n switch_points_wline(mutated_genome,index)\n elif seed == 3:\n shuffle_points_wline(mutated_genome,index)\n elif seed == 4:\n move_point_wline(mutated_genome,index)\n elif seed == 5:\n shift_point_wline(mutated_genome,index)\n elif seed == 6:\n increment_point_wline(mutated_genome,index)\n else: #seed == 7:\n decrement_point_wline(mutated_genome,index)", "def choose_starting_points(self, agent):\n # Left Side\n if agent % 4 == 1:\n if self.left_side[\"x_max\"] != self.left_side[\"x_min\"]:\n x = (self.left_side[\"x_max\"] + self.left_side[\"x_min\"])/2\n else:\n x = self.left_side[\"x_max\"]\n if self.left_side[\"y_max\"] != self.left_side[\"y_min\"]:\n y = (self.left_side[\"y_max\"] + self.left_side[\"y_min\"])/2\n else:\n y = self.left_side[\"y_max\"]\n # Right Side\n elif agent % 4 == 2:\n if self.right_side[\"x_max\"] != self.right_side[\"x_min\"]:\n x = (self.right_side[\"x_max\"] + self.right_side[\"x_min\"])/2\n else:\n x = self.right_side[\"x_max\"]\n if self.right_side[\"y_max\"] != self.right_side[\"y_min\"]:\n y = (self.right_side[\"y_max\"] + self.right_side[\"y_min\"])/2\n else:\n y = self.right_side[\"y_max\"]\n # Top\n elif agent % 4 == 3:\n if self.top[\"x_max\"] != self.top[\"x_min\"]:\n x = 
(self.top[\"x_max\"] + self.top[\"x_min\"])/2\n else:\n x = self.top[\"x_max\"]\n if self.top[\"y_max\"] != self.top[\"y_min\"]:\n y = (self.top[\"y_max\"] + self.top[\"y_min\"])/2\n else:\n y = self.top[\"y_max\"]\n # Bottom\n elif agent % 4 == 0:\n if self.bottom[\"x_max\"] != self.bottom[\"x_min\"]:\n x = (self.bottom[\"x_max\"] + self.bottom[\"x_min\"])/2\n else:\n x = self.bottom[\"x_max\"]\n if self.bottom[\"y_max\"] != self.bottom[\"y_min\"]:\n y = (self.bottom[\"y_max\"] + self.bottom[\"y_min\"])/2\n else:\n y = self.bottom[\"y_max\"]\n else:\n raise ValueError(\"Invalid number for sides!\")\n\n return x, y" ]
[ "0.72296953", "0.55692446", "0.5526078", "0.5247486", "0.52031153", "0.51109755", "0.509799", "0.50979686", "0.5030604", "0.49457812", "0.49372828", "0.49313664", "0.49087912", "0.48839825", "0.48837227", "0.48356238", "0.4801894", "0.47983634", "0.47845507", "0.47785735", "0.47656557", "0.4760809", "0.47601318", "0.47535637", "0.4712573", "0.47021112", "0.46929336", "0.46874505", "0.46708715", "0.46707565" ]
0.82932264
0
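Both randomize_position in the row above and randomize_distance in the next row call get_guide_endpoints(guide), which none of these rows define. A minimal sketch, assuming the guide is a two-vertex Blender mesh object and the pre-2.8 mathutils API (matrix-vector multiplication via *) that randomize_distance also uses:

# Hypothetical helper, not part of the dataset rows; the real implementation may differ.
def get_guide_endpoints(guide):
    # Convert the first and last local vertex coordinates to world space so the
    # returned points can be mixed directly with obj.location values.
    world = guide.matrix_world
    p1 = world * guide.data.vertices[0].co
    p2 = world * guide.data.vertices[-1].co
    return p1, p2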
Center the position of an object `obj` along a linear guide path `guide`, and randomize its distance on the axis perpendicular to that guide.
def randomize_distance(obj, guide, scale_bounds=(-2, 0)):
    p1, p2 = get_guide_endpoints(guide)
    midpoint = p1 / 2 + p2 / 2

    # Get vector perpendicular to the guide.
    diff_rot = Matrix.Rotation(math.pi / 2, 3, 'Z') * (p2 - p1)

    scale_factor = scale_bounds[0] + random.random() * (scale_bounds[1] - scale_bounds[0])
    target_point = midpoint + scale_factor * diff_rot

    obj.location[0] = target_point[0]
    obj.location[1] = target_point[1]

    return scale_factor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randomize_position(obj, guide):\n p1, p2 = get_guide_endpoints(guide)\n t = random.random()\n target_point = p1 + t * (p2 - p1)\n\n # update X and Y coordinates.\n obj.location[0] = target_point[0]\n obj.location[1] = target_point[1]\n\n return t", "def center(self, obj):\n mn0 = self.master.xy >= obj.center\n mn1 = self.master.xy <= obj.center\n\n point_list = [self.master.xy[mn0], self.master.xy[mn1], self.master.xy[mn0[0], mn1[1]], self.master.xy[mn1[0], mn0[1]]] # 4 physical points near the center coordinate.\n dist_list = []\n idx = 0\n for point in point_list:\n dist_list.append([idx, np.linalg.norm(point - obj.center)]) # Calculate Euclidean distances.\n idx += 1\n dist_sorted = sorted(dist_list, key=lambda distance : distance[1]) # Sort distances in ascending order.\n return self.master.mn(point_list[dist_sorted[0][0]]) # Convert the closest point to abstract coordinate and then return.", "def center(self, obj):\n return self.phy2abs.center(obj)", "def object_center(self, goal):\n self.depth_I = 0.\n def rc_to_obj(self, goal):\n \"\"\"Add constant rc commands to depth and heading hold\"\"\"\n channels = self.depth_heading_rc(goal)\n\n # move to object\n yrc_cmd = self.get_obj_y(goal, False)\n zrc_cmd = self.get_obj_z(goal)\n\n # an integrated controller\n self.depth_I += (zrc_cmd - self.pwm_center) * 0.1 # 1/10 Hz\n # limit depth integral\n if abs(self.depth_I) > self.depth_Imax:\n if self.depth_I < 0:\n self.depth_I = -self.depth_Imax\n else:\n self.depth_I = self.depth_Imax\n\n channels[self.xchannel] = goal.x_rc_vel\n channels[self.ychannel] = yrc_cmd\n channels[self.zchannel] += self.depth_I\n hout = self.get_heading_pwm(goal)\n return channels\n\n def is_term(self, goal):\n \"\"\"terminate when the gate is a certain size\"\"\"\n return self.object_width > self.maxwidth\n\n self.behavior_loop(goal, rc_to_obj, is_term)", "def point_at(obj, target, roll=0):\n\tif not isinstance(target, mathutils.Vector):\n\t\ttarget = mathutils.Vector(target)\n\tloc = obj.location\n\t# direction points from the object to the target\n\tdirection = target - loc\n\n\tquat = direction.to_track_quat('-Z', 'Y')\n\n\t# /usr/share/blender/scripts/addons/add_advanced_objects_menu/arrange_on_curve.py\n\tquat = quat.to_matrix().to_4x4()\n\trollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')\n\n\t# remember the current location, since assigning to obj.matrix_world changes it\n\tloc = loc.to_tuple()\n\tobj.matrix_world = quat * rollMatrix\n\tobj.location = loc", "def point_at(obj, target, roll=0):\n if not isinstance(target, mathutils.Vector):\n target = mathutils.Vector(target)\n loc = obj.location\n # direction points from the object to the target\n direction = target - loc\n\n quat = direction.to_track_quat('-Z', 'Y')\n\n # /usr/share/blender/scripts/addons/add_advanced_objects_menu/arrange_on_curve.py\n quat = quat.to_matrix().to_4x4()\n rollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')\n\n # remember the current location, since assigning to obj.matrix_world changes it\n loc = loc.to_tuple()\n obj.matrix_world = quat * rollMatrix\n obj.location = loc", "def move_center(obj):\n desktop = QApplication.desktop()\n dw = desktop.width()\n dh = desktop.height()\n size = obj.size()\n mw = size.width()\n mh = size.height()\n obj.move(dw/2-mw/2, dh/2-mh/2)", "def fill(obj, prob = 1, collide_obj = None, collide_callback = None) :\n for x in range(int(Globals.instance.WIDTH/Globals.instance.GRID_SIZE)):\n for y in range(int(Globals.instance.HEIGHT/Globals.instance.GRID_SIZE)):\n if random.uniform(0, 1) > 
prob:\n continue\n if at((x,y)) is None:\n o = obj(pos=(x,y))\n if collide_obj and collide_callback:\n if isinstance(collide_obj, (list, tuple)):\n for obj in collide_obj:\n o.collides(obj, collide_callback)\n else:\n o.collides(collide_obj, collide_callback)", "def _object_kinematics_params(self):\n obj_length, obj_width = self._obj_dims\n # Initial object position w.r.t its center\n obj_coords = np.matmul( # (2, 5) array of x-y coords of five points\n np.array([ # rotational matrix\n [np.cos(self._theta_init), np.sin(self._theta_init)],\n [-np.sin(self._theta_init), np.cos(self._theta_init)]\n ]),\n 0.5 * np.array([ # relative postion matrix\n [0, obj_length, obj_length, -obj_length, -obj_length],\n [0, obj_width, -obj_width, -obj_width, obj_width]\n ])\n )\n feat_vec_desired = obj_coords * self._fz_ratio\n\n # Global initial object position\n obj_coords += np.array([[self._x_obj_0], [self._y_obj_0]])\n speed = np.array([\n [(self._x_obj_f - self._x_obj_0) / self._t_sim],\n [(self._y_obj_f - self._y_obj_0) / self._t_sim]\n ])\n rot_speed = (self._theta_final - self._theta_init) / self._t_sim\n return obj_coords, speed, rot_speed, feat_vec_desired", "def computeOffsets_guided(\n self, coatPath: str, pantsPath: str, tarPara: list, \n subObj: Mesh = None, subTex: str = None, is_hres: bool = True):\n \n smpl = self.hresSMPL.copy() if is_hres else self.stdSMPL.copy()\n splt = self.num_separation[ 'hres' if is_hres else 'std' ]\n \n ## per-vertex offsets\n v_offsets_t = np.zeros_like(smpl.r)\n \n ## Pants\n offset_pants_t = compute_offset_tPose(\n smpl, pantsPath, self.thresholds['pants'], splt, self.verbose_on\n )\n mask = np.linalg.norm(offset_pants_t, axis=1) > np.linalg.norm(v_offsets_t, axis=1)\n v_offsets_t[mask] = offset_pants_t[mask]\n \n ## coat\n # None for sub84 and others subs without coat in their folder\n if coatPath is not None: \n offset_coat_t = compute_offset_tPose(\n smpl, coatPath, self.thresholds['coats'], splt, self.verbose_on\n )\n mask = np.linalg.norm(offset_coat_t, axis=1) > np.linalg.norm(v_offsets_t, axis=1)\n v_offsets_t[mask] = offset_coat_t[mask]\n \n ## Dress body\n if self.verbose_on and subObj is not None:\n print('show mesh in self.computeOffsets_guided().')\n smpl = smplFromParas(smpl, v_offsets_t, tarPara[0], tarPara[1], tarPara[2])\n dressed_body = Mesh(smpl.r, smpl.f)\n dressed_body.vt = subObj.vt\n dressed_body.ft = subObj.ft\n dressed_body.set_texture_image(subTex)\n \n mvs = MeshViewers((1, 1))\n mvs[0][0].set_static_meshes([dressed_body])\n \n return v_offsets_t", "def set_object_in_front_of_agent(sim, obj_id, z_offset=-1.5):\n agent_transform = sim.agents[0].scene_node.transformation_matrix()\n obj_translation = agent_transform.transform_point(\n np.array([0, 0, z_offset])\n )\n sim.set_translation(obj_translation, obj_id)\n\n obj_node = sim.get_object_scene_node(obj_id)\n xform_bb = habitat_sim.geo.get_transformed_bb(\n obj_node.cumulative_bb, obj_node.transformation\n )\n\n # also account for collision margin of the scene\n scene_collision_margin = 0.04\n y_translation = mn.Vector3(\n 0, xform_bb.size_y() / 2.0 + scene_collision_margin, 0\n )\n sim.set_translation(y_translation + sim.get_translation(obj_id), obj_id)", "def random_pose_next_to_goal(self, goal_pose, spat_rel, env):\n goal_pose = utils.xyz_to_pix(goal_pose[0], self.bounds, self.pix_size)\n obj_size = (0.04, 0.04, 0.04)\n erode_size = self.get_erode_size(obj_size)\n\n _, hmap, obj_mask = self.get_true_image(env)\n free = self.compute_free_space(env, obj_mask)\n\n # Find valid pose\n 
compute_angle_wrt_goal = functools.partial(\n compute_angle, x2=goal_pose[0], y2=goal_pose[1])\n\n def compute_dist(i, j):\n dist = np.sqrt((goal_pose[0] - i)**2 + (j - goal_pose[1])**2)\n return dist\n\n angle_from_goal = np.fromfunction(compute_angle_wrt_goal, free.shape)\n dist_from_goal = np.fromfunction(compute_dist, free.shape)\n is_valid_dist = np.vectorize(lambda x: x < erode_size * 2)\n is_valid = self.find_valid_region(spat_rel)\n\n # For each occupied region, expand the region a little bit more to avoid\n # placing objects too close by.\n free = cv2.erode(free, np.ones((erode_size, erode_size), np.uint8))\n free[~is_valid(angle_from_goal)] = 0\n free[~is_valid_dist(dist_from_goal)] = 0\n (\n free[0:erode_size, :],\n free[:, 0:erode_size],\n free[-erode_size:, :],\n free[:, -erode_size:],\n ) = (0, 0, 0, 0)\n\n if np.sum(free) == 0:\n print(\"There is no free space!!\")\n return None, None\n\n pos, rot = self.sample_pos_in_free_space(free, hmap, obj_size)\n return pos, rot", "def force_set(self, pos):\n self.rect.center = pos", "def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "def guide_at(self, pos):\n rose = self._rose\n pos = rose.mapFromGlobal(pos)\n return rose.guideAt(pos)", "def distance_to(self, obj):\n\t\tx, y = self.position\n\t\tobj_x, obj_y = obj.position\n\t\treturn hypot(x - obj_x, y - obj_y)", "def reorient_obj(obj, step_ang, plane):\n start_angle = 0\n end_angle = math.pi / 2\n min_area = math.inf\n best_angle = 0\n start_axis = array.array(\"d\", obj.Centroid)\n end_axis = []\n index = [0] * 3\n\n if plane == \"xy\":\n index = [1, 1, 0]\n end_axis = array.array(\"d\", [obj.Centroid[0], obj.Centroid[1], obj.Centroid[2] + 1])\n elif plane == \"xz\":\n index = [1, 0, 1]\n end_axis = array.array(\"d\", [obj.Centroid[0], obj.Centroid[1] + 1, obj.Centroid[2]])\n elif plane == \"yz\":\n index = [0, 1, 1]\n end_axis = array.array(\"d\", [obj.Centroid[0] + 1, obj.Centroid[1], obj.Centroid[2]])\n\n min_pt, max_pt = obj.GetBoundingBox()\n # projecting the points to the plane\n project_points_to_plane(min_pt, max_pt, index)\n while start_angle <= end_angle:\n obj.Rotate3D(start_axis, end_axis, step_ang)\n # compute the area\n dims = [(max_pt[0] - min_pt[0]), (max_pt[1] - min_pt[1]), (max_pt[2] - min_pt[2])]\n curr_area = 1\n for dim in dims:\n if dim > 0:\n curr_area *= dim\n if curr_area < min_area:\n min_area = curr_area\n best_angle = start_angle\n start_angle += step_ang\n min_pt, max_pt = obj.GetBoundingBox()\n # projecting the points to the plane\n project_points_to_plane(min_pt, max_pt, index)\n # rotate the object using the best angle\n obj.Rotate3D(start_axis, end_axis, best_angle)", "def move(self):\n possible_steps = self.model.grid.get_neighborhood(\n self.pos,\n moore=False, # implements Von Neumann neighborhood\n include_center=False)\n new_position = self.random.choice(possible_steps)\n self.heading = [new_position[0] - self.pos[0],\n new_position[1] - self.pos[1]]\n self.model.grid.move_agent(self, new_position)", "def autoMove(self) :\n\n\t\tdx = Places.getLoc(self.targetPlace)[0] - self.avatarNP.getX()\n\t\tdy = Places.getLoc(self.targetPlace)[1] - self.avatarNP.getY()\n\t\tdist = math.sqrt(dx*dx + dy*dy)\n\t\th0 = self.avatarNP.getH()\n\t\tif dist < 4 :\n\t\t\t# pick new target and determine deltaH\n\t\t\tnbors = Places.getNeighbors(self.targetPlace)\n\t\t\tx = 
random.randint(0,len(nbors)-1)\n\t\t\tif nbors[x] == self.oldPlace :\n\t\t\t\tx = (1 if x == 0 else x-1)\n\t\t\tt = nbors[x]\n\t\t\th = self.heading(\n\t\t\t\tself.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\tPlaces.getLoc(t)[0], Places.getLoc(t)[1])\n\t\t\tself.deltaH = h - h0\n\t\t\tif self.deltaH > 180 : self.deltaH -= 360\n\t\t\telif self.deltaH < -180 : self.deltaH += 360\n\t\t\tself.deltaH /= 2\n\t\t\tself.oldPlace = self.targetPlace\n\t\t\tself.targetPlace = t\n\t\t\tself.turning = True\n\n\t\t# adjust heading and position\n\t\tt = self.targetPlace\n\t\th = self.heading(self.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\t Places.getLoc(t)[0], Places.getLoc(t)[1])\n\t\tdh1 = h - h0\n\t\tif dh1 > 180 : dh1 -= 360\n\t\telif dh1 < -180 : dh1 += 360\n\t\tif self.turning :\n\t\t\tdh2 = self.deltaH * globalClock.getDt()\n\t\t\tif math.fabs(dh1) <= math.fabs(dh2) : \n\t\t\t\tself.turning = False\n\t\t\telse :\n\t\t\t\th = h0 + dh2\n\t\tself.avatarNP.setH(h)\n\t\tself.avatarNP.setFluidY(self.avatarNP,-2 * globalClock.getDt())\n\t\t\n\t\treturn\n\n\t\t\"\"\"\n\t\tif self.rotateDir == -1:\n\t\t\tself.rotateDir = random.randint(1,25) #chances to rotate\n\t\tif self.rotateDuration == -1:\n\t\t\tself.rotateDuration = random.randint(200,400)\n\n\t\t# guide the moving direction of the bot\n\t\tif self.rotateDir <= 3 : # turn left\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() + \\\n\t\t\t\t\t 40 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir <= 6 : # turn right\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() - \\\n\t\t\t\t\t 50 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir == 7 : # turn big left\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() + \\\n\t\t\t\t\t 102 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telif self.rotateDir == 8 : # turn big right\n\t\t\tself.avatarNP.setH(self.avatarNP.getH() - \\\n\t\t\t\t\t 102 * globalClock.getDt())\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\telse :\n\t\t\tself.rotateDuration -= 1\n\t\t\tif self.rotateDuration <= 0:\n\t\t\t\tself.rotateDuration = -1\n\t\t\t\tself.rotateDir = -1\n\t\t\tself.avatarNP.setFluidPos(self.avatarNP, 0,\n\t\t\t\t\t-1 * globalClock.getDt(),\n\t\t\t\t\tself.avatarNP.getZ() )\n\t\t# moving forward\n\t\t#self.avatarNP.setFluidPos(self.avatarNP, 0,\n\t#\t\t\t\t-1 * globalClock.getDt(),\n\t#\t\t\t\tself.avatarNP.getZ() )\n\t\treturn\n\t\t\"\"\"", "def create_fixed_object(self):\n self.obj = self.img[self.y-self.rad:self.y+self.rad,\n self.x-self.rad:self.x+self.rad]", "def center(self, anchors):\n # self.anchors_ = boxes[np.random.choice(n, self.k, replace=True)]\n if isinstance(anchors, list):\n anchors = np.array(anchors)\n self.anchors_ = anchors", "def unitySpawn(objID, prefab, pos, yaw, scale=1):\n\n if prefab == \"wall\":\n scaling = [mv.WALL_WIDTH, scale, mv.WALL_HEIGHT]\n else:\n scaling = [scale, scale, scale]\n hsc.write(hsc.makeID(objID) + \" = Instantiate(\" + prefab + \",\" + hsc.vf(pos) + \",\" + hsc.qf(yaw) + \");\")\n hsc.write(hsc.makeID(objID) + \".transform.localScale = \" + hsc.vf(scaling) + \";\")\n if objID > hsc.maxID[0]:\n hsc.maxID[0] = objID", "def 
build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n num_joints = options.get('numberJoints')\n single_joint = options.get('singleJoint')\n pickWalk_parent = options.get('pickWalkParent')\n\n num_joints += 1\n if single_joint:\n num_joints = 1\n\n # Builde joints\n if single_joint:\n jnt_zero, plc, jnt = self.guide_joint(constraint_type='parent')\n zero, ctrl = self.guide_ctrl(shape='circle', color='light_blue', driver=jnt, axis='X')\n ctrls = [ctrl]\n zeros = [zero]\n\n else:\n jnt_zeros, plcs, jnts = self.guide_joint_chain('', num_joints=num_joints)\n zeros, ctrls = [], []\n for i, jnt in enumerate(jnts[:-1]):\n letter = utils.letters[i]\n zero, ctrl = self.guide_ctrl(name=letter, shape='circle',\n color='light_blue', driver=jnt, axis='X')\n zeros.append(zero)\n ctrls.append(ctrl)\n\n mc.xform(zeros, jnt_zeros, r=1, t=[-1*self.mirror_value, 0, 0])\n\n # lock stuff\n pivots = [mc.listRelatives(c, p=1)[0] for c in ctrls]\n utils.set_attrs(zeros, l=1, k=0)\n utils.set_attrs(pivots, 't s', l=1, k=0)\n\n mc.setAttr(self.guide_master+'.offsetTranslateX', -0.5*self.mirror_value)\n\n # This finalizes your guide.\n self.finalize_guide()", "def quickMirror(objArray=None, upVector=[0,0,1], axis='X'):\n if objArray is None:\n objArray=pm.ls(sl=1)\n for obj in objArray:\n nSplit=libName.nameSplit(obj)\n if nSplit[-1][0] == 'L':\n nSplit[-1][0]='R'\n elif nSplit[-1][0] == 'R':\n nSplit[-1][0]='L'\n else:\n print 'obj \"%s\" has been skipped cause prefix is neither \"L\" nor \"R\"'\n break\n\n mirrorObj=libName.nameRevertOriginal(splitName=nSplit)\n if pm.objExists(mirrorObj) == 0:\n print 'obj %s doesnt Exists. 
Mirrorring Skipped!!!!'%(mirrorObj)\n\n else:\n loc=pm.spaceLocator(n=obj+'_tmpLocQuickMirror')\n locUp=pm.spaceLocator(n=obj+'_tmpLocQuickMirrorAim')\n locAim=pm.spaceLocator(n=obj+'_tmpLocQuickMirrorUp')\n mloc=pm.spaceLocator(n=obj+'_tmpLocQuickMirrorMirror')\n\n snap(driver=obj, driven=loc)\n snap(driver=obj, driven=mloc)\n pm.parent(locUp, locAim, loc)\n locAim.attr('t').set([1,0,0])\n locUp.attr('t').set(upVector)\n grpIn('mirrorGrpTmp', loc)\n\n pm.setAttr('mirrorGrpTmp.scale'+axis, -1)\n\n mloc.attr('translate'+axis).set( mloc.attr('translate'+axis).get() * -1 )\n\n aimCon=pm.aimConstraint(locAim, mloc, aimVector=[1,0,0], upVector=upVector, worldUpObject=locUp, worldUpType='object', mo=0)\n snap(driver=mloc, driven=mirrorObj)\n\n pm.delete('mirrorGrpTmp', mloc)", "def orbit_eliptic(center, obj):\n obj.distance += obj.d_dist*obj.d_dist_dir\n\n if obj.distance < obj.min_dist:\n obj.d_dist_dir = 1\n\n elif obj.distance > obj.max_dist:\n obj.d_dist_dir = -1\n\n orbit_rotate(center, obj, obj.d_ang, obj.distance, obj.orbit_ang)", "def set_pub_robot_pose(self, x, y, yaw):\r\n self.publisher_robot.set_pose_by_center(x, y, yaw)", "def _set_anchor_center(img):\n img.anchor_x = int(img.width / 2)\n img.anchor_y = int(img.height / 2)", "def center_on_spawn(self):\n self.center_on(*self.world.metadata['playerStart'])", "def distance_to_static_object(self, object_type: str, object_points: np.ndarray, point: np.ndarray) -> np.ndarray:\n \n if object_type == 'boxAIK':\n # [\"tfl\", \"tfr\", \"tbl\", \"tbr\", \"bfl\", \"bfr\", \"bbl\", \"bbr\"]\n box3D = self.create_box(object_points[0], object_points[1], object_points[2])\n\n # Focusing on the botom front left corner of the cube we will obtain the local coordinate system\n x_vector = (box3D[5] - box3D[4]) # bfr - bfl\n y_vector = (box3D[6] - box3D[4]) # tfl - bfl\n z_vector = (box3D[0] - box3D[4]) # bbl - bfl\n\n x_local = x_vector / np.linalg.norm(x_vector)\n y_local = y_vector / np.linalg.norm(y_vector)\n z_local = z_vector / np.linalg.norm(z_vector)\n\n # Now we have to find the rotation to align our local coordinate system with the world coordinate system\n rotation, _ = R.align_vectors([[1,0,0],[0,1,0],[0,0,1]], [x_local, y_local, z_local])\n\n # Now we can apply the rotation to the box coordinates and to the point\n box3D_a = rotation.apply(box3D)\n point_a = rotation.apply(point)\n\n # Find the limits of the rotated box\n x_array = box3D_a[:,0]\n y_array = box3D_a[:,1]\n z_array = box3D_a[:,2]\n\n min_x = np.min(x_array)\n max_x = np.max(x_array)\n min_y = np.min(y_array)\n max_y = np.max(y_array)\n min_z = np.min(z_array)\n max_z = np.max(z_array)\n \n # First check if the point is inside, to directly return [0,0,0]\n if (point_a[0] > min_x and point_a[0] < max_x) and (point_a[1] > min_y and point_a[1] < max_y) and (point_a[2] > min_z and point_a[2] < max_z):\n return [0,0,0]\n\n # If its not inside, we calculate the closest point within the cube\n closest_point = [0,0,0]\n\n # X coordinate\n if point_a[0] < min_x:\n closest_point[0] = min_x\n elif point_a[0] > max_x:\n closest_point[0] = max_x\n else:\n closest_point[0] = point_a[0]\n\n # Y coordinate\n if point_a[1] < min_y:\n closest_point[1] = min_y\n elif point_a[1] > max_y:\n closest_point[1] = max_y\n else:\n closest_point[1] = point_a[1]\n \n # Z coordinate\n if point_a[2] < min_z:\n closest_point[2] = min_z\n elif point_a[2] > max_z:\n closest_point[2] = max_z\n else:\n closest_point[2] = point_a[2]\n \n # Then return the distance\n distance = (closest_point - point_a)\n return 
distance\n \n elif object_type == 'cylinderAIK':\n # For the cylinderAIK we have 2 points, top face center and top face radius point\n center_top = object_points[0]\n radius_top = object_points[1]\n \n # Radius of the top face, will be used later\n radius_distance = np.linalg.norm(center_top - radius_top)\n\n # Check if the point is above the cylinder\n if point[2] >= center_top[2]:\n # Check if the point is also inside of the silhouette of the top circle\n center_top_2D = np.asarray([center_top[0], center_top[1]])\n radius_top_2D = np.asarray([radius_top[0], radius_top[1]])\n point_2D = np.asarray([point[0], point[1]])\n\n radius_distance_2D = np.linalg.norm(center_top_2D - radius_top_2D)\n distance_2D = np.linalg.norm(center_top_2D - point_2D)\n\n if distance_2D <= radius_distance_2D:\n # Inside the silhouette. We just need to check the distance to the top face surface\n # Obtain the projection of the point into the surface plane by changing the Z value of the point\n projected_point = np.asarray([point[0], point[1], center_top[2]])\n # Then calculate the distance between the original point and the projected one\n distance = (projected_point - point)\n return distance\n else: \n # Outside the silhouette. We need to find the point in the top surface radius closest to the point\n # Obtain the projection of the point into the surface plane by changing the Y value of the point\n projected_point = np.asarray([point[0], point[1], center_top[2]])\n # Obtain the directional normalized vector between the center of the surface and the projected point\n direction_vector = (projected_point - center_top)\n direction = direction_vector / np.linalg.norm(direction_vector)\n # Multiply the direction by the radius of the surface to obtain the closest point on the edge\n closest_point = center_top + (direction * radius_distance)\n # Now we can just check the distance between the points\n distance = (closest_point - point)\n return distance\n else:\n # Find the cylinder center point at the same height as the outside point\n center_point = np.asarray([center_top[0], center_top[1], point[2]])\n # Obtain the directional normalized vector between the new center of the object and the point\n direction_vector = (point - center_point)\n direction = direction_vector / np.linalg.norm(direction_vector)\n # Multiply the direction by the radius to obtain the edge point of the object closest to the outside point \n closest_point = center_point + (direction * radius_distance)\n # Now we can check the distance between the points\n distance = (closest_point - point)\n return distance", "def room_center(self):\n std_dev = np.std([point.length for point in self.points])\n # rospy.loginfo(std_dev)\n if std_dev < self.ROOM_CENTER_CUTOFF:\n self.get_cmd_vel = self.start_360()\n return self.start_360()\n closest_points = sorted(self.points)[:self.room_center_number_points]\n angles = [point.angle_radians for point in closest_points]\n imaginary_numbers = [np.exp(angle*1j) for angle in angles]\n angle_mean = np.angle(np.mean(imaginary_numbers))\n if angle_mean < 0:\n angle_mean += 2*pi\n\n angle = angle_mean / (2 * pi)\n if angle < 1/2:\n linear_velocity = np.interp(angle, [0, 1/2], [-self.MAX_LINEAR_SPEED, self.MAX_LINEAR_SPEED])\n else:\n linear_velocity = np.interp(angle, [1/2, 1], [self.MAX_LINEAR_SPEED, -self.MAX_LINEAR_SPEED])\n\n if 1/4 < angle < 3/4:\n angular_velocity = np.interp(angle, [1/4, 3/4], [-self.MAX_ANGULAR_SPEED, self.MAX_ANGULAR_SPEED])\n elif 0 <= angle <= 1/4:\n angular_velocity = np.interp(angle, [0, 1/4], 
[0, self.MAX_ANGULAR_SPEED])\n else:\n angular_velocity = np.interp(angle, [3/4, 1], [-self.MAX_ANGULAR_SPEED, 0])\n\n cmd_vel = Twist()\n cmd_vel.angular.z = angular_velocity\n cmd_vel.linear.x = linear_velocity\n rospy.loginfo(\"wall angle: {:.4f} -> linear: {:.4f}, angular: {:.4f}. std_dev: {:.3f}\".format(angle, linear_velocity, angular_velocity, std_dev))\n return cmd_vel" ]
[ "0.7362369", "0.5656115", "0.5133738", "0.4947574", "0.49140757", "0.4901315", "0.4811237", "0.46052918", "0.4575708", "0.45413876", "0.4496817", "0.44632584", "0.4445101", "0.44152486", "0.44121188", "0.44050354", "0.43854836", "0.43493664", "0.43398598", "0.43357167", "0.43279317", "0.4292639", "0.4286024", "0.42828518", "0.42670223", "0.42499313", "0.4248447", "0.42358574", "0.42037752", "0.41794533" ]
0.75807685
0
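The prepare_scene document in the following row also calls randomize_rotation(person, bounds=...), which is likewise not defined in any of these rows. A sketch under the assumption that only the Z (yaw) Euler rotation is randomized within the given bounds; the real implementation may differ:

import math
import random

# Hypothetical helper, written only to match how prepare_scene calls it.
def randomize_rotation(obj, bounds=(-math.pi, math.pi)):
    # Sample a yaw angle uniformly from the bounds, apply it to the object's
    # Euler Z rotation, and return the sampled angle as the manipulation record.
    angle = bounds[0] + random.random() * (bounds[1] - bounds[0])
    obj.rotation_euler[2] = angle
    return angle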
Move candidate referents to random positions in the given reference frames. `candidate_setting` is of the form `[(person, guide_path), (person2, guide_path), ...]`
def prepare_scene(data, candidate_setting, randomization_mode):
    manipulations = defaultdict(dict)

    for person, guide in candidate_setting.items():
        if randomization_mode == "none":
            # Center the candidate along the guide.
            p1, p2 = get_guide_endpoints(guide)
            target = p1 / 2 + p2 / 2
            person.location[0] = target[0]
            person.location[1] = target[1]
        elif randomization_mode == "20180313":
            m_pos = randomize_position(person, guide)

            rotation_bounds = (-math.pi, math.pi)
            if get_guide_type(guide) == "functional":
                # Functional frames are very angle-dependent -- we don't expect
                # them to hold for > 90 deg rotations. They probably won't hold for
                # even > 45 deg rotations -- but we should check :)
                rotation_bounds = (-math.pi / 4, math.pi / 4)
            m_rot = randomize_rotation(person, bounds=rotation_bounds)

            manipulations[person]["location"] = m_pos
            manipulations[person]["rotation"] = m_rot
        elif randomization_mode == "20180410":
            m_dist = randomize_distance(person, guide)
            manipulations[person]["distance"] = m_dist

        person.hide_render = False

    for person in set(get_candidates(data)) - set(candidate_setting.keys()):
        person.hide_render = True

    return dict(manipulations)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def switch_points(mutated_genome,index):\n point_index1 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n point_index2 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n temp = mutated_genome[index][2][point_index1]\n mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n mutated_genome[index][2][point_index2] = temp", "def randomize_position(obj, guide):\n p1, p2 = get_guide_endpoints(guide)\n t = random.random()\n target_point = p1 + t * (p2 - p1)\n\n # update X and Y coordinates.\n obj.location[0] = target_point[0]\n obj.location[1] = target_point[1]\n\n return t", "def shuffle_points(mutated_genome,index):\n random.shuffle(mutated_genome[index][2])", "def move_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point", "def random_reset_mutation(random, candidate, args):\r\n bounder = args['_ec'].bounder\r\n try:\r\n values = bounder.values\r\n except AttributeError:\r\n values = None\r\n if values is not None:\r\n rate = args.setdefault('mutation_rate', 0.1)\r\n mutant = copy.copy(candidate)\r\n for i, m in enumerate(mutant):\r\n if random.random() < rate:\r\n mutant[i] = random.choice(values)\r\n return mutant\r\n else:\r\n return candidate", "def set_candidate(self, roster):\n if len(self.candidate_list) < self.max_participants and len(roster) > 0:\n random.shuffle(roster)\n participant = roster.pop()\n self.candidate_list.append(participant)", "def changeState(self, xyPoints):\n nPts = len(xyPoints)\n ind0 = random.randint(1, nPts-1)\n ind1 = random.randint(1, nPts-1)\n while ind1 == ind0:\n ind1 = random.randint(1, nPts-1)\n # make copy of the sources to make sure the swap works correctly\n xyPoints[ind0], xyPoints[ind1] = tuple(xyPoints[ind1]), tuple(xyPoints[ind0])", "def move_point_rect(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n old_points = list(mutated_genome[index][2])\n old_points[random.randint(0,1)] = point\n mutated_genome[index][2] = tuple(old_points)", "def permutation(self, **kwargs):\n self.locator.recognise_grid()\n red = self.locator.detect_colour(0, 'red')\n rospy.loginfo(\"permutation(): looking for red object: %s\" % str(red))\n blue = self.locator.detect_colour(0, 'blue')\n rospy.loginfo(\"permutation(): looking for blue object: %s\" % str(blue))\n if red[0] < blue[0]:\n sequence = [('red','M'),('blue','A'),('red','D')]\n else:\n sequence = [('red','M'),('blue','D'),('red','A')]\n\n colours = self.locator.tetris_blocks.keys() \n self.target_locations['D']\n self.target_locations['A']\n self.target_locations['M']\n answer = 'n'\n action_number = 0\n while action_number < len(sequence):\n\n (colour,pos) = sequence[action_number]\n \n rospy.loginfo('permutation(): %s to position %s' % (colour,pos))\n\n self.locator.update_pose()\n goal_pose = self.locator.pose[:]\n goal_pose[0:2] = self.target_locations[pos][0:2]\n success = self.locator.locate(colour, goal_pose)\n if not success:\n answer = raw_input('Failed to execute action. Try again? 
(y/n): ')\n if answer in ('y'):\n action_number -= 1\n continue\n \n action_number += 1", "def mutate_point_circ(mutated_genome):\n seed = random.randint(0,3)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_circ(mutated_genome,index)\n elif seed == 1:\n shift_point_circ(mutated_genome,index)\n elif seed == 2:\n move_radius_circ(mutated_genome,index)\n else: #seed == 3:\n shift_radius_circ(mutated_genome,index)", "def move_point_wline(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(1,max(1,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point", "def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])", "def set_reference_point(self):\n theta = 2*math.pi-math.radians(self.REFERENCE_ANGLE)\n origin_pos = self.locations[0].get_position()\n target_pos = self.locations[1].get_position()\n v = self.calculate_displacement(origin_pos, target_pos, transpose=True)\n v_norm = math.sqrt(v[0]**2+v[1]**2)\n rotation_matrix = np.matrix([[math.cos(theta), -math.sin(theta)],\n [math.sin(theta), math.cos(theta)]])\n offset_x, offset_y = origin_pos\n reference_point_matrix = np.array([offset_x, offset_y])+np.transpose((1/v_norm)*rotation_matrix*v)\n self.reference_point = reference_point_matrix.tolist()[0]", "def random_pose_next_to_goal(self, goal_pose, spat_rel, env):\n goal_pose = utils.xyz_to_pix(goal_pose[0], self.bounds, self.pix_size)\n obj_size = (0.04, 0.04, 0.04)\n erode_size = self.get_erode_size(obj_size)\n\n _, hmap, obj_mask = self.get_true_image(env)\n free = self.compute_free_space(env, obj_mask)\n\n # Find valid pose\n compute_angle_wrt_goal = functools.partial(\n compute_angle, x2=goal_pose[0], y2=goal_pose[1])\n\n def compute_dist(i, j):\n dist = np.sqrt((goal_pose[0] - i)**2 + (j - goal_pose[1])**2)\n return dist\n\n angle_from_goal = np.fromfunction(compute_angle_wrt_goal, free.shape)\n dist_from_goal = np.fromfunction(compute_dist, free.shape)\n is_valid_dist = np.vectorize(lambda x: x < erode_size * 2)\n is_valid = self.find_valid_region(spat_rel)\n\n # For each occupied region, expand the region a little bit more to avoid\n # placing objects too close by.\n free = cv2.erode(free, np.ones((erode_size, erode_size), np.uint8))\n free[~is_valid(angle_from_goal)] = 0\n free[~is_valid_dist(dist_from_goal)] = 0\n (\n free[0:erode_size, :],\n free[:, 0:erode_size],\n free[-erode_size:, :],\n free[:, -erode_size:],\n ) = (0, 0, 0, 0)\n\n if np.sum(free) == 0:\n print(\"There is no free space!!\")\n return None, None\n\n pos, rot = self.sample_pos_in_free_space(free, hmap, obj_size)\n return pos, rot", "def random_reassignment(graph, possibilities):\n\n random_assignment(graph, possibilities)\n\n violating_nodes = graph.get_violations()\n\n while len(violating_nodes):\n random_reconfigure_nodes(graph, violating_nodes, possibilities)\n\n violating_nodes = graph.get_violations()", "def revert_to_guides():\n\n # Now gather all guides in scene\n all_guides = [g.replace('.partType', '') for g in mc.ls('*.partType')]\n if not all_guides:\n mc.warning('No guides in scene!')\n return\n\n # get all part and build options\n part_types = []\n options = []\n\n for node in 
all_guides:\n part_types.append(mc.getAttr(node+'.partType'))\n options.append(eval(mc.getAttr(node+'.buildOptions')))\n\n # gather posoiton and control shape information\n zeros = [n.split('.')[0] for n in mc.ls('*.animZeroGrp')]\n ctrls = [n.split('.')[0] for n in mc.ls('*.animControl')]\n jnts = mc.ls('*_JNT', type='joint')\n\n jnt_pos = [utils.decompose_matrix(j) for j in jnts]\n zeros_pos = [utils.decompose_matrix(z) for z in zeros]\n ctrls_pos = [utils.decompose_matrix(c) for c in ctrls]\n shape_data = controlShapes.get_data()\n\n # check to make sure all data exists!\n if not zeros+ctrls+jnts or not shape_data:\n mc.warning('Cannot find nodes in this rig!')\n return\n\n # Scene check\n if not mm.eval('int $rtMelResult = `saveChanges(\"file -f -new\")`;'):\n return\n\n # now rebuild the guides\n for i, part in enumerate(part_types):\n guide.build(part, **options[i])\n\n # place joints\n for i in range(len(jnts))*6:\n\n node = mc.ls(jnts[i]+'_PLC')\n pos = jnt_pos[i]\n\n if not node:\n continue\n\n node = node[0]\n if not mc.getAttr(node+'.rotateOrder', l=1):\n mc.setAttr(node+'.rotateOrder', l=pos[3])\n\n mc.xform(node, ws=1, t=pos[0])\n mc.xform(node, ws=1, ro=pos[1])", "def mutate_point_rect(mutated_genome):\n seed = random.randint(0,1)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_rect(mutated_genome,index)\n else: #seed == 1:\n shift_point_rect(mutated_genome,index)", "def _fledgling_move(self,fledgling,parent_hex):\n options = fledgling.get_move_options(parent_hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options, weights=[x.quality**2 for x in options])\n fledgling.move(target[0])", "def decrement_point(mutated_genome,index):\n point_index1 = random.randint(1,max(0,len(mutated_genome[index][2])-1))\n seed = random.randint(0,2)\n if seed == 0:\n point_index2 = point_index1 - 1\n elif seed == 1:\n point_index2 = random.randint(0, point_index1)\n else: #seed == 2:\n point_index2 = 0\n temp = mutated_genome[index][2][point_index1]\n mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n mutated_genome[index][2][point_index2] = temp", "def move_point_trig(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n old_points = list(mutated_genome[index][2])\n old_points[random.randint(0,2)] = point\n mutated_genome[index][2] = tuple(old_points)", "def specific_reset(self) -> None:\n\n # set agent and goal positions\n self.agent.specific_reset()\n agent_pos = self.agent.init_xyz\n agent_pos[:2] = self.world.generate_random_xyz_position()[:2]\n goal_pos = agent_pos\n while np.linalg.norm(agent_pos[:2]-goal_pos[:2]) < self.world.body_min_distance:\n goal_pos = self.world.generate_random_xyz_position()\n # adjust the height of agent\n # agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n self.goal.set_position(goal_pos)\n self.old_dist = self.get_xy_distance()\n\n # set agent orientation towards goal\n yaw = angle2pos(self.agent.get_position(), self.goal.get_position())\n yaw = self.agent.init_rpy[2] + yaw\n # apply random orientation to agent.\n yaw += np.random.uniform(-np.pi, np.pi)\n quaternion = self.bc.getQuaternionFromEuler([0, 0, yaw])\n self.agent.set_orientation(quaternion)\n\n # reset obstacle positions\n if len(self.obstacles) > 0:\n obs_init_pos = env_utils.generate_obstacles_init_pos(\n 
num_obstacles=len(self.obstacles),\n agent_pos=self.agent.get_position(),\n goal_pos=self.goal.get_position(),\n world=self.world,\n min_allowed_distance=self.world.body_min_distance,\n agent_obstacle_distance=self.agent_obstacle_distance\n )\n for i in range(len(self.obstacles)):\n self.obstacles[i].set_position(obs_init_pos[i])", "def _move_comp_person(self):\n\n move_tuple = random.choice(self._board.possible())\n self._board[move_tuple] = 'x'", "def switch_chromosomes(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-1))\n index2 = random.randint(0,max(0,len(mutated_genome)-1))\n temp = mutated_genome[index1]\n mutated_genome[index1] = mutated_genome[index2]\n mutated_genome[index2] = temp", "def _migrate(self):\n\t\tchoice_list = [s for s in self.site.neighbors if s != self.site]\n\t\tif len(choice_list) > 0: \n\t\t\tchoosed = numpy.random.choice(choice_list)\n\t\t\tif choosed.resource > self.site.resource:\n\t\t\t\tchoosed.add_agent(self)", "def mutate(self, candidates, mutation_prob=0.2, mutation_type='cull'):\n mutants = []\n for cand in candidates:\n n = cand.enc_path.shape[0]\n mutant = Path()\n if mutation_type == 'cull':\n cull_mask = np.random.random(n) > mutation_prob\n cull_mask[0] = cull_mask[-1] = True # do not cull start or end points\n mutant.enc_path = cand.enc_path[cull_mask]\n mutants.append(mutant)\n elif mutation_type == 'monotone':\n # if np.random.random() < mutation_prob or n < 4:\n ix1 = np.random.randint((n // 2) - 1) + 1\n ix2 = np.random.randint((n // 2) + 1, n - 2)\n x0, y0 = cand.enc_path[ix1]\n x1, y1 = cand.enc_path[ix2]\n l = line(x0, y0, x1, y1)\n coords = np.vstack((l[0], l[1])).T.astype(int)\n coords_mask = np.random.random(coords.shape[0]) > mutation_prob\n coords = coords[coords_mask]\n mutant.enc_path = np.vstack((cand.enc_path[0:ix1], coords, cand.enc_path[ix2:]))\n mutants.append(mutant)\n # else:\n # mutants.append(cand)\n return mutants", "def sequenced_picknplace_plan(assembly_pkg_json_path, solve_method='sparse_ladder_graph', viewer=False, scale=1e-3,\n sample_time=5, sparse_time_out=2, jt_res=0.1, pos_step_size=0.01, ori_step_size=np.pi/16,\n viz_inspect=False, warning_pause=False, save_dir=None, **kwargs):\n # TODO: assert solve method in avaiable list\n\n # * load robot setup data\n (robot_urdf, base_link_name, tool_root_link_name, ee_link_name, ik_joint_names, disabled_self_collision_link_names), \\\n (workspace_urdf, workspace_robot_disabled_link_names) = get_picknplace_robot_data()\n picknplace_end_effector_urdf = get_picknplace_end_effector_urdf()\n picknplace_tcp_def = get_picknplace_tcp_def()\n\n # * create robot and pb environment\n connect(use_gui=viewer)\n\n # * adjust camera pose (optional)\n if has_gui():\n camera_base_pt = (0,0,0)\n camera_pt = np.array(camera_base_pt) + np.array([1, 0, 0.5])\n set_camera_pose(tuple(camera_pt), camera_base_pt)\n\n with HideOutput():\n # * pybullet can handle ROS-package path URDF automatically now (ver 2.5.7)!\n robot = load_pybullet(robot_urdf, fixed_base=True)\n workspace = load_pybullet(workspace_urdf, fixed_base=True)\n # ee_body = create_obj(picknplace_end_effector_urdf)\n ee_body = load_pybullet(picknplace_end_effector_urdf)\n\n # * set robot idle configuration\n ik_joints = joints_from_names(robot, ik_joint_names)\n robot_start_conf = get_robot_init_conf()\n set_joint_positions(robot, ik_joints, robot_start_conf)\n\n # * create tool and tool TCP from flange (tool0) transformation\n root_link = link_from_name(robot, tool_root_link_name)\n # create end effector body\n 
ee_attach = Attachment(robot, root_link, unit_pose(), ee_body)\n # set up TCP transformation, just a renaming here\n root_from_tcp = picknplace_tcp_def\n if has_gui() :\n # draw_tcp pose\n ee_attach.assign()\n ee_link_pose = get_pose(ee_attach.child)\n draw_pose(multiply(ee_link_pose, root_from_tcp))\n\n # * specify ik fn wrapper\n ik_fn = IK_MODULE.get_ik\n def get_sample_ik_fn(robot, ik_fn, ik_joint_names, base_link_name, tool_from_root=None):\n def sample_ik_fn(world_from_tcp):\n if tool_from_root:\n world_from_tcp = multiply(world_from_tcp, tool_from_root)\n return sample_tool_ik(ik_fn, robot, ik_joint_names, base_link_name, world_from_tcp, get_all=True) #,sampled=[0])\n return sample_ik_fn\n # ik generation function stays the same for all cartesian processes\n sample_ik_fn = get_sample_ik_fn(robot, ik_fn, ik_joint_names, base_link_name, invert(root_from_tcp))\n\n # * load shape & collision data\n with open(assembly_pkg_json_path, 'r') as f:\n json_data = json.loads(f.read())\n assembly = Assembly.from_package(json_data)\n elements = assembly.elements\n for element in elements.values():\n for unit_geo in element.unit_geometries:\n unit_geo.rescale(scale)\n # TODO: scale derived from the assembly package unit\n # static_obstacles = []\n # for ug in assembly.static_obstacle_geometries.values():\n # static_obstacles.extend(ug.mesh)\n\n # * load precomputed sequence / use assigned sequence\n # TODO: load this as function argument\n # element_seq = elements.keys()\n element_seq = [0, 1, 2, 3, 4, 5]\n # element_seq = [3, 4, 5]\n print('sequence: ', element_seq)\n\n # visualize goal pose\n if has_gui():\n # viz_len = 0.003\n with WorldSaver():\n for e_id in element_seq:\n element = elements[e_id]\n with LockRenderer():\n print('e_id #{} : {}'.format(e_id, element))\n for unit_geo in element.unit_geometries:\n for pb_geo in unit_geo.pybullet_bodies:\n set_pose(pb_geo, random.choice(unit_geo.get_goal_frames(get_pb_pose=True)))\n # print('---------')\n # wait_for_user()\n # wait_for_user()\n\n # * construct ignored body-body links for collision checking\n # in this case, including self-collision between links of the robot\n disabled_self_collisions = get_disabled_collisions(robot, disabled_self_collision_link_names)\n # and links between the robot and the workspace (e.g. 
robot_base_link to base_plate)\n extra_disabled_collisions = get_body_body_disabled_collisions(robot, workspace, workspace_robot_disabled_link_names)\n # TODO: extra disabled collisions as function argument\n extra_disabled_collisions.update({\n ((robot, link_from_name(robot, 'robot_link_5')), (ee_body, link_from_name(ee_body, 'eef_base_link'))),\n ((robot, link_from_name(robot, 'robot_link_6')), (ee_body, link_from_name(ee_body, 'eef_base_link')))\n })\n\n # * create cartesian processes without a sequence being given, with random pose generators\n cart_process_seq = build_picknplace_cartesian_process_seq(\n element_seq, elements,\n robot, ik_joint_names, root_link, sample_ik_fn,\n ee_attachs=[ee_attach], self_collisions=True, disabled_collisions=disabled_self_collisions,\n obstacles=[workspace],extra_disabled_collisions=extra_disabled_collisions,\n tool_from_root=invert(root_from_tcp), viz_step=False, pick_from_same_rack=True,\n pos_step_size=pos_step_size, ori_step_size=ori_step_size)\n\n # specifically for UR5, because of its wide joint range, we need to apply joint value snapping\n for cp in cart_process_seq:\n cp.target_conf = robot_start_conf\n\n with LockRenderer(not viz_inspect):\n if solve_method == 'ladder_graph':\n print('\\n'+'#' * 10)\n print('Solving with the vanilla ladder graph search algorithm.')\n cart_process_seq = solve_ladder_graph_from_cartesian_process_list(cart_process_seq,\n verbose=True, warning_pause=warning_pause, viz_inspect=viz_inspect, check_collision=False, start_conf=robot_start_conf)\n elif solve_method == 'sparse_ladder_graph':\n print('\\n'+'#' * 10)\n print('Solving with the sparse ladder graph search algorithm.')\n sparse_graph = SparseLadderGraph(cart_process_seq)\n sparse_graph.find_sparse_path(verbose=True, vert_timeout=sample_time, sparse_sample_timeout=sparse_time_out)\n cart_process_seq = sparse_graph.extract_solution(verbose=True, start_conf=robot_start_conf)\n else:\n raise ValueError('Invalid solve method!')\n assert all(isinstance(cp, CartesianProcess) for cp in cart_process_seq)\n\n pnp_trajs = [[] for _ in range(len(cart_process_seq))]\n for cp_id, cp in enumerate(cart_process_seq):\n element_attachs = []\n for sp_id, sp in enumerate(cp.sub_process_list):\n assert sp.trajectory, '{}-{} does not have a Cartesian plan found!'.format(cp, sp)\n # ! 
reverse engineer the grasp pose\n if sp.trajectory.tag == 'pick_retreat':\n unit_geo = elements[sp.trajectory.element_id].unit_geometries[0]\n e_bodies = unit_geo.pybullet_bodies\n for e_body in e_bodies:\n set_pose(e_body, unit_geo.get_initial_frames(get_pb_pose=True)[0])\n set_joint_positions(sp.trajectory.robot, sp.trajectory.joints, sp.trajectory.traj_path[0])\n element_attachs.append(create_attachment(sp.trajectory.robot, root_link, e_body))\n\n if sp.trajectory.tag == 'pick_retreat' or sp.trajectory.tag == 'place_approach':\n sp.trajectory.attachments= element_attachs\n pnp_trajs[cp_id].append(sp.trajectory)\n full_trajs = pnp_trajs\n\n # * transition motion planning between extrusions\n return2idle = True\n transition_traj = solve_transition_between_picknplace_processes(pnp_trajs, elements, robot_start_conf,\n disabled_collisions=disabled_self_collisions,\n extra_disabled_collisions=extra_disabled_collisions,\n obstacles=[workspace], return2idle=return2idle,\n resolutions=[jt_res]*len(ik_joints),\n **kwargs)\n\n # * weave the Cartesian and transition processses together\n for cp_id, print_trajs in enumerate(full_trajs):\n print_trajs.insert(0, transition_traj[cp_id][0])\n print_trajs.insert(3, transition_traj[cp_id][1])\n if return2idle:\n full_trajs[-1].append(transition_traj[-1][-1])\n\n if save_dir is None:\n here = os.path.dirname(__file__)\n save_dir = os.path.join(here, 'results')\n export_trajectory(save_dir, full_trajs, ee_link_name, indent=1, shape_file_path=assembly_pkg_json_path,\n include_robot_data=True, include_link_path=True)\n\n # * disconnect and close pybullet engine used for planning, visualizing trajectories will start a new one\n reset_simulation()\n disconnect()\n\n if viewer:\n cart_time_step = None\n tr_time_step = None\n display_picknplace_trajectories(robot_urdf, ik_joint_names,\n assembly_pkg_json_path, full_trajs, tool_root_link_name,\n ee_urdf=picknplace_end_effector_urdf, workspace_urdf=workspace_urdf, animate=True,\n cart_time_step=cart_time_step, tr_time_step=tr_time_step)", "def random_reconfigure_nodes(graph, nodes, possibilities):\n for node in nodes:\n graph.nodes[node].set_value(random.choice(possibilities))", "def set_references(self, ref_ch=None, bipolar_dict=None):\n\n if ref_ch is None:\n ref_ch = ['PO9', 'FT9']\n\n if bipolar_dict is not None:\n mne.set_bipolar_reference(self.raw,\n anode=[val[0] for val in bipolar_dict.values()],\n cathode=[val[1] for val in bipolar_dict.values()],\n ch_name=list(bipolar_dict.keys()),\n copy=False)\n\n self.raw.set_eeg_reference(ref_channels=ref_ch)\n self.raw.drop_channels(ref_ch)", "def specific_reset(self) -> None:\n\n # set agent and goal positions\n self.agent.specific_reset()\n agent_pos = self.world.generate_random_xyz_position()\n goal_pos = agent_pos\n while np.linalg.norm(agent_pos[:2]-goal_pos[:2]) < self.world.body_min_distance:\n goal_pos = self.world.generate_random_xyz_position()\n # adjust the height of agent\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n self.goal.set_position(goal_pos)\n self.old_dist = self.get_xy_distance()\n\n # apply random orientation to agent.\n random_yaw = np.random.uniform(-np.pi, np.pi)\n quaternion = self.bc.getQuaternionFromEuler([0, 0, random_yaw])\n self.agent.set_orientation(quaternion)\n\n # reset obstacle positions\n if len(self.obstacles) > 0:\n obs_init_pos = env_utils.generate_obstacles_init_pos(\n num_obstacles=len(self.obstacles),\n agent_pos=self.agent.get_position(),\n 
goal_pos=self.goal.get_position(),\n world=self.world,\n min_allowed_distance=self.world.body_min_distance,\n agent_obstacle_distance=self.agent_obstacle_distance\n )\n for i in range(len(self.obstacles)):\n self.obstacles[i].set_position(obs_init_pos[i])", "def mutate_point_wline(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 1: seed = 0\n if seed == 0:\n insert_point_wline(mutated_genome,index)\n elif seed == 1:\n remove_point_wline(mutated_genome,index)\n elif seed == 2:\n switch_points_wline(mutated_genome,index)\n elif seed == 3:\n shuffle_points_wline(mutated_genome,index)\n elif seed == 4:\n move_point_wline(mutated_genome,index)\n elif seed == 5:\n shift_point_wline(mutated_genome,index)\n elif seed == 6:\n increment_point_wline(mutated_genome,index)\n else: #seed == 7:\n decrement_point_wline(mutated_genome,index)" ]
[ "0.6009613", "0.53904885", "0.5373188", "0.5336502", "0.52867323", "0.52322876", "0.51420814", "0.51346767", "0.50901896", "0.5086578", "0.49775714", "0.49687153", "0.4933418", "0.4913252", "0.48939452", "0.48697948", "0.48356456", "0.4826306", "0.4824639", "0.48112273", "0.47526833", "0.47453013", "0.472483", "0.470968", "0.469727", "0.4668866", "0.46621394", "0.46553284", "0.46488065", "0.46447414" ]
0.5655333
1
Allows leastsq to take bounds if minimize function is missing.
def myleastsq(errfunc0,x0,args=None,bounds=None,**exkw):
    from scipy import optimize
    if hasattr(optimize,'minimize'):
        def errfunc(x,*iargs):
            return sum(errfunc0(x,*iargs)**2)
        if args is not None:
            exkw['args'] = args
        res = optimize.minimize(errfunc,x0[:],bounds=bounds,**exkw)
        return res.x,res.success
    else:
        lres = sys.float_info.max
        def errfunc(x,*iargs):
            if bounds!=None:
                for idx in range(len(x)):
                    if bounds[idx][0]!=None and x[idx]<bounds[idx][0]:
                        return lres
                    if bounds[idx][1]!=None and x[idx]>bounds[idx][1]:
                        return lres
            return errfunc0(x,*iargs)
        if args is not None:
            exkw['args'] = args
        return optimize.leastsq(errfunc,x0,**exkw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def leastsqbound(func,x0,bounds,args=(),**kw):\n # check for full output\n if \"full_output\" in kw and kw[\"full_output\"]:\n full=True\n else:\n full=False\n\n # convert x0 to internal variables\n i0 = external2internal(x0,bounds)\n\n # perfrom unconstrained optimization using internal variables\n r = leastsq(err,i0,args=(bounds,func,args),**kw)\n\n # unpack return convert to external variables and return\n if full:\n xi,cov_xi,infodic,mesg,ier = r\n xe = internal2external(xi,bounds)\n cov_xe = i2e_cov_x(xi,bounds,cov_xi)\n # XXX correct infodic 'fjac','ipvt', and 'qtf' \n return xe,cov_xe,infodic,mesg,ier \n\n else:\n xi,ier = r\n xe = internal2external(xi,bounds)\n return xe,ier", "def find_opt_func(W, x0, N, M, h_initial=None):\n if h_initial is None:\n h_initial = np.ones(N, dtype=float)\n h = leastsq(func_to_min, h_initial, args=(x0, M, W), full_output=True)[0]\n return OptFunc(W, x0, h, M)", "def minimize(self):\n raise NotImplementedError", "def fit(x_array, y_array, function, A_start):\n param = (x_array, y_array, function)\n\n A_final, cov_x, infodict, mesg, ier = leastsq(minimize, A_start, args=param, full_output=True)#, warning=True)\n \n return A_final", "def _helper_run_appropriate_fitter(self,lowerbounds_list: list,\n upperbounds_list: list,\n bounds_not_least_squares: sopt.Bounds):\n \n if self.fitmodel_input.minimization_method_str == \"least_squares\":\n fit_function_callable = getattr(fitmodels,self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.least_squares(fit_function_callable,\n np.array(list(self.fitmodel_input.start_paramdict.values())),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n bounds=(lowerbounds_list, upperbounds_list),\n loss=\"linear\", f_scale=1)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"minimize\":\n fit_function_callable = getattr(fitmodels,self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.minimize(sum_squares_decorator(fit_function_callable),\n np.array(list(self.fitmodel_input.start_paramdict.values())),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n bounds=bounds_not_least_squares,\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"basinhopping\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.basinhopping(\n sum_squares_decorator(fit_function_callable),\n np.array(list(self.fitmodel_input.start_paramdict.values())),\n minimizer_kwargs = {\"args\":(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n \"method\":\"trust-constr\"}, # TODO: figure out a smart thing to use here\n **self.fitmodel_input.fitter_options_dict)\n # The next lines is just for now the weirdness of basinhopping, it doesn't\n # have the global attribute called success\n setattr(optimization_output,\"success\",optimization_output.lowest_optimization_result.success)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"differential_evolution\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.differential_evolution(\n sum_squares_decorator(fit_function_callable),\n bounds_not_least_squares,\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n 
**self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"shgo\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.shgo(\n sum_squares_decorator(fit_function_callable),\n tuple(zip(lowerbounds_list,upperbounds_list)),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"dual_annealing\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.dual_annealing(\n sum_squares_decorator(fit_function_callable),\n tuple(zip(lowerbounds_list,upperbounds_list)),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"findmax\":\n # make a copy so that we can go about deleting the max value to find the next\n # max and so on\n peaks_xvals = []\n peaks_yvals = []\n data_array_copy = self.fitmodel_input.yvals.copy()\n # find max, then replace that point with the average, find the next max \n # and keep going until found as many maxima as requested\n for peak_num in range(self.fitmodel_input.start_paramdict[\"numpeaks\"]):\n peakval_y = np.nanmax(data_array_copy)\n peakcoord = np.argmax(data_array_copy)\n peakval_x = self.fitmodel_input.xvals[peakcoord]\n peaks_xvals.append(peakval_x)\n peaks_yvals.append(peakval_y)\n data_array_copy[peakcoord] = np.mean(data_array_copy)\n # we now have to build the optimization_output object that will look similar to what it looks like for regular fits\n param_dict_length = len(self.fitmodel_input.start_paramdict)\n optimization_output = types.SimpleNamespace() # this just initializes an empty class\n optimization_output.fun = -1 # objective function is -1, because it has no meaning here\n optimization_output.x = [peaks_xvals,peaks_yvals]\n # we now add the values to the \"output\" which are not real fit parameters\n # in normal fitting these are always fit parameters, but since this is a \"fake\" fit, we can simply add the initial parameters just to keep the interface constant\n for (idx,key) in enumerate(self.fitmodel_input.start_paramdict):\n if idx >= len(optimization_output.x):\n optimization_output.x.append(self.fitmodel_input.start_paramdict[key])\n optimization_output.success = True\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"findmin\":\n # make a copy so that we can go about deleting the max value to find the next\n # max and so on\n peaks_xvals = []\n peaks_yvals = []\n data_array_copy = self.fitmodel_input.yvals.copy()\n # find max, then replace that point with the average, find the next max \n # and keep going until found as many maxima as requested\n for peak_num in range(self.fitmodel_input.start_paramdict[\"numpeaks\"]):\n peakval_y = np.nanmin(data_array_copy)\n peakcoord = np.argmin(data_array_copy)\n peakval_x = self.fitmodel_input.xvals[peakcoord]\n peaks_xvals.append(peakval_x)\n peaks_yvals.append(peakval_y)\n data_array_copy[peakcoord] = np.mean(data_array_copy)\n # we now have to build the optimization_output object that will look similar to what it looks like for regular fits\n param_dict_length = len(self.fitmodel_input.start_paramdict)\n optimization_output = 
types.SimpleNamespace() # this just initializes an empty class\n optimization_output.fun = -1 # objective function is -1, because it has no meaning here\n optimization_output.x = [peaks_xvals,peaks_yvals]\n for (idx,key) in enumerate(self.fitmodel_input.start_paramdict):\n if idx >= len(optimization_output.x):\n optimization_output.x.append(self.fitmodel_input.start_paramdict[key])\n optimization_output.success = True\n return optimization_output\n else:\n print(\n \"\"\"Message from Class {:s} function _helper_run_appropriate_fitter: \n you tried to use the following optimizer: {}. \n This optimizer does not exist. Not doing any optimization\"\"\".format(\n self.__class__.__name__, self.fitmodel_input.minimization_method_str))\n return None", "def minimize(fun, \n bounds = None, \n value_limit = math.inf,\n num_retries = 1000,\n logger = None,\n workers = mp.cpu_count(),\n popsize = 31, \n max_evaluations = 50000, \n capacity = 500,\n stop_fittness = None,\n optimizer = None,\n ):\n\n if optimizer is None:\n optimizer = de_cma(max_evaluations, popsize, stop_fittness) \n store = Store(bounds, capacity = capacity, logger = logger)\n return retry(fun, store, optimizer.minimize, num_retries, value_limit, workers)", "def minimize(\n func: Callable,\n x0: Union[Array, BlockArray],\n args: Union[Tuple, Tuple[Any]] = (),\n method: str = \"L-BFGS-B\",\n hess: Optional[Union[Callable, str]] = None,\n hessp: Optional[Callable] = None,\n bounds: Optional[Union[Sequence, spopt.Bounds]] = None,\n constraints: Union[spopt.LinearConstraint, spopt.NonlinearConstraint, dict] = (),\n tol: Optional[float] = None,\n callback: Optional[Callable] = None,\n options: Optional[dict] = None,\n) -> spopt.OptimizeResult:\n\n if snp.util.is_complex_dtype(x0.dtype):\n # scipy minimize function requires real-valued arrays, so\n # we split x0 into a vector with real/imaginary parts stacked\n # and compose `func` with a `_join_real_imag`\n iscomplex = True\n func_ = lambda x: func(_join_real_imag(x))\n x0 = _split_real_imag(x0)\n else:\n iscomplex = False\n func_ = func\n\n x0_shape = x0.shape\n x0_dtype = x0.dtype\n x0 = x0.ravel() # if x0 is a BlockArray it will become a jax array here\n\n # Run the SciPy minimizer\n if method in (\n \"CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov, \"\n \"trust-exact, trust-constr\"\n ).split(\n \", \"\n ): # uses gradient info\n min_func = _wrap_func_and_grad(func_, x0_shape, x0_dtype)\n jac = True # see scipy.minimize docs\n else: # does not use gradient info\n min_func = _wrap_func(func_, x0_shape, x0_dtype)\n jac = False\n\n res = spopt.OptimizeResult({\"x\": None})\n\n def fun(x0):\n nonlocal res # To use the external res and update side effect\n res = spopt.minimize(\n min_func,\n x0=x0,\n args=args,\n jac=jac,\n method=method,\n options=options,\n ) # Returns OptimizeResult with x0 as ndarray\n return res.x.astype(x0_dtype)\n\n # HCB call with side effects to get the OptimizeResult on the same device it was called\n res.x = hcb.call(\n fun,\n arg=x0,\n result_shape=x0, # From Jax-docs: This can be an object that has .shape and .dtype attributes\n )\n\n # un-vectorize the output array from spopt.minimize\n res.x = snp.reshape(\n res.x, x0_shape\n ) # if x0 was originally a BlockArray then res.x is converted back to one here\n\n if iscomplex:\n res.x = _join_real_imag(res.x)\n\n return res", "def best_fit(self, **kwargs):\n n_fit_p = len(self.fit_parameters)\n n_wc = len(self.fit_wc_names)\n if n_fit_p + n_wc == 1:\n def f(x):\n return 
-self.log_likelihood([x])\n opt = scipy.optimize.minimize_scalar(f, **kwargs)\n else:\n def f(x):\n return -self.log_likelihood(x)\n if 'x0' not in kwargs:\n x0 = np.zeros(n_fit_p + n_wc)\n if n_fit_p > 1:\n x0[:n_fit_p] = self.get_central_fit_parameters\n opt = minimize_robust(f, x0, **kwargs)\n else:\n opt = minimize_robust(f, **kwargs)\n if not opt.success:\n raise ValueError(\"Optimization failed.\")\n else:\n return {'x': opt.x, 'log_likelihood': -opt.fun}", "def minimize(self,x0,method='fmin',**kwargs):\n return self._optimize(x0,'min',method,**kwargs)", "def minimize(self):\n pass", "def get_suffstat_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(sum_x=(minf,inf), sum_x_squared=(0.0 ,inf))\n return params", "def test_with_optimize(self, fitter):\n fitter = fitter()\n\n model = fitter(self.gauss, self.xdata, self.ydata, estimate_jacobian=True)\n\n def func(p, x):\n return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)\n\n def errfunc(p, x, y):\n return func(p, x) - y\n\n result = optimize.leastsq(\n errfunc, self.initial_values, args=(self.xdata, self.ydata)\n )\n assert_allclose(model.parameters, result[0], rtol=10 ** (-3))", "def solve(self):\n\n constrains, bounds = self.init_constraint_list()\n result = minimize(self.objective_function,\n x0=self.init_guess,\n constraints=constrains,\n bounds=bounds,\n options={'disp': False})\n\n return result", "def minimize(self, func, grad, x0, args=()):\n learning_rate = self._learning_rate\n best_x = x = x0\n best_value = func(x, *args)\n iters_without_improve = 0\n\n for iteration in range(self._max_iterations):\n gradient = grad(x, *args)\n\n # If absolute values of all partial derivatives are equal to 0 with specified accuracy, then parameters are\n # close enough to the minimum and there is no need to continue gradient descent.\n if np.abs(gradient).max() <= self._accuracy:\n break\n\n x = x - learning_rate * gradient\n\n # If new values of x haven't lead to decrease of the function value for the specified number of iteration,\n # the x is reverted to its previous best value and the learning rate is reduced\n value = func(x, *args)\n if value > best_value:\n iters_without_improve += 1\n if iters_without_improve >= self._lr_reduce_patience:\n x = best_x\n learning_rate *= self._lr_reduce_factor\n else:\n iters_without_improve = 0\n best_value = value\n best_x = x\n\n return best_x", "def scipy_lbfgs(fun, jac, x0):\n result = scipy.optimize.minimize(fun, x0, jac=jac, method='L-BFGS-B')\n if not result['success']:\n raise RuntimeError(\"L-BFGS-B failed to converge\")\n return result['x']", "def brute_leastsquare_fit(fun, x_data, y_data,weight_data=None,p_names=None,p_min_max_steps_dict=None,\r\n const_params=[], visualize=False):\r\n \r\n if p_names == None or p_min_max_steps_dict==None:\r\n raise Exception ('p_names and p_min_max_steps must be given!'+ \r\n 'structure of p_min_max_steps_dict: {\"pname0\":[min0,max0,brute_steps0]}')\r\n \r\n params = Parameters() ### initialize LMfit parameters\r\n for p_name in p_names:\r\n min_val=p_min_max_steps_dict[p_name][0]\r\n max_val=p_min_max_steps_dict[p_name][1]\r\n steps=p_min_max_steps_dict[p_name][2]\r\n params.add(p_name,value=min_val,\r\n min=min_val,\r\n max=max_val,\r\n brute_step=(max_val-min_val)/(steps-1))\r\n \r\n ### define function to be minimized for fit \r\n \r\n def cost_function_fit(p=params):\r\n def minimize_fun(pars):\r\n \r\n v=pars.valuesdict()\r\n arglist=[]\r\n for p_name in p_names:\r\n arglist.append(v[p_name])\r\n \r\n for const_param in 
const_params:\r\n arglist.append(const_param)\r\n \r\n ret=np.array((fun(x_data,*arglist)-y_data),dtype=float)\r\n if weight_data is not None:\r\n ret=ret*np.sqrt(weight_data)\r\n return(ret)\r\n brute_result=lmfit.minimize(minimize_fun,params,method='brute',nan_policy='omit')\r\n best_result=copy.deepcopy(brute_result)\r\n for candidate in brute_result.candidates[0:5]:\r\n trial = lmfit.minimize(minimize_fun, params=candidate.params,method='leastsq',nan_policy='omit')\r\n if trial.chisqr < best_result.chisqr:\r\n best_result = trial\r\n \r\n return((best_result,brute_result))\r\n \r\n best_result,brute_result = cost_function_fit()\r\n arg_list=[]\r\n for p_name in p_names:\r\n arg_list.append(best_result.params.valuesdict()[p_name])\r\n for const_param in const_params:\r\n arg_list.append(const_param)\r\n \r\n \r\n if visualize == True:\r\n plot_brute_leastsquares_results(brute_result,leastsq_fit_result=best_result)\r\n plt.figure()\r\n plt.plot(x_data,y_data,label='data',color='blue')\r\n plt.plot(x_data,fun(x_data,*arg_list),label='Fit',color='red')\r\n plt.title(best_result.params.valuesdict())\r\n plt.show()\r\n return (arg_list[0:len(p_names)])", "def optimize(self, x0):\n (result,f,d) = fmin_l_bfgs_b(lambda x:self.costFun(x), np.ravel(x0),lambda x: self.gradFun(x))\n print(\"optimization completed with cost: \" + str(f))\n return result.reshape(self.inp_shape)", "def gopt_max(fun, bounds, n_warmup = 1000, n_local = 10):\n x_best, y_best = gopt_min(lambda x: -fun(x), bounds, n_warmup, n_local)\n return x_best, -y_best", "def minimize_scalar(func, *args, **kwargs):\n bounds = kwargs.get('bounds', None)\n\n if bounds is None or len(bounds) != 2:\n msg = (\"To run maximize_scalar or minimize_scalar, \"\n \"you have to provide a `bounds` \"\n \"keyword argument with a sequence \"\n \"of length 2.\")\n raise ValueError(msg)\n\n try:\n func(bounds[0], *args)\n except Exception as e:\n msg = (\"Before running scipy.integrate.minimize_scalar, \"\n \"I tried running the function you provided \"\n \"with the lower bound, \"\n \"and I got the following error:\")\n logger.error(msg)\n raise (e)\n\n underride(kwargs, method='bounded')\n\n res = spo.minimize_scalar(func, args=args, **kwargs)\n\n if not res.success:\n msg = (\"minimize_scalar did not succeed.\"\n \"The message it returned is: \\n\" +\n res.message)\n raise Exception(msg)\n\n return res", "def linear_least_squares(M, v):\n \n B = copy(M)\n [m,n] = shape(B)\n if rank(B) != min(m,n):\n print('Warning: can not be solved since the rank of the matrix is not its maximum value')\n return nan\n else:\n \n A = copy(M)\n At = transpose(M)\n b = copy(v)\n b = transpose(b)\n \n AtA = dot(At, A)\n Atb = transpose(dot(At, b))\n print(AtA, Atb)\n \n x = gauss_elimination(AtA, Atb)\n print('x*:')\n return x", "def leastsq(error_func, x0, *args, **options):\n # override `full_output` so we get a message if something goes wrong\n options[\"full_output\"] = True\n\n # run leastsq\n t = scipy.optimize.leastsq(error_func, x0=x0, args=args, **options)\n best_params, cov_x, infodict, mesg, ier = t\n\n # pack the results into a ModSimSeries object\n details = SimpleNamespace(cov_x=cov_x,\n mesg=mesg,\n ier=ier,\n **infodict)\n details.success = details.ier in [1,2,3,4]\n\n # if we got a Params object, we should return a Params object\n if isinstance(x0, Params):\n best_params = Params(pd.Series(best_params, x0.index))\n\n # return the best parameters and details\n return best_params, details", "def minimize(self):\n self.normalize()\n p0s = 
self.spacedvals(method='random')\n if self.n_spots > 1:\n opts = self.multifit(p0s)\n else:\n opts = self.singlefit(p0s)\n self.yf = [self.solve(theta) for theta in opts]\n self.bestps = opts\n return opts", "def get_best_lower_bound(self):\n if not self.tours:\n raise Exception('No lower bound has been computed yet')\n best = max(self.lower_bounds,key=self.lower_bounds.get)\n print('The best lower bound is given by {} with score {}'.format(best,self.lower_bounds[best]))\n return self.lower_bounds[best]", "def fit_gaussian(x, y, z):\n\n def sym_gaussian(p):\n \"\"\"\n Returns a Gaussian function:\n a**2 * exp(-((x - x_0)**2 + (y - y_0)**2) / (2 * sigma**2))\n p = [a, x_0, y_0, sigma]\n \"\"\"\n a, x_0, y_0, sigma = p\n return a**2 \\\n * np.exp(-((x - x_0)**2 + (y - y_0)**2) / (2.0 * sigma**2))\n\n def sym_gaussian_resids(p):\n \"\"\"Residuals to be sent into leastsq\"\"\"\n return z - sym_gaussian(p)\n\n def guess_fit_gaussian():\n \"\"\"\n return a, x_0, y_0, and sigma based on computing moments of data\n \"\"\"\n a = z.max()\n\n # Compute moments\n total = z.sum()\n x_0 = np.dot(x, z) / total\n y_0 = np.dot(y, z) / total\n\n # Approximate sigmas\n sigma_x = np.dot(x**2, z) / total\n sigma_y = np.dot(y**2, z) / total\n sigma = np.sqrt(sigma_x * sigma_y)\n\n # Return guess\n return (a, x_0, y_0, sigma)\n\n # Get guess\n p0 = guess_fit_gaussian()\n\n # Perform optimization using nonlinear least squares\n popt, junk_output, info_dict, mesg, ier = \\\n scipy.optimize.leastsq(sym_gaussian_resids, p0, full_output=True)\n\n # Check to make sure leastsq was successful. If not, return centroid\n # estimate.\n if ier in (1, 2, 3, 4):\n return (popt[0]**2, popt[1], popt[2], popt[3])\n else:\n return p0", "def equality_constrained_linear_least_squares(A, B, y, z):\n return lapack.dgglse(A, B, y, z)[3]", "def minimize(fun: Callable[..., float],\n x0: np.ndarray,\n args: Tuple = (),\n method: Optional[str] = None,\n **kwargs) -> scipy.optimize.OptimizeResult:\n if method.lower() in OPTIMIZERS:\n optimizer = OPTIMIZERS[method.lower()]\n return optimizer(fun, x0, args=args, **kwargs)\n return scipy.optimize.minimize(fun, x0, args=args, method=method, **kwargs)", "def _optimize(self,x0,type,method,**kwargs):\n from scipy.optimize import fmin,fmin_powell\n\n if type == 'min':\n g=lambda x:self(x)\n elif type == 'max':\n g=lambda xs:-1*self(x)\n elif type == 'root':\n g=lambda x:np.abs(self(x))\n elif type == 'val':\n val = kwargs.pop('valtofind')\n g=lambda x:np.abs(self(x)-val)\n elif type == 'saddle':\n raise NotImplementedError\n else:\n raise ValueError('Unrecognized optimization type')\n\n if method == 'fmin':\n res = fmin(g,x0,**kwargs)\n elif method == 'fmin_powell':\n res = fmin_powell(g,x0,**kwargs)\n else:\n raise ValueError('Unrecognized method')\n\n self.lastOpt = res\n return res[0]", "def fit_model_bounds(func, xdata, ydata, yerrdata, p0=None,\n bounds=None, options=None):\n # objective function to be minimized, required format of 'f(x, *args)'\n f = lambda p: calc_chisq(func, xdata, ydata, yerrdata, *p)\n # minimize the given function using 'scipy.optimize.minimize' with bounds\n res = minimize(f, p0, method=MINIMIZE_METHOD, bounds=bounds,\n options=options)\n popt = res.x\n #print(\"DEBUG: minimization results:\\n\", res, file=sys.stderr)\n\n # check minimization results\n if not res.success:\n print(\"*** WARNING: minimization exited with error: ***\\n\" + \\\n \"*** %s ***\" % res.message, file=sys.stderr)\n\n # the function evaluated at the output parameters\n fvec = lambda x: func(x, 
*popt)\n # degree of freedom\n dof = len(xdata) - len(popt) - 1\n # chi squared\n chisq = res.fun\n # one standard deviation errors on the parameters\n perr = popt * 0.0 # FIXME\n infodict = {\n 'fvec': fvec,\n 'dof': dof,\n 'chisq': chisq,\n 'perr': perr\n }\n return (popt, infodict)", "def fit(self):\n if self.minimizer == \"differential_evolution\":\n kwargs = {\"maxiter\": self._maxiter}\n elif self.minimizer == \"shgo\":\n kwargs = {\"options\": {\"maxiter\": self._maxiter,\n \"jac\": self.cost_func.jac_cost}}\n elif self.minimizer == \"dual_annealing\":\n kwargs = {\"maxiter\": self._maxiter, \"local_search_options\": {\n \"jac\": self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif \"Maximum number of iteration\" in result.message:\n self._status = 1\n else:\n self._status = 2", "def minimize(A, t, y0, function):\n return y0 - function(A, t)" ]
[ "0.7573677", "0.6523712", "0.6448234", "0.6322002", "0.6261192", "0.625652", "0.6225058", "0.62234515", "0.61956996", "0.61218625", "0.6083507", "0.6068286", "0.6016386", "0.60070544", "0.5992043", "0.5962499", "0.5946215", "0.5920246", "0.58964807", "0.58740616", "0.5871715", "0.58413875", "0.5837", "0.5797726", "0.5797227", "0.5783468", "0.5771905", "0.5747621", "0.5743229", "0.5737588" ]
0.71415097
1
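
A minimal usage sketch of the `myleastsq` wrapper shown in this row. It is illustrative only: it assumes NumPy and SciPy are installed and that `myleastsq` is defined exactly as above; the exponential model, synthetic data, and bound values are made up for the example.

import numpy as np

# Synthetic data for an assumed model y = a * exp(-b * x) (values are illustrative).
x = np.linspace(0.0, 4.0, 50)
y = 2.5 * np.exp(-0.8 * x)

def residuals(p, x, y):
    # leastsq-style residual vector for parameters p = (a, b).
    return y - p[0] * np.exp(-p[1] * x)

p0 = np.array([1.0, 1.0])
bounds = [(0.0, 10.0), (0.0, 5.0)]  # one (min, max) pair per parameter

# Either branch of myleastsq returns a (solution, status) pair.
popt, status = myleastsq(residuals, p0, args=(x, y), bounds=bounds)
print(popt, status)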
Returns spherical radii for provided volumes.
def sphrad(vol): return (3.*vol/(4.*np.pi))**(1./3.)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sphere_volume(r):\n return (4/3) * 3.14159 * r**3", "def sphere_volume(r):\n\treturn 4/3. * math.pi * r ** 3", "def sphere_volume(radius : number) -> number:\n volume = 4/3*(pi*radius*radius*radius)\n return volume", "def sphere_volume(sphere_radius):\n return (4 / 3 * np.pi * sphere_radius**3)", "def sphereVolume(radius):\n volume = (4 / 3) * math.pi * radius ** 3\n return volume", "def calcul_v_sphere(r):\n volume = 4/3 * math.pi * (r ** 3)\n return volume", "def tube_radius_from_volume(volume, length):\n a3 = 4.0 / 3.0 * np.pi\n a2 = np.pi * length\n a1 = 0\n a0 = -volume\n\n r = np.polynomial.polynomial.polyroots([a0, a1, a2, a3])\n\n radius = np.real(r[r > 0][0])\n # print \"geometry3d.pills_radius_from_volume \", radius\n return radius", "def sphvol(r):\n return (4./3.)*np.pi*(r**3.)", "def ellipsoid_volume(radius1: number, radius2: number, radius3: number) -> number:\n volume = 4/3*(pi*radius1*radius2*radius3)\n return volume", "def spherical_differential(self):\n r, theta, phi, v_r, v_t, v_p = self.convert_spherical()\n return SphericalDifferential(\n r * u.m,\n theta * u.rad,\n phi * u.rad,\n v_r * u.m / u.s,\n v_t * u.rad / u.s,\n v_p * u.rad / u.s,\n )", "def spherical_differential(self):\n r, theta, phi, v_r, v_t, v_p = self.convert_spherical()\n return SphericalDifferential(\n r * u.m,\n theta * u.rad,\n phi * u.rad,\n v_r * u.m / u.s,\n v_t * u.rad / u.s,\n v_p * u.rad / u.s,\n )", "def nsphere_volume(n, r):\n return math.pi ** (n / 2) * (r ** n) / gamma(n / 2 + 1)", "def Radius(self, *args):\n return _Bnd.Bnd_Sphere_Radius(self, *args)", "def sphere(\n network,\n pore_diameter='pore.diameter'\n):\n return 4/3*_pi*(network[pore_diameter]/2)**3", "def getSphereRadius(self):\n return 1.5", "def spherical_parameters(self):\n phi_mu_list = []\n theta_mu_list = []\n \n for mu in self.mu_list:\n r, phi, theta = T_cartesian_to_spherical(x=mu[0], y=mu[1], z=mu[2])\n phi_mu_list.append(phi)\n theta_mu_list.append(theta)\n \n return phi_mu_list, theta_mu_list", "def surfaceIntSphere(r: float) -> float:\n return 4.0 * np.pi * r * r", "def cylinder_volume(radius: number, height: number) -> number:\n volume = pi*radius*radius*height\n return volume", "def calc_hypersphere_volume(r: float, n: int) -> float:\n return (math.pi ** (n / 2) * r ** n) / gamma((n / 2) + 1)", "def cartesianToSpherical(x=0, y=0, z=0):\n\n hxy = np.hypot(x, y)\n radius = np.hypot(hxy, z)\n altitude = np.arctan2(z, hxy)\n azimuth = np.arctan2(y, x)\n return altitude, azimuth, radius", "def volume(self) -> float:\n return 4 / 3 * np.pi * self.radius**3", "def to_spherical(d, r_grid, theta_grid, phi_grid, items):\n import numpy as np\n nr, nt, nphi = len(r_grid), len(theta_grid), len(phi_grid)\n files = {}\n\n for key in items:\n files.update({key: open(items[key]['filename'], 'w')})\n\n state = query_state()\n\n for i in range(nphi-1):\n phi = 0.5 * (phi_grid[i] + phi_grid[i+1])\n for j in range(nt-1):\n theta = 0.5 * (theta_grid[j] + theta_grid[j+1])\n for k in range(nr-1):\n r = 0.5 * (r_grid[k] + r_grid[k+1])\n rho = r * np.sin(theta)\n z = r * np.cos(theta)\n for key in items:\n val = state.query(d, rho, z, key)\n files[key].write('{0:.6e}\\n'.format(val))\n\n for key in items:\n files[key].close()", "def volume_from_rs(rs,Nel):\n a0 = 0.5291772 # Bohr radius (angstroms/bohr)\n volume = (4.0*pi/3.0)*Nel * (rs*a0)**3\n\n return volume", "def _calculate_residual_sphere(parameters, x_values, y_values, z_values):\n #extract the parameters\n x_centre, y_centre, z_centre, radius = parameters\n\n #use numpy's sqrt 
function here, which works by element on arrays\n distance_from_centre = numpy.sqrt((x_values - x_centre)**2 +\n (y_values - y_centre)**2 +\n (z_values - z_centre)**2)\n\n return distance_from_centre - radius", "def get_radii(self) -> np.ndarray:\n return np.array([self._radii[p] for p in self.particles])", "def calculate_soma_surface(data: Data) -> float:\n\n soma = data.morphology.get_soma()\n return 4.0 * math.pi * soma['radius'] * soma['radius']", "def calculateVolumes(data):\n print \"Calculating volumes...\"\n results = {}\n for dataLine in data:\n name = dataLine['name']\n r1 = dataLine['r1']\n r2 = dataLine['r2']\n r3 = dataLine['r3']\n r4 = dataLine['r4']\n t1 = dataLine['t1']\n t2 = dataLine['t2']\n t3 = dataLine['t3']\n volCup = (math.pi/3.0) * t1 * ((r1**2) + (r4**2) - (r1*r4))\n volPeanut = math.pi * (t1 - t2 - t3) * ((r2**2) + (r3**2) - (r2*r3)) / 3.0\n volChoc = volCup - volPeanut\n ratio = volChoc/volPeanut\n print \"Ratio for \" + name + \" is \" + str(ratio)\n results[name] = [r1, volChoc, volPeanut, volCup, ratio]\n return results", "def get_radius(center, rad, speed_limit):\n i = Intersection(center, rad, speed_limit)\n return i.get_radius()", "def radii(self) -> Quantity:\n return self._radii", "def boringInterlude (radiusIn):\n\n\n import math\n volIn = (4/3) * math.pi * (radiusIn ** 3)\n vol = volIn/ 1728\n return vol" ]
[ "0.70751446", "0.69737434", "0.69169736", "0.6792967", "0.6701074", "0.66413015", "0.6604082", "0.6572043", "0.63632387", "0.62836486", "0.62836486", "0.62725145", "0.613928", "0.6092999", "0.6058674", "0.5973587", "0.5946293", "0.59038186", "0.5898153", "0.5847563", "0.58467704", "0.5825417", "0.58080184", "0.5768488", "0.5760287", "0.57283777", "0.5701261", "0.5652844", "0.56507933", "0.5642929" ]
0.71128243
0
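
A quick sanity check of the `sphrad` helper in this row (illustrative; assumes NumPy). The volume of a radius-1 sphere is 4/3*pi, so inverting it should give a radius of 1.0, and the same expression works elementwise on an array of volumes.

import numpy as np

def sphrad(vol):
    # Same one-liner as the document above: radius of a sphere with the given volume.
    return (3.*vol/(4.*np.pi))**(1./3.)

print(sphrad(4.0/3.0*np.pi))                  # -> 1.0
print(sphrad(np.array([4.18879, 33.51032])))  # -> approximately [1.0, 2.0]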
Change Karma: Make sure that the user can make a karma change using rate limiting, and return whether or not the karma value was added or changed.
def _change_karma(self, name, change):
    can_change = self._apply_rate_limit()
    if not can_change:
        return False
    res = self.bot.db.execute('SELECT target, karma FROM karma')
    for target in res.fetchall():
        if target[0].lower() == name.lower():
            self.bot.db.execute('UPDATE karma SET karma = karma + ? WHERE target = ?', (change, target[0],))
            self.bot.db.commit()
            return True
    self.bot.db.execute("INSERT INTO karma (target, karma) VALUES(?, 0)", (name,))
    self.bot.db.execute("UPDATE karma SET karma = karma + ? WHERE target = ?", (change, name,))
    self.bot.db.commit()
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _change_karma(self, nick, target, mode):\n if nick == target:\n return \"You can't modify your own karma.\"\n if target in self.karma and (datetime.datetime.now() -\n self.karma[target][2]).seconds < 5:\n return 'Karma spamming is prohibited.'\n if not target in self.karma:\n self.karma[target] = [0, 0, datetime.datetime.now()]\n if mode: # Increase\n self.karma[target][0] += 1\n else: # Decrease\n self.karma[target][1] += 1\n self.karma[target][2] = datetime.datetime.now()", "def recalculate_karma():\n if not profile_model:\n raise SiteProfileNotAvailable\n\n sql = '''\n UPDATE\n %s\n SET\n karma = %%s\n ''' % connection.ops.quote_name(profile_model._meta.db_table)\n cursor = connection.cursor()\n cursor.execute(sql, (INITIAL_USER_KARMA,))\n\n for mw in ModelWeight.objects.all():\n update_karma_for_ct(mw.content_type, mw.owner_field, mw.weight)\n return True", "def refreshKarma(self):\n hn = HackerNewsAPI()\n source = hn.getSource(self.userPageURL)\n karmaStart = source.find('<td valign=top>karma:</td><td>') + 30\n karmaEnd = source.find('</td>', karmaStart)\n karma = source[karmaStart:karmaEnd]\n if karma is not '':\n self.karma = int(karma)\n else:\n raise HNException(\"Error getting karma for user \" + self.name)", "def testKarma(self):\n\n\t\t\t\tspinner.Synonym.objects.add('directory', 'catalog', 10, True)\n\t\t\t\tspinner.Synonym.objects.add('list', 'directory', 20, True)\n\t\t\t\tspinner.Synonym.objects.add('directory', 'guide', 10, True)\n\n\t\t\t\tsynonyms = spinner.Synonym.objects.get_synonyms(['directory'])[0]\n\t\t\t\t\n\t\t\t\tfor word in synonyms:\n\t\t\t\t\t\tif word.total_karma < 10:\n\t\t\t\t\t\t\t\tassert False, 'Karma was not recorded correctly'", "def test_vote_view_update_author_karma(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n user = User.objects.get(username=\"John\")\n karma = UserProfileInfo.objects.get(user=user).karma\n response = self.client.post('/posts/2/vote/', {\"vote\": \"-1\"})\n self.assertEqual(UserProfileInfo.objects.get(user=user).karma, karma - 1)", "def karma(_: Bot, update: Update):\n\n user_karma = analytics.get_karma(\n update.message.from_user.id, update.message.chat.id\n )\n\n update.message.reply_text(user_karma)", "def reset_karma(self, msg):\n level = 15\n if self.require_level(msg.user, level):\n word = msg.params.split(' ')[0].lower()\n if self._word_exists(word):\n del self.vault[word]\n self.vault.sync()\n return _('Reset karma of %s.' % word)\n else:\n return _('%s has no karma recordings yet.' % word)\n else:\n return _('Unauthorized. Level %s required.' 
% level)", "def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_account_quota():", "def karma_point(phenny, input):\n user = input.group(1)\n op = input.group(2)\n if input.nick == user:\n if '+' in op:\n phenny.reply(\"Silly, you can't award yourself karma...\")\n return\n elif '-' in op:\n phenny.reply(\"Wow, you must have really been bad to take karma from yourself...\")\n f = ops.get(op, lambda x: x)\n conn = sqlite3.connect(phenny.karma_dao.db_path)\n phenny.karma_dao.update_karma(conn, user, f, 0)", "def test_update_rate_plan(self):\n pass", "def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_50():", "def test_remove_view_update_author_karma(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n user = User.objects.get(username=\"John\")\n karma = UserProfileInfo.objects.get(user=user).karma\n response = self.client.post('/posts/1/remove_vote/')\n self.assertEqual(UserProfileInfo.objects.get(user=user).karma, karma - 1)", "def test_update_subscription_premium(self):\n\n self.assertEqual(first=10, second=self.subscription.radius)\n url = reverse('subscription-detail', args=(self.subscription.id,))\n data = {\n 'type': 'premium',\n 'radius': 30,\n 'swipes_count': 0\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n response = self.client.patch(path=url, content_type='application/json', data=json_data)\n self.subscription.refresh_from_db()\n self.assertEqual(first=200, second=response.status_code)\n self.assertEqual(first=30, second=self.subscription.radius)", "def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_account_quota():", "def keychange(self):\n # if response.json()['error']['errors'][0]['reason']=='quotaExceeded':\n self.keyindex += 1\n if self.keyindex == len(self.keylist):\n self.keyindex = 0\n print('Keylist length reached')\n print('Changinf Key..')\n key = self.keylist[self.keyindex]\n print(\"Quota Exceeded\", self.keyindex)\n return key", "def test_auto_update_ms_kirkaldy_method(self):\n with app.test_client() as client:\n self.login_client(client)\n\n res = client.post(\n '/v1/sim/configs/ms',\n data=json.dumps(\n {\n 'method': 'Kirkaldy83',\n 'alloy_store': ALLOY_STORE\n }\n ),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assert200(res)\n self.assertEqual(data['status'], 'success')\n ms_temp = np.float32(data['data']['ms_temp'])\n ms_rate_param = np.float32(data['data']['ms_rate_param'])\n self.assertAlmostEqual(ms_temp, 477.5753, 2)\n self.assertAlmostEqual(ms_rate_param, 0.02069, 2)", "def test_change_default_throttling_settings_http_with_overwrite_throttled():", "def test_special_login_to_put_changes_to_meter(self):\n meter = Meter.objects.create(meter_name='testmeter', meter_unit='X')\n meter.save()\n\n p = Permission.objects.get(name='Can change meter')\n self.user.user_permissions.add(p)\n\n url = reverse('api_v1:meter-detail', kwargs={'pk':1})\n self.client.login(username='testuser', password='q2w3E$R%')\n data = json.dumps({'meter_name': 'testmeter_altered'})\n response = self.client.patch(url,\n data,\n follow=True,\n content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('testmeter_altered', str(response.content))\n self.assertIn('X', str(response.content))\n\n data = json.dumps({'meter_unit': 'Y'})\n response = self.client.patch(url,\n data,\n follow=True,\n content_type='application/json')\n 
self.assertEqual(response.status_code, 200)\n self.assertIn('testmeter_altered', str(response.content))\n self.assertIn('Y', str(response.content))", "def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_50():", "def rate(self, newrate):\n command = 'rate ' + str(newrate)\n self.run_command(command)", "def _apply_rate_limit(self):\n update_time = time()\n user_name = self.bot.user.full_name\n if user_name in self.tokens.keys():\n last_change = self.tokens[user_name][0]\n # Add 1 token for every 30 seconds from the last change\n added_tokens = int((update_time - last_change) / 30)\n self.tokens[user_name][1] += added_tokens\n # Max at 5 self.tokens\n if self.tokens[user_name][1] > 5:\n self.tokens[user_name][1] = 5\n else:\n # Initialize the users token pair (last change, # of self.tokens)\n self.tokens[user_name] = [update_time, 5] # Start with 5 self.tokens\n if self.tokens[user_name][1] <= 0:\n return False\n self.tokens[user_name][1] -= 1\n return True", "def test_change_default_throttling_settings_http_with_overwrite_not_throttled():", "def test_update_risk_profile_using_put(self):\n pass", "def test_retire_rate_plan(self):\n pass", "async def rate(self, ctx: commands.Context, rate: hundred_int):\n await self.config.rate.set(rate)\n await ctx.send(f\"The Plague Game rate has been set to {rate}%.\")", "def check(self, value):\n\t\t\n\t\tif value <= self.current_rate:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def change_plan(request):\n\n data = request.data\n\n start_date = datetime.datetime.now().strftime(\"%c\")\n end_date = end_date = (datetime.datetime.now() + datetime.timedelta(30)).strftime(\"%x\")\n \n # print(data[\"subscription_plan\"])\n \n try: \n user = User.objects.get(email=request.user) \n customer = Customer.objects.get(user=user)\n subscription_plan = SubscriptionPlan.objects.get(subscription_plan_name=data[\"subscription_plan\"])\n\n if customer.is_subscribe:\n stripe.Subscription.delete(\n customer.subscription_id,\n ) \n\n plan_id = \"price_1JsHMxSDkRo5FXlkOsq2QHSV\"\n\n if data[\"subscription_plan\"]== \"Globalnet Silver\":\n plan_id = \"price_1JsHOJSDkRo5FXlkQmfEQzhN\"\n \n if data[\"subscription_plan\"]== \"Globalnet Gold\":\n plan_id = \"price_1JsHPFSDkRo5FXlk9VSl41rV\"\n\n # Create new stripe subscription\n subscription = stripe.Subscription.create(\n customer = customer.stripe_id,\n items = [{'plan':plan_id}]\n ) \n \n # Update SubscriptionData \n subscription_user_data = SubscriptionData.objects.filter(subscriber=customer.primary_number) \n for data_subscriber in subscription_user_data:\n if(data_subscriber.subscription_start == customer.start_date):\n data_subscriber.subscription_end = start_date \n data_subscriber.save() \n break \n \n \n # Change subscription plan info\n customer.subscription_plan = subscription_plan\n customer.start_date = start_date\n customer.end_date = end_date\n customer.subscription_id = subscription.id\n customer.is_subscribe = True\n customer.save()\n \n # Create new subscription data \n SubscriptionData.objects.create(\n subscriber = customer.primary_number,\n subscription = subscription_plan.subscription_plan_name,\n subscription_start = start_date,\n subscription_end = end_date \n \n )\n \n serializer= CustomerSerializer(customer,many=False)\n \n return Response(serializer.data)\n \n except Exception as e: \n message = {\"Error\":str(e)}\n return Response(message)", "def test_switch_from_free_set_expiry(self):\n u = User.objects.get(username=\"test1\")\n u.userplan.expire = None\n 
u.userplan.plan = Plan.objects.get(name=\"Free\")\n u.userplan.save()\n self.assertIsNone(u.userplan.expire)\n self.assertTrue(u.userplan.plan.is_free())\n\n plan = Plan.objects.get(name=\"Standard\")\n self.assertFalse(plan.is_free())\n self.assertNotEqual(u.userplan.plan, plan)\n plan_pricing = PlanPricing.objects.filter(Q(plan=plan) & Q(pricing__period=30))[\n 0\n ]\n\n # Switch to Standard Plan\n u.userplan.extend_account(plan, plan_pricing.pricing)\n self.assertEqual(u.userplan.plan, plan)\n self.assertIsNotNone(u.userplan.expire)\n self.assertEqual(u.userplan.active, True)", "def put(self):\n request_data = request.get_json()\n plan = request_data[\"plan\"]\n\n user = get_authenticated_user()\n if not user.stripe_id:\n raise InvalidRequest()\n\n price = get_price(plan, False)\n if not price:\n abort(404, message=\"Plan not found\")\n\n return change_subscription(user, price)", "def check_api_use_rate():\n with open('api_use.csv', 'r') as api_use_file:\n csv_reader = csv.reader(api_use_file)\n last_date_used_unparsed, times_used_since_last_reset_unparsed = next(csv_reader)\n\n month, day, year, hour, minute = [int(item)\n for item in last_date_used_unparsed.split(\"/\")\n ]\n\n last_time_used = datetime.datetime(year, month, day, hour, minute)\n times_used_since_last_reset = int(times_used_since_last_reset_unparsed)\n\n current_time = datetime.datetime.now()\n\n time_since_last_use = current_time - last_time_used\n seconds_since_last_use = time_since_last_use.seconds\n\n # if it hasn't been ten minutes since the last time you used it\n if seconds_since_last_use < 460:\n # if it hasn't been used more than 8 times\n if times_used_since_last_reset < 9:\n # update last time use and times used\n times_used_since_last_reset += 1\n last_time_used = current_time\n print(\"You can use the api\")\n print(\"You have {} uses remaining and {} minutes before the reset\".format(\n 10 - times_used_since_last_reset, (460 - seconds_since_last_use) / 60.0\n ))\n update_tracker(last_time_used, times_used_since_last_reset)\n return True\n # if it has been used 8 times in the last ten minutes\n elif times_used_since_last_reset >= 9:\n print(\"Warning you have used the api {} times in 10 minutes.\".format(\n times_used_since_last_reset))\n return False\n # if it has been more than 9 minutes you are good to go\n elif seconds_since_last_use >= 460:\n # okay to use. reset current time and times used\n times_used_since_last_reset = 1\n last_time_used = current_time\n print(\"It's been more than 9 minutes since last use. You are good to go\")\n update_tracker(last_time_used, times_used_since_last_reset)\n return True" ]
[ "0.69934213", "0.6468223", "0.61024314", "0.6042686", "0.60205877", "0.59011203", "0.5773027", "0.5760536", "0.572008", "0.56760347", "0.56500137", "0.5561731", "0.53875303", "0.5348932", "0.53422016", "0.52225053", "0.5194737", "0.518642", "0.5163019", "0.51595706", "0.5144919", "0.5137498", "0.5135674", "0.5130989", "0.5099556", "0.50666046", "0.50641805", "0.50394046", "0.50122696", "0.50056875" ]
0.7706274
0
Apply Rate Limit. Check how frequently the current user has run karma commands and, if they exceed a certain threshold (30 seconds), return False so they don't make any karma changes
def _apply_rate_limit(self): update_time = time() user_name = self.bot.user.full_name if user_name in self.tokens.keys(): last_change = self.tokens[user_name][0] # Add 1 token for every 30 seconds from the last change added_tokens = int((update_time - last_change) / 30) self.tokens[user_name][1] += added_tokens # Max at 5 self.tokens if self.tokens[user_name][1] > 5: self.tokens[user_name][1] = 5 else: # Initialize the users token pair (last change, # of self.tokens) self.tokens[user_name] = [update_time, 5] # Start with 5 self.tokens if self.tokens[user_name][1] <= 0: return False self.tokens[user_name][1] -= 1 return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n if t <= 0:\n self.rem = 40\n self.time_start = time.time()\n else:\n if self.policy == Limit.Sleep:\n time.sleep(t)\n elif self.policy == Limit.Ignore:\n return False\n\n self.rem -= 1\n return True", "def _change_karma(self, name, change):\n can_change = self._apply_rate_limit()\n if not can_change: return False\n res = self.bot.db.execute('SELECT target, karma FROM karma')\n for target in res.fetchall():\n if target[0].lower() == name.lower():\n self.bot.db.execute('UPDATE karma SET karma = karma + ? WHERE target = ?', (change, target[0],))\n self.bot.db.commit()\n return True\n self.bot.db.execute(\"INSERT INTO karma (target, karma) VALUES(?, 0)\", (name,))\n self.bot.db.execute(\"UPDATE karma SET karma = karma + ? WHERE target = ?\", (change, name,))\n self.bot.db.commit()\n return True", "def customer_throttling_checked(request, user_input):\n user = request.user\n plan_monthly_requests = user.customer.plan.monthly_requests\n current_period_end_date = user.customer.current_period_end_date\n spare_requests = user.customer.spare_requests\n current_period_start_date = current_period_end_date - relativedelta(\n months=1\n )\n\n # Count all requests during the month with a 200 code\n # except requests already done by customer (eg same\n # domain name).\n api_calls_counter = ApiAccessLog.objects.filter(\n user=user,\n http_response_code=200,\n date__lte=current_period_end_date,\n date__gt=current_period_start_date\n ).exclude(\n user_input=user_input\n ).distinct('user_input').count()\n\n if api_calls_counter <= plan_monthly_requests + spare_requests:\n return True\n else:\n return False", "def _check_timeouts(self):\n\n expired_tokens = []\n for token in self._capability_timeouts:\n interval = datetime.utcnow() - self._capability_timeouts[token]\n if interval.total_seconds() >= 10:\n expired_tokens.append(token)\n\n for token in expired_tokens:\n cap_withdraw = mplane.model.Withdrawal(capability=self._capabilities[token])\n self.handle_message(cap_withdraw, self.identity_for(token))", "def _check_throttles_decorator(func):\n @wraps(func)\n def _decorated(*args, **kwargs):\n # Skip the throttle check entirely if we've disabled rate limiting.\n # Otherwise, perform the checks (as usual)\n if RateLimitConfiguration.current().enabled:\n return func(*args, **kwargs)\n else:\n msg = \"Rate limiting is disabled because `RateLimitConfiguration` is not enabled.\"\n LOGGER.info(msg)\n return\n\n return _decorated", "def _throttle_time(provider):\n if provider == 'nominatim':\n return 1\n else:\n return 0", "def testKarma(self):\n\n\t\t\t\tspinner.Synonym.objects.add('directory', 'catalog', 10, True)\n\t\t\t\tspinner.Synonym.objects.add('list', 'directory', 20, True)\n\t\t\t\tspinner.Synonym.objects.add('directory', 'guide', 10, True)\n\n\t\t\t\tsynonyms = spinner.Synonym.objects.get_synonyms(['directory'])[0]\n\t\t\t\t\n\t\t\t\tfor word in synonyms:\n\t\t\t\t\t\tif word.total_karma < 10:\n\t\t\t\t\t\t\t\tassert False, 'Karma was not recorded correctly'", "def check_attack(self):\n now = time.time() * 1000\n if self.prev_time is None:\n return True\n else:\n next_time = self.prev_time + self.get_recharge\n if now >= next_time:\n return True\n else:\n return False", "def can_execute(self, msg, command, now):\n if command not in self.user_limit:\n return True, 
0\n expiry = self.user_limit[command].get(msg.author.id, 0)\n return now > expiry, expiry-now", "def check(self):\n logging.info(\"rate limit remaining %s\" % self.remaining)\n while self.remaining <= 1:\n now = time.time()\n logging.debug(\"rate limit < 1, now=%s and reset=%s\", now,\n self.reset)\n if self.reset and now < self.reset:\n # padded with 5 seconds just to be on the safe side\n secs = self.reset - now + 5\n logging.info(\"sleeping %s seconds for rate limiting\" % secs)\n time.sleep(secs)\n else:\n # sleep a second before checking again for new rate limit\n time.sleep(1)\n # get the latest limit\n self.ping()\n self.remaining -= 1", "def rate_limiting(cls):\n this_click_time = time.time()\n time_to_last_click = None\n if cls.last_click_time:\n time_to_last_click = this_click_time - cls.last_click_time\n cls.last_click_time = this_click_time\n return time_to_last_click and time_to_last_click < 0.7", "def _change_karma(self, nick, target, mode):\n if nick == target:\n return \"You can't modify your own karma.\"\n if target in self.karma and (datetime.datetime.now() -\n self.karma[target][2]).seconds < 5:\n return 'Karma spamming is prohibited.'\n if not target in self.karma:\n self.karma[target] = [0, 0, datetime.datetime.now()]\n if mode: # Increase\n self.karma[target][0] += 1\n else: # Decrease\n self.karma[target][1] += 1\n self.karma[target][2] = datetime.datetime.now()", "def _is_limited(request, rate, rl):\n def inner(*args, **kwargs):\n is_limited = rl.is_limited(*args, **kwargs)\n\n if is_limited:\n messages.error(\n request,\n _(\"Too many submissions, wait %(time)s.\") % {\n 'time': rate.split('/')[1]})\n\n return is_limited\n\n return inner", "def is_over_quota(conn, project_id, user_id):\r\n\r\n over_quota = False\r\n\r\n # Start by checking for user quota\r\n user_alarm_quota = cfg.CONF.alarm.user_alarm_quota\r\n if user_alarm_quota is not None:\r\n user_alarms = list(conn.get_alarms(user=user_id))\r\n over_quota = len(user_alarms) >= user_alarm_quota\r\n\r\n # If the user quota isn't reached, we check for the project quota\r\n if not over_quota:\r\n project_alarm_quota = cfg.CONF.alarm.project_alarm_quota\r\n if project_alarm_quota is not None:\r\n project_alarms = list(conn.get_alarms(project=project_id))\r\n over_quota = len(project_alarms) >= project_alarm_quota\r\n\r\n return over_quota", "def can_act(self) -> bool:\n return self.cooldown < 1", "def can_act(self) -> bool:\n return self.cooldown < 1", "def rate_limit(entity, limit, duration=60):\n\n return current_rate(entity, limit, duration) > limit", "def handle_rate_limit(rate_limit):\n remaining = rate_limit['remaining']\n limit = rate_limit['limit']\n percent_remaining = remaining / limit\n reset_at = rate_limit['resetAt']\n if percent_remaining < 0.15:\n reset_at = datetime.strptime(reset_at, '%Y-%m-%dT%H:%M:%SZ')\n current_time = datetime.now()\n time_diff = reset_at - current_time\n seconds = time_diff.total_seconds()\n\n print(f'Rate Limit hit. 
Waiting for reset.\\nProcess will continue at: {reset_at}')\n\n time.sleep(seconds)", "def check_timeout(flag: Callable, limit: float) -> bool:\n timed_out = False\n if HAS_SUPERVISOR:\n start = supervisor.ticks_ms()\n while not timed_out and not flag():\n if ticks_diff(supervisor.ticks_ms(), start) >= limit * 1000:\n timed_out = True\n else:\n start = time.monotonic()\n while not timed_out and not flag():\n if time.monotonic() - start >= limit:\n timed_out = True\n return timed_out", "def check(self):\n self.__check_request_limit()", "async def _user_update_threshold(self, user_config: dict):\n return 30.0 * user_config['backoff_factor']", "def should_be_throttled(self, identifier, **kwargs):\r\n key = self.convert_identifier_to_key(identifier)\r\n\r\n # Make sure something is there.\r\n cache.add(key, [])\r\n\r\n # Weed out anything older than the timeframe.\r\n minimum_time = int(time.time()) - int(self.timeframe)\r\n times_accessed = [\r\n access for access in cache.get(key) if access >= minimum_time]\r\n cache.set(key, times_accessed, self.expiration)\r\n\r\n if len(times_accessed) >= int(self.throttle_at):\r\n # Throttle them.\r\n return True\r\n\r\n # Let them through.\r\n return False", "def check_api_use_rate():\n with open('api_use.csv', 'r') as api_use_file:\n csv_reader = csv.reader(api_use_file)\n last_date_used_unparsed, times_used_since_last_reset_unparsed = next(csv_reader)\n\n month, day, year, hour, minute = [int(item)\n for item in last_date_used_unparsed.split(\"/\")\n ]\n\n last_time_used = datetime.datetime(year, month, day, hour, minute)\n times_used_since_last_reset = int(times_used_since_last_reset_unparsed)\n\n current_time = datetime.datetime.now()\n\n time_since_last_use = current_time - last_time_used\n seconds_since_last_use = time_since_last_use.seconds\n\n # if it hasn't been ten minutes since the last time you used it\n if seconds_since_last_use < 460:\n # if it hasn't been used more than 8 times\n if times_used_since_last_reset < 9:\n # update last time use and times used\n times_used_since_last_reset += 1\n last_time_used = current_time\n print(\"You can use the api\")\n print(\"You have {} uses remaining and {} minutes before the reset\".format(\n 10 - times_used_since_last_reset, (460 - seconds_since_last_use) / 60.0\n ))\n update_tracker(last_time_used, times_used_since_last_reset)\n return True\n # if it has been used 8 times in the last ten minutes\n elif times_used_since_last_reset >= 9:\n print(\"Warning you have used the api {} times in 10 minutes.\".format(\n times_used_since_last_reset))\n return False\n # if it has been more than 9 minutes you are good to go\n elif seconds_since_last_use >= 460:\n # okay to use. reset current time and times used\n times_used_since_last_reset = 1\n last_time_used = current_time\n print(\"It's been more than 9 minutes since last use. You are good to go\")\n update_tracker(last_time_used, times_used_since_last_reset)\n return True", "def canAct(self) -> bool:\n return self.cooldown < 1", "def check_engine_limits(current_rqmt, task):\n current_rqmt['time'] = min(168, current_rqmt.get('time', 1))\n return current_rqmt", "async def rate_limit(self, ctx):\n await ctx.send(\"We have found that the approximate rate limit is 30-40 requests per second. 
Staying \"\n \"below this should be safe.\")", "def should_be_throttled(self, identifier, **kwargs):\r\n return False", "def test_request_throttling_is_per_user(self):\n self.ensure_is_throttled(MockView, 200)", "def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_50():", "def verify_is_allowed(self):\n if (\n self.throttling_enabled\n and self.throttling_failure_count > 0\n and self.throttling_failure_timestamp is not None\n ):\n now = timezone.now()\n delay = (now - self.throttling_failure_timestamp).total_seconds()\n # Required delays should be 1, 2, 4, 8 ...\n delay_required = self.get_throttle_factor() * (\n 2 ** (self.throttling_failure_count - 1)\n )\n if delay < delay_required:\n return (\n False,\n {\n 'reason': VerifyNotAllowed.N_FAILED_ATTEMPTS,\n 'failure_count': self.throttling_failure_count,\n 'locked_until': self.throttling_failure_timestamp\n + timedelta(seconds=delay_required),\n },\n )\n\n return super().verify_is_allowed()" ]
[ "0.62723166", "0.61897874", "0.6130558", "0.594389", "0.5937632", "0.59163153", "0.5882192", "0.5846787", "0.5845753", "0.57633436", "0.57333547", "0.5716136", "0.57108897", "0.57000345", "0.5694648", "0.5694648", "0.56865287", "0.5670449", "0.56523293", "0.56270933", "0.561915", "0.5599575", "0.55987537", "0.5583821", "0.5579002", "0.5561336", "0.55582964", "0.55551726", "0.554963", "0.55457443" ]
0.65764576
0
Compute the total duty percentage for each purchase line. There is an extra duty for some vendors. If the 'extra_duty' field's value is true, then we use a duty percentage (0.288 most probably) for every 144 qtys
def compute_total_customs_duty(self): for rec in self: total = 0.0 extra_duty = 0.0 price_total = rec.quantity * rec.unit_price # total = (price_total * duty_percentage)/100 rec.price_total = price_total # for hts in rec.hts_ids: # if hts.extra_duty_applicable: # extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty) # rec.total = total + extra_duty return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_duty_percentage(self):\n container_line_ids = self\n hbl_customs_obj = self.env['hbl.customs.duty']\n for line in container_line_ids:\n p_line = line.purchase_line\n #Get the supplier from product by using po supplier id.\n product_supplier_id = p_line.product_id.seller_ids.filtered(lambda rec:rec.name.id == p_line.partner_id.id and rec.hts_codes_ids)\n #Get HTS code of the supplier\n hts_codes_ids = product_supplier_id and product_supplier_id[0].hts_codes_ids or False\n if hts_codes_ids:\n percentage = sum(hts_codes_ids.mapped('percentage'))\n line_customs_id = hbl_customs_obj.create({'hbl_line_id' : line.id,\n 'hts_ids': [(6,_, hts_codes_ids.ids)],\n 'duty_percentage': percentage,\n 'quantity' : line.qty_to_load,\n 'unit_price' : p_line.price_unit\n })\n line.write({'line_customs_id' : line_customs_id.id})", "def compute_duty_factor():\n [time,\n ankle_l_trajectory,\n ankle_r_trajectory,\n foot_l_contact,\n foot_r_contact,\n muscle_lh_activations,\n muscle_rh_activations,\n muscle_lh_forces,\n muscle_rh_forces,\n joint_lh_positions,\n joint_rh_positions] = load_data()\n \n print(np.sum(foot_l_contact)/len(foot_l_contact))\n print(np.sum(foot_r_contact)/len(foot_r_contact))\n\n return np.sum(foot_l_contact)/len(foot_l_contact)*0.5 + np.sum(foot_r_contact)/len(foot_r_contact)*0.5", "def total_discount_incl_tax(self):\n discount = D(\"0.00\")\n for line in self.lines.all():\n discount += line.discount_incl_tax\n return discount", "def compute_quotation_price(self):\n result = decimal.Decimal('0')\n if self.vehiculePrice:\n result = self.vehiculePrice * 2 / 100\n if self.covWind:\n result += get_coverage_price_by_name(\"WIND\")\n if self.covPass:\n result += get_coverage_price_by_name(\"PASS\")\n if self.covFlood:\n result += get_coverage_price_by_name(\"FLOOD\")\n return result", "def get_percentage(self):\n return self.PotTax_percentage", "def duty(self):\n if self._chanRMT < 0:\n return self._pin.duty()\n else:\n return self._dutyRMT", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def _compute_tax(self):\n for line in self:\n line.tax = (line.amount_untaxed * 14) / 100", "def _compute_amount(self):\n for line in self:\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n if(line.is_discount_allow and line.price_subtotal > 100):\n line.price_subtotal = line.price_subtotal - 100", "def _compute_amount(self):\n for line in self:\n price = line.price_unit\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty,\n product=line.product_id, partner=line.order_id.partner_shipping_id)\n self_price_subtotal = taxes['total_excluded']\n if not line.discount_fixed_percent:\n self_price_subtotal = self_price_subtotal\n if line.discount_fixed_percent == 'Percent':\n self_price_subtotal = self_price_subtotal * (1 - (line.discount or 0.0) / 100.0)\n if line.discount_fixed_percent == 'Fixed':\n self_price_subtotal = self_price_subtotal - line.discount_value\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': self_price_subtotal,\n })", "def 
patrimony_total(self):\n pass", "def get_percent_oxygen(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[1]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'po read error: {err}')\n return -1", "def represent_total_percent(self, length):\n numpkgs = self.totals['numpkgs']\n dlpkgs = self.totals['dlpkgs']\n return self.represent_percent(dlpkgs, numpkgs, length)", "def discount(self, period):\n\t\treturn 1.0/compound(period)", "def _compute_cuantia_subtotal(self):\n for line in self:\n line.gasto = line.unidades * line.pvp", "def duty(self, duty: Optional[int] = None) -> Optional[int]:\n ...", "def calculate_percent(self, total_number, some_number):\n\t\treturn (some_number * 100) / total_number", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))", "def calculate_total(self):\n if self.total_price == 0:\n for discount in self.discounts:\n for item in self.items:\n item.add_discount(discount)\n\n for item in self.items:\n self.total_price += item.final_price()\n\n return self.total_price", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def cash_coupon(certificate, percentage):\n return sum(stake for name, stake in certificate['underlyings'].items()) * percentage", "def pct_helper(self,k,d,total):\n if k in d:\n return 100.0*d[k]/total\n else:\n return -100.0", "def pct(self):\n\t\treturn self.bottle.pct()", "def duty_cycle(self):\n\n duty_cycle_ns = int(utils.readstr_all(os.path.join(_CHANNEL_PATH(self._chip,\n self._channel),\n 'duty_cycle')))\n if self.period > 0:\n return float(duty_cycle_ns / 1000.0 / float(self.period))\n else:\n return 0.0", "def get_total(self):\n\n self.base_price = self.get_base_price()\n\n if self.species == \"christmas melon\":\n self.base_price = self.base_price * 1.5\n\n total = (1 + self.tax) * self.qty * self.base_price\n return total", "def _compute_amount(self):\n for line in self:\n line.update({\n 'price_subtotal': line.price_unit * line.quantity,\n })", "def percent_usage(value, total):\n if total:\n return float(value) * 100.0 / (float(total) + float(value))\n else:\n return 100.0", "def get_total(self):\n\n base_price = 5\n \n if self.species == \"Christmas melon\":\n base_price = base_price * 1.5 \n\n total = (1 + self.tax) * self.qty * base_price \n\n if self.order_type == \"international\" and self.qty>10:\n total += 3\n\n\n return total", "def _total_d(self):\n debit = 0.0\n for l in self.data:\n debit += l['debit']\n self.t_credit += l['credit']\n self.t_balance += l['balance']\n return debit", "def _calc_line_base_price(self, cr, uid, line, context=None):\n return line.price_unit" ]
[ "0.7884601", "0.5777692", "0.56619155", "0.5644612", "0.5609361", "0.5539036", "0.54949725", "0.54502136", "0.5393716", "0.5389697", "0.53487086", "0.52970153", "0.5285339", "0.5277346", "0.5261509", "0.5249308", "0.5240147", "0.5224184", "0.52226484", "0.5192483", "0.5178316", "0.51681334", "0.51609814", "0.51603734", "0.51574063", "0.5150745", "0.51372087", "0.5131451", "0.51263195", "0.5117128" ]
0.78766584
1
Get information related to a specific Smart Group.
def get_by_id( self, smart_group_id, custom_headers=None, raw=False, **operation_config): # Construct URL url = self.get_by_id.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1), 'smartGroupId': self._serialize.url("smart_group_id", smart_group_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) header_dict = {} deserialized = None if response.status_code == 200: deserialized = self._deserialize('SmartGroup', response) header_dict = { 'x-ms-request-id': 'str', } if raw: client_raw_response = ClientRawResponse(deserialized, response) client_raw_response.add_headers(header_dict) return client_raw_response return deserialized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(isamAppliance, id, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving group\", \"/sysaccount/groups/{0}/v1\".format(id))", "def show_group(self, group_id):\n\n return Client._get(self, id=group_id)", "def get_group_details(self, group_id):\n url = self.groups_url + \"/\" + group_id\n return requests.get(url, headers=self.headers)", "def get_identity_group(self, group):\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.identity.identitygroup.1.0+xml'})\n\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tresp = self.ise.get('{0}/config/identitygroup?filter=name.EQ.{1}'.format(self.url_base, group))\n\t\tfound_group = ERS._to_json(resp.text)\n\n\t\tif found_group['ns3:searchResult']['@total'] == '1':\n\t\t\tresp = self.ise.get('{0}/config/identitygroup/{1}'.format(\n\t\t\t\t\tself.url_base, found_group['ns3:searchResult']['ns3:resources']['ns5:resource']['@id']))\n\t\t\tif resp.status_code == 200:\n\t\t\t\tresult['success'] = True\n\t\t\t\tresult['response'] = ERS._to_json(resp.text)['ns4:identitygroup']\n\t\t\t\treturn result\n\t\t\telif resp.status_code == 404:\n\t\t\t\tresult['response'] = '{0} not found'.format(group)\n\t\t\t\tresult['error'] = resp.status_code\n\t\t\t\treturn result\n\t\t\telse:\n\t\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\t\tresult['error'] = resp.status_code\n\t\t\t\treturn result\n\t\telif found_group['ns3:searchResult']['@total'] == '0':\n\t\t\tresult['response'] = '{0} not found'.format(group)\n\t\t\tresult['error'] = 404\n\t\t\treturn result\n\n\t\telse:\n\t\t\tresult['response'] = '{0} not found'.format(group)\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result", "def show_security_group(self, security_group, **_params):\r\n return self.get(self.security_group_path % (security_group),\r\n params=_params)", "def get_group(tkn: Token = Depends(from_authotization_header_nondyn),):\n assert_has_clearance(tkn.owner, \"sni.read_group\")\n return [\n GetGroupShortOut(group_id=str(grp.pk), group_name=grp.group_name)\n for grp in Group.objects().order_by(\"group_name\")\n ]", "def get_group(group):\n\n return ldapi.lookup(ld, 'cn', group, cfg['ldap_groups_base'])", "def customer_group_get(group_id=None):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n\n if group_id:\n query += \"\"\"\n AND `group_id` = \\\"%s\\\"\n \"\"\" % (group_id)\n\n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def get(self):\n self._group = self._client.get(\n url=self._client.get_full_url(\n self.get_path(\n 'single', realm=self._realm_name, group_id=self._group_id\n )\n )\n )\n self._group_id = self._group[\"id\"]\n return self._group", "def get_group(self, group_id: str):\n\n return self._get(f\"cloudConnectorGroups/{group_id}\")", 
"def get_groups_details(self, groups):\n assert isinstance(groups, list)\n # It may be require we request the API by splitting the names list\n # If the list is too long to be handled by the Gerrit server (URI)\n query_args = \"?%s\" % \"&\".join([\"q=%s\" % g for g in groups])\n query_args += \"&o=MEMBERS\" if groups else \"o=MEMBERS\"\n\n try:\n ret = self.g.get('groups/%s' % query_args)\n except HTTPError as e:\n return self._manage_errors(e)\n\n return ret", "def get_group_info(groupname):\n return jsonify(admin.get_group_info(current_app.scoped_session(), groupname))", "def findGroup(show, group):\n return Group(Cuebot.getStub('group').FindGroup(\n job_pb2.GroupFindGroupRequest(show=show, name=group), timeout=Cuebot.Timeout).group)", "def get_group(self, group_name):\n\n return self._group[group_name]", "def getGroup(self, group_id: int) -> 'Group':\n return self.sObj.getGroup(group_id)", "def cli(ctx, group_id):\n return ctx.gi.groups.show_group(group_id)", "def getGroup(self):\n\t\treturn self.Group", "def customer_group_get_related(group_id):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `groups`.`company_name` = (\n SELECT `asshole`.`company_name` \n FROM \n (\n SELECT * \n FROM `groups` \n WHERE `group_id` = \"%s\"\n ) AS `asshole`\n )\n \"\"\" %(group_id)\n \n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def get(cls, group_id, db_session=None):\n db_session = get_db_session(db_session)\n return db_session.query(cls.model).get(group_id)", "def get_group(self, group_id: str) -> dict:\n group = self.ms_client.http_request(method='GET', url_suffix=f'groups/{group_id}')\n return group", "def show( self, trans, encoded_id, **kwd ):\n decoded_id = trans.security.decode_id( encoded_id )\n group = self.group_manager.get( trans, decoded_id )\n if group is None:\n raise ObjectNotFound( 'Unable to locate group record for id %s.' 
% ( str( encoded_id ) ) )\n return self._populate( trans, group )", "def getStationGroupDetails(self, sgID, *stationID):\n if not stationID:\n response = self._soap_service.getStationGroupDetails(sgID)\n else:\n response = self._soap_service.getStationGroupDetails(sgID, stationID)\n\n return CPAPIResponse(response)", "def get(self, group_id):\n return self._get(\"/consistencygroups/%s\" % group_id,\n \"consistencygroup\")", "def get_group(self, group_id):\n return self.root.get(group_id)", "def get(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('GET', url)", "def get_device_group(self, device_group_oid):\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.network.networkdevicegroup.1.0+xml'})\n\n\t\tresp = self.ise.get('{0}/config/networkdevicegroup/{1}'.format(self.url_base, device_group_oid))\n\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tif resp.status_code == 200:\n\t\t\tresult['success'] = True\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns4:networkdevicegroup']\n\t\t\treturn result\n\t\telif resp.status_code == 404:\n\t\t\tresult['response'] = '{0} not found'.format(device_group_oid)\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result", "def get_service_group(servicegroup=None, vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"service-group/entry[@name='{}']\".format(vsys, servicegroup)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def get_group(self, group_path=None):\n if group_path is not None:\n path = '/group/' + group_path\n else:\n path = '/group/%2F'\n try:\n response = self.__session.get(self.__api_base_url + path)\n response.raise_for_status()\n response = response.json()\n except (requests.HTTPError, requests.ConnectionError), error:\n raise Exception(error.message)\n\n return response", "def GetGroup(self, group, reason=None):\n query = []\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_GET,\n \"/%s/groups/%s\" % (GANETI_RAPI_VERSION, group),\n query, None)", "def get(self):\n status = ErrorCode.SUCCESS\n try:\n res = []\n cid = self.get_argument('cid', None)\n if not (cid is None):\n res = QueryHelper.get_groups_by_cid(cid, self.db)\n self.write_ret(status,\n dict_=DotDict(res=res))\n except Exception as e:\n logging.exception(\"[UWEB] Get groups failed. Exception: %s\",\n e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)" ]
[ "0.6050466", "0.604989", "0.60478085", "0.60230494", "0.60096216", "0.60058165", "0.5988405", "0.59130573", "0.59111917", "0.58686566", "0.5820906", "0.58130544", "0.5768425", "0.575321", "0.57311463", "0.57198733", "0.57059264", "0.56633234", "0.5633848", "0.5626244", "0.5617494", "0.5574675", "0.55746263", "0.5550753", "0.55360246", "0.5533833", "0.55138415", "0.5512723", "0.55093664", "0.5488538" ]
0.6301686
0
Get the history of a smart group, which captures any Smart Group state changes (New/Acknowledged/Closed).
def get_history( self, smart_group_id, custom_headers=None, raw=False, **operation_config): # Construct URL url = self.get_history.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', min_length=1), 'smartGroupId': self._serialize.url("smart_group_id", smart_group_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('SmartGroupModification', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def network_history(request, SPIC_group, SPIC_id):\n SPIC_obj = get_object_or_404(SPIC, group=SPIC_group, local_id=SPIC_id)\n network_list = Network.objects.filter(user_id=request.user.pk, SPIC=SPIC_obj).order_by(\"-local_id\")\n\n return {'SPIC_obj': SPIC_obj, 'network_list': network_list}", "def get_history(self):\r\n\r\n return self.board_history", "def history(self):\n return _spacegrant_swig.general_burster_2_sptr_history(self)", "def history(self):\n return _spacegrant_swig.message_debug_sptr_history(self)", "def getChanges():", "def history(self):\n return self.board.history", "def test_get_sync_history(self):\n pass", "def QueryHistory(self):\n return []", "def orders_history(self): \n return(self._d_orders['history'])", "def get_history(hdr):\n return hdr['HISTORY']", "def history(self):\n return _spacegrant_swig.udp_debug_sptr_history(self)", "def history(self, q=None):\r\n q = q or []\r\n # allow history to be returned for deleted alarms, but scope changes\r\n # returned to those carried out on behalf of the auth'd tenant, to\r\n # avoid inappropriate cross-tenant visibility of alarm history\r\n auth_project = acl.get_limited_to_project(pecan.request.headers)\r\n conn = pecan.request.alarm_storage_conn\r\n kwargs = _query_to_kwargs(q, conn.get_alarm_changes, ['on_behalf_of',\r\n 'alarm_id'])\r\n return [AlarmChange.from_db_model(ac)\r\n for ac in conn.get_alarm_changes(self._id, auth_project,\r\n **kwargs)]", "def get_history(self, clocked: 'Clocked'):\n history = {}\n\n new_tick = self._get_new_tick(clocked)\n\n vclock_history = attributes.get_history(clocked, 'vclock')\n is_vclock_unchanged = (vclock_history.unchanged and\n new_tick == vclock_history.unchanged[0])\n\n for prop in self.history_models.keys():\n value = self._get_prop_value(clocked, prop)\n\n if value is not NOT_FOUND_SENTINEL:\n history[prop] = value\n\n return history, is_vclock_unchanged", "def get_history(self):\n return self.history", "def get_metric_history(self, metric):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/managedfolders/%s/metrics/history\" % (self.project_key, self.odb_id),\n params={'metricLookup' : metric if isinstance(metric, str) or isinstance(metric, unicode) else json.dumps(metric)})", "def groupsChanged(self):\n # Get the list of groups for the present user according to\n # the checklist.\n nglist = []\n for r in self.liststore:\n if (r[1] and (r[0] != self.gidnm)):\n nglist.append(r[0])\n if (gui.getUserGroups(gui.currentUser) != nglist):\n return nglist\n else:\n return None", "def history():", "def history(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_history(self)", "def History(self):\n return self.historydict.get('history', [])", "def history(self):\n return self.info['history']", "def history(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_history(self)", "def history(self):\n return _spacegrant_swig.binary_sink_sptr_history(self)", "def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))", "def history(self):\n return self._history", "def history(self):\n return self._history", "def get_monitored_changes(self) -> List:\n pass", "def history(self):\n return _spacegrant_swig.hdlc_framer_sptr_history(self)", "def get_history(self):\n msg_ids = self._records.keys()\n # Remove any that do not have a submitted timestamp.\n # This is extremely unlikely to happen,\n # but it seems to come up in some tests on VMs.\n msg_ids = [m for m in msg_ids if self._records[m]['submitted'] is not 
None]\n return sorted(msg_ids, key=lambda m: self._records[m]['submitted'])", "def history(self, maxresults=None, mindate=None):\n server = self._server._server.resource(self._server.name).connect()\n return server.history(maxresults=maxresults, mindate=mindate,\n accountID=self._server.accountID, librarySectionID=self.sectionKey)", "def get_order_history(self):\n return self.__call__('orders', 'getorderhistory')" ]
[ "0.59609723", "0.59375453", "0.5827547", "0.5729176", "0.568817", "0.5650969", "0.56454545", "0.563896", "0.5626192", "0.5599211", "0.55906564", "0.5589627", "0.55577046", "0.5528631", "0.55192196", "0.54704773", "0.54695994", "0.5442508", "0.5441003", "0.5439692", "0.54348654", "0.5428132", "0.5400182", "0.538492", "0.538492", "0.53844345", "0.5377751", "0.5374185", "0.53739965", "0.53218997" ]
0.6507963
0
Attenuators out, beamstop in.
def alignment_stop(): smi = SMI_Beamline() yield from smi.modeMeasurement() proposal_id('2023_2', '311564_Pettersson')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def beam_align():\n\n # do nothing if there is a sample mounted to avoid collisions\n if smart_magnet.sample_detect.get() == 0:\n raise Exception(\"Sample mounted on gonio! Avoided collision\")\n\n # wait for attenuators to finish moving\n yield from bps.abs_set(mxatten, 0.002)\n yield from bps.sleep(5)\n\n # transition to BL and open shutter\n yield from bps.abs_set(gov_rbt, \"BL\", wait=True)\n yield from bps.mv(sht.r, 0)\n\n yield from bps.abs_set(rot_aligner.cam_hi.cam_mode, \"beam_align\")\n\n # which direction, x pos. pitch beam outboard (-), y pos. pitch beam up (+)\n scan_uid = yield from bp.count([rot_aligner.cam_hi], 1)\n centroid_x, centroid_y = (\n db[scan_uid].table()[rot_aligner.cam_hi.cv1.outputs.output1.name][1],\n db[scan_uid].table()[rot_aligner.cam_hi.cv1.outputs.output2.name][1],\n )\n\n if np.isclose(0, centroid_x) or np.isclose(0, centroid_y):\n raise Exception(\"No centroid detected!\")\n\n yield from bps.abs_set(kbt.hor.delta_px, (centroid_x - 320))\n yield from bps.abs_set(kbt.ver.delta_px, -(centroid_y - 256))\n\n def lin_reg(independent, dependent, goal, **kwargs) -> float:\n b = dependent\n A = np.matrix([[pos, 1] for pos in independent])\n p = (\n np.linalg.inv(A.transpose() * A)\n * A.transpose()\n * np.matrix(b.to_numpy()).transpose()\n )\n best = (goal - p[1]) / p[0]\n return best\n\n for axis, signal, center in (\n kbt.hor,\n rot_aligner.cam_hi.cv1.outputs.output1,\n 320,\n ), (kbt.ver, rot_aligner.cam_hi.cv1.outputs.output2, 256):\n # skip if we are within 1 um\n if abs(axis.delta_px.get()) > 2:\n scan_uid = yield from rel_scan_no_reset(\n [rot_aligner.cam_hi],\n axis,\n 0,\n 0.4 * -(axis.delta_px.get() / abs(axis.delta_px.get())),\n 10,\n )\n scan_df = db[scan_uid].table()\n best_voltage = lin_reg(\n scan_df[axis.readback.name],\n scan_df[signal.name],\n center,\n )\n yield from bps.mv(axis, best_voltage)\n yield from bps.sleep(1)\n\n # close shutters and reset attenuators for manual viewing\n yield from bps.mv(sht.r, 20)", "def __init__(self, motion, **kwargs):\n super(ShiftOut, self).__init__(motion)", "def forward_att(self, eouts, elens, ys, trigger_points=None):\n losses_auxiliary = {}\n ys_in, ys_out, ylens = append_sos_eos(ys, self.eos, self.eos, self.pad, self.device, self.bwd)\n if not self.training:\n self.data_dict['elens'] = tensor2np(elens)\n self.data_dict['ylens'] = tensor2np(ylens)\n self.data_dict['ys'] = tensor2np(ys_out)\n bs, ymax = ys_in.size()[:2]\n tgt_mask = (ys_out != self.pad).unsqueeze(1).repeat([1, ymax, 1])\n causal_mask = tgt_mask.new_ones(ymax, ymax, dtype=tgt_mask.dtype)\n causal_mask = torch.tril(causal_mask).unsqueeze(0)\n tgt_mask = tgt_mask & causal_mask\n src_mask = make_pad_mask(elens).unsqueeze(1).repeat([1, ymax, 1])\n if self.attn_type == 'mocha':\n attn_mask = (ys_out != self.pad).unsqueeze(1).unsqueeze(3)\n else:\n attn_mask = None\n lmout = None\n if self.lm is not None:\n self.lm.eval()\n with torch.no_grad():\n lmout, lmstate, _ = self.lm.predict(ys_in, None)\n lmout = self.lm_output_proj(lmout)\n out = self.pos_enc(self.embed_token_id(ys_in), scale=True)\n xy_aws_layers = []\n xy_aws = None\n for lth, layer in enumerate(self.layers):\n out = layer(out, tgt_mask, eouts, src_mask, mode='parallel', lmout=lmout)\n xy_aws = layer.xy_aws\n if xy_aws is not None and self.attn_type == 'mocha':\n xy_aws_masked = xy_aws.masked_fill_(attn_mask.expand_as(xy_aws) == 0, 0)\n xy_aws_layers.append(xy_aws_masked.clone())\n if not self.training:\n self.aws_dict['yy_aws_layer%d' % lth] = tensor2np(layer.yy_aws)\n 
self.aws_dict['xy_aws_layer%d' % lth] = tensor2np(layer.xy_aws)\n self.aws_dict['xy_aws_beta_layer%d' % lth] = tensor2np(layer.xy_aws_beta)\n self.aws_dict['xy_aws_p_choose%d' % lth] = tensor2np(layer.xy_aws_p_choose)\n self.aws_dict['yy_aws_lm_layer%d' % lth] = tensor2np(layer.yy_aws_lm)\n logits = self.output(self.norm_out(out))\n loss, ppl = cross_entropy_lsm(logits, ys_out, self.lsm_prob, self.pad, self.training)\n losses_auxiliary['loss_quantity'] = 0.0\n if self.attn_type == 'mocha':\n n_tokens_ref = tgt_mask[:, -1, :].sum(1).float()\n n_tokens_pred = sum([torch.abs(aws.sum(3).sum(2).sum(1) / aws.size(1)) for aws in xy_aws_layers])\n n_tokens_pred /= len(xy_aws_layers)\n losses_auxiliary['loss_quantity'] = torch.mean(torch.abs(n_tokens_pred - n_tokens_ref))\n acc = compute_accuracy(logits, ys_out, self.pad)\n return loss, acc, ppl, losses_auxiliary", "def arm(self):\n pass", "def beam_update(self, beams, extra):\n h_i, dec_out, context, src_mask = extra\n h_i = tuple(hc[:, beams, :] for hc in h_i)\n dec_out = dec_out[beams, :]\n return h_i, dec_out, context, src_mask", "def stop(self):\n self.ae.stop()", "def tomoScan(description, inBeamPosition, outOfBeamPosition, exposureTime=1., start=0., stop=180., step=0.1, darkFieldInterval=0, flatFieldInterval=0,\n imagesPerDark=10, imagesPerFlat=10, optimizeBeamInterval=0, pattern=\"default\", tomoRotationAxis=0, addNXEntry=True, autoAnalyse=True, additionalScannables=[]):\n dataFormat = LocalProperties.get(\"gda.data.scan.datawriter.dataFormat\")\n try:\n darkFieldInterval = int(darkFieldInterval)\n flatFieldInterval = int(flatFieldInterval)\n optimizeBeamInterval = int(optimizeBeamInterval)\n \n jns = beamline_parameters.JythonNameSpaceMapping(InterfaceProvider.getJythonNamespace())\n tomography_theta = jns.tomography_theta\n if tomography_theta is None:\n raise NameError(\"tomography_theta is not defined in Jython namespace\")\n tomography_shutter = jns.tomography_shutter\n if tomography_shutter is None:\n raise NameError(\"tomography_shutter is not defined in Jython namespace\")\n tomography_translation = jns.tomography_translation\n if tomography_translation is None:\n raise NameError(\"tomography_translation is not defined in Jython namespace\")\n \n tomography_detector = jns.tomography_detector\n if tomography_detector is None:\n raise NameError(\"tomography_detector is not defined in Jython namespace\")\n\n tomography_optimizer = jns.tomography_optimizer\n if tomography_optimizer is None:\n raise NameError(\"tomography_optimizer is not defined in Jython namespace\")\n\n tomography_time = jns.tomography_time\n if tomography_time is None:\n raise NameError(\"tomography_time is not defined in Jython namespace\")\n \n tomography_beammonitor = jns.tomography_beammonitor\n if tomography_beammonitor is None:\n raise NameError(\"tomography_beammonitor is not defined in Jython namespace\")\n \n tomography_camera_stage = jns.tomography_camera_stage\n if tomography_camera_stage is None:\n raise NameError(\"tomography_camera_stage is not defined in Jython namespace\")\n \n tomography_sample_stage = jns.tomography_sample_stage\n if tomography_sample_stage is None:\n raise NameError(\"tomography_sample_stage is not defined in Jython namespace\")\n \n tomo_additional_scannables = jns.tomography_additional_scannables\n if tomo_additional_scannables is None:\n raise NameError(\"tomo_additional_scannables is not defined in Jython namespace\")\n \n index = SimpleScannable()\n index.setCurrentPosition(0.0)\n index.setInputNames([\"imageNumber\"])\n 
index.setName(\"imageNumber\")\n index.configure()\n \n image_key = SimpleScannable()\n image_key.setCurrentPosition(0.0)\n image_key.setInputNames([\"image_key\"])\n image_key.setName(\"image_key\")\n image_key.configure()\n\n tomoScanDevice = make_tomoScanDevice(tomography_theta, tomography_shutter,\n tomography_translation, tomography_optimizer, image_key, index)\n\n# return tomoScanDevice\n #generate list of positions\n numberSteps = ScannableUtils.getNumberSteps(tomography_theta, start, stop, step)\n theta_points = []\n theta_points.append(start)\n previousPoint = start\n for i in range(numberSteps):\n nextPoint = ScannableUtils.calculateNextPoint(previousPoint, step);\n theta_points.append(nextPoint)\n previousPoint = nextPoint\n \n #generateScanPoints\n optimizeBeamNo = 0\n optimizeBeamYes = 1\n shutterOpen = 1\n shutterClosed = 0\n shutterNoChange = 2\n scan_points = []\n theta_pos = theta_points[0]\n index = 0\n #Added shutterNoChange state for the shutter. The scan points are added using the (pseudo) ternary operator, \n #if index is 0 then the shutterPosition is added to the scan point, else shutterNoChange is added to scan points.\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark\n index = index + 1\n \n for i in range(imagesPerFlat): \n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat\n index = index + 1 \n scan_points.append((theta_pos, shutterOpen, inBeamPosition, optimizeBeamNo, image_key_project, index)) #first\n index = index + 1 \n imageSinceDark = 1\n imageSinceFlat = 1\n optimizeBeam = 0\n for i in range(numberSteps):\n theta_pos = theta_points[i + 1]\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_project, index))#main image\n index = index + 1 \n \n imageSinceFlat = imageSinceFlat + 1\n if imageSinceFlat == flatFieldInterval and flatFieldInterval != 0:\n for i in range(imagesPerFlat):\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index))\n index = index + 1 \n imageSinceFlat = 0\n \n imageSinceDark = imageSinceDark + 1\n if imageSinceDark == darkFieldInterval and darkFieldInterval != 0:\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index))\n index = index + 1 \n imageSinceDark = 0\n\n optimizeBeam = optimizeBeam + 1\n if optimizeBeam == optimizeBeamInterval and optimizeBeamInterval != 0:\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamYes, image_key_project, index))\n index = index + 1 \n optimizeBeam = 0\n \n #add dark and flat only if not done in last steps\n if imageSinceFlat != 0:\n for i in range(imagesPerFlat):\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat\n index = index + 1\n if imageSinceDark != 0:\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark\n index = index + 1 \n scan_points1 = generateScanPoints(inBeamPosition, outOfBeamPosition, theta_points, darkFieldInterval, flatFieldInterval,\n imagesPerDark, imagesPerFlat, optimizeBeamInterval, 
pattern=pattern)\n if pattern == 'default' or pattern == 'DFPFD':\n i = 0\n for pt1 in scan_points1:\n pt = scan_points[i]\n if pt1 != pt:\n print \"Mismatch - please tell Kaz about your scan and its arguments!\"\n print \"i = \", i\n print \"pt = \", pt\n print \"pt1 = \", pt1\n i += 1\n #return None\n positionProvider = tomoScan_positions(start, stop, step, darkFieldInterval, imagesPerDark, flatFieldInterval, imagesPerFlat, \\\n inBeamPosition, outOfBeamPosition, optimizeBeamInterval, scan_points) \n scan_args = [tomoScanDevice, positionProvider, tomography_time, tomography_beammonitor, tomography_detector, exposureTime, tomography_camera_stage, tomography_sample_stage]\n #scan_args.append(RotationAxisScannable(\"approxCOR\", tomoRotationAxis))\n #meta_add(RotationAxisScannable(\"approxCOR\", tomoRotationAxis))\n #meta_add(\"RotationCoord_as_list\", [tomoRotationAxis])\n meta_add(\"approxCOR\", tomoRotationAxis)\n for scannable in additionalScannables:\n scan_args.append(scannable)\n for scannable in tomo_additional_scannables:\n scan_args.append(scannable)\n ''' setting the description provided as the title'''\n if not description == None: \n setTitle(description)\n else :\n setTitle(\"undefined\")\n \n dataFormat = LocalProperties.get(\"gda.data.scan.datawriter.dataFormat\")\n if not dataFormat == \"NexusDataWriter\":\n handle_messages.simpleLog(\"Data format inconsistent. Setting 'gda.data.scan.datawriter.dataFormat' to 'NexusDataWriter'\")\n LocalProperties.set(\"gda.data.scan.datawriter.dataFormat\", \"NexusDataWriter\")\n scanObject = createConcurrentScan(scan_args)\n if addNXEntry:\n addNXTomoSubentry(scanObject, tomography_detector.name, tomography_theta.name)\n scanObject.runScan()\n if autoAnalyse:\n lsdp=jns.lastScanDataPoint()\n OSCommandRunner.runNoWait([\"/dls_sw/apps/tomopy/tomopy/bin/gda/tomo_at_scan_end_kz\", lsdp.currentFilename], OSCommandRunner.LOGOPTION.ALWAYS, None)\n return scanObject;\n except InterruptedException:\n exceptionType, exception, traceback = sys.exc_info()\n handle_messages.log(None, \"User interrupted the scan\", exceptionType, exception, traceback, False)\n raise InterruptedException(\"User interrupted the scan\")\n except:\n exceptionType, exception, traceback = sys.exc_info()\n handle_messages.log(None, \"Error during tomography scan\", exceptionType, exception, traceback, False)\n raise Exception(\"Error during tomography scan\", exception)\n finally:\n handle_messages.simpleLog(\"Data Format reset to the original setting: \" + dataFormat)\n LocalProperties.set(\"gda.data.scan.datawriter.dataFormat\", dataFormat)", "def stop_beam(self, wait=False):\n if self.SE == 6:\n self.evr.polarity.put('VAL', 0, use_complete=True)\n \n self.records.S_CLOSE.put('VAL', 1, use_complete=True, wait=wait)", "def enemy_waves(self):\n\n pass", "def control_law( self, inputs ):", "def stop():", "def stop():", "def stop():", "def stop():", "def test2(self):\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.initialize()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.sweepDuckie()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieSuctionOn()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLiftEE()\r\n\t\tself._motion.terminate()", "def act(self, goal):\n rospy.loginfo(\"Emergency 
action received goal: \"+str(goal))\n\n\n sam_publisher = rospy.Publisher('/sam_auv_1/thrusters/0/input',\n FloatStamped,\n queue_size = 100)\n\n # sam_publisher = rospy.Publisher('/uavcan_vbs_command/',\n # PercentStamped,\n # queue_size = 100)\n\n #self.emergency_activated = False\n #sam_sub = rospy.Subscriber('/sam_auv_1/emergency_butt', Bool, self.emergency_cb)\n\n while not rospy.is_shutdown() and not self.done_once:\n time.sleep(0.5)\n #rospy.loginfo('Emergency waiting for butt')\n #if self.emergency_activated:\n # rospy.loginfo('Butt activated')\n start_time = rospy.get_time()\n elapsed = 0\n rospy.loginfo('Emergency action active...')\n while elapsed < 3:\n if rospy.is_shutdown():\n break\n\n elapsed = rospy.get_time() - start_time\n #fs = FloatStamped()\n fs = PercentStamped()\n h = Header()\n fs.header = h\n fs.data = 0\n sam_publisher.publish(fs)\n time.sleep(0.1)\n # we are done doing the action succesfully\n self.done_once = True\n rospy.loginfo('Emergency action SUCCESS')\n return True\n\n # something went wrong, we fucked up\n rospy.loginfo('Emergency action FAILURE '+ str(self.done_once))\n return False", "def Advance():\n warp.step()", "def idealOpAmp():", "def capture_stop(self):\n pass", "def stop(self):\n self.stopAngMovementAll()\n self.stopMovementAll()", "def at_aflight(seq, flag):\n at(\"AFLIGHT\", seq, [flag])", "def eStop(self):\n Step(speed=0, coils=1, steps=0, dir=Step.BRAKE)\n # #######################################################\n # Need to blink Stop and wait until Stop is pressed again\n # #######################################################", "def stop_step_sweep(self):\n self.write(\":SOUR:SWE:CONT:STAT OFF\")", "def end_meassuring(self):\n self.enabler = 0\n #self.t.join()\n return 1", "def forward_att(self, eouts, elens, ys, return_logits=False, teacher_logits=None, ctc_trigger_points=None, forced_trigger_points=None):\n bs, xmax = eouts.size()[:2]\n device = eouts.device\n ys_in, ys_out, ylens = append_sos_eos(ys, self.eos, self.eos, self.pad, eouts.device, self.bwd)\n ymax = ys_in.size(1)\n if forced_trigger_points is not None:\n for b in range(bs):\n forced_trigger_points[b, ylens[b] - 1] = elens[b] - 1\n dstates = self.zero_state(bs)\n if self.training:\n if self.discourse_aware and not self._new_session:\n dstates = {'dstate': (self.dstate_prev['hxs'], self.dstate_prev['cxs'])}\n self.dstate_prev = {'hxs': [None] * bs, 'cxs': [None] * bs}\n self._new_session = False\n cv = eouts.new_zeros(bs, 1, self.enc_n_units)\n self.score.reset()\n aw, aws = None, []\n betas, p_chooses = [], []\n lmout, lmstate = None, None\n ys_emb = self.embed_token_id(ys_in)\n src_mask = make_pad_mask(elens).unsqueeze(1)\n tgt_mask = (ys_out != self.pad).unsqueeze(2)\n logits = []\n for i in range(ymax):\n is_sample = i > 0 and self._ss_prob > 0 and random.random() < self._ss_prob\n if self.lm is not None:\n self.lm.eval()\n with torch.no_grad():\n y_lm = self.output(logits[-1]).detach().argmax(-1) if is_sample else ys_in[:, i:i + 1]\n lmout, lmstate, _ = self.lm.predict(y_lm, lmstate)\n y_emb = self.embed_token_id(self.output(logits[-1]).detach().argmax(-1)) if is_sample else ys_emb[:, i:i + 1]\n dstates, cv, aw, attn_state, attn_v = self.decode_step(eouts, dstates, cv, y_emb, src_mask, aw, lmout, mode='parallel', trigger_points=forced_trigger_points[:, i:i + 1] if forced_trigger_points is not None else None)\n logits.append(attn_v)\n aws.append(aw)\n if attn_state.get('beta', None) is not None:\n betas.append(attn_state['beta'])\n if attn_state.get('p_choose', None) 
is not None:\n p_chooses.append(attn_state['p_choose'])\n if self.attn_type in ['gmm', 'sagmm']:\n aw = attn_state['myu']\n if self.training and self.discourse_aware:\n for b in [b for b, ylen in enumerate(ylens.tolist()) if i == ylen - 1]:\n self.dstate_prev['hxs'][b] = dstates['dstate'][0][:, b:b + 1].detach()\n self.dstate_prev['cxs'][b] = dstates['dstate'][1][:, b:b + 1].detach()\n if self.training and self.discourse_aware:\n if bs > 1:\n self.dstate_prev['hxs'] = torch.cat(self.dstate_prev['hxs'], dim=1)\n self.dstate_prev['cxs'] = torch.cat(self.dstate_prev['cxs'], dim=1)\n else:\n self.dstate_prev['hxs'] = self.dstate_prev['hxs'][0]\n self.dstate_prev['cxs'] = self.dstate_prev['cxs'][0]\n logits = self.output(torch.cat(logits, dim=1))\n if return_logits:\n return logits\n loss, ppl = cross_entropy_lsm(logits, ys_out, self.lsm_prob, self.pad, self.training)\n acc = compute_accuracy(logits, ys_out, self.pad)\n aws = torch.cat(aws, dim=2)\n if not self.training:\n self.data_dict['elens'] = tensor2np(elens)\n self.data_dict['ylens'] = tensor2np(ylens)\n self.data_dict['ys'] = tensor2np(ys_out)\n self.aws_dict['xy_aws'] = tensor2np(aws)\n if len(betas) > 0:\n self.aws_dict['xy_aws_beta'] = tensor2np(torch.cat(betas, dim=2))\n if len(p_chooses) > 0:\n self.aws_dict['xy_p_choose'] = tensor2np(torch.cat(p_chooses, dim=2))\n if self.attn_type == 'mocha' or (ctc_trigger_points is not None or forced_trigger_points is not None):\n aws = aws.masked_fill_(tgt_mask.unsqueeze(1).expand_as(aws) == 0, 0)\n loss_quantity = 0.0\n if self.attn_type == 'mocha':\n n_tokens_pred = aws.sum(3).sum(2).sum(1) / aws.size(1)\n n_tokens_ref = tgt_mask.squeeze(2).sum(1).float()\n loss_quantity = torch.mean(torch.abs(n_tokens_pred - n_tokens_ref))\n loss_latency = 0.0\n if self.latency_metric == 'interval':\n assert ctc_trigger_points is None\n assert aws.size(1) == 1\n aws_prev = torch.cat([aws.new_zeros(aws.size())[:, :, -1:], aws.clone()[:, :, :-1]], dim=2)\n aws_mat = aws_prev.unsqueeze(3) * aws.unsqueeze(4)\n delay_mat = aws.new_ones(xmax, xmax).float()\n delay_mat = torch.tril(delay_mat, diagonal=-1, out=delay_mat)\n delay_mat = torch.cumsum(delay_mat, dim=-2).unsqueeze(0)\n delay_mat = delay_mat.unsqueeze(1).unsqueeze(2).expand_as(aws_mat)\n loss_latency = torch.pow((aws_mat * delay_mat).sum(-1), 2).sum(-1)\n loss_latency = torch.mean(loss_latency.squeeze(1))\n elif ctc_trigger_points is not None or 'ctc_sync' not in self.latency_metric and forced_trigger_points is not None:\n if 'ctc_sync' in self.latency_metric:\n trigger_points = ctc_trigger_points\n else:\n trigger_points = forced_trigger_points\n js = torch.arange(xmax, dtype=torch.float, device=device).expand_as(aws)\n exp_trigger_points = (js * aws).sum(3)\n trigger_points = trigger_points.float().unsqueeze(1)\n loss_latency = torch.abs(exp_trigger_points - trigger_points)\n loss_latency = loss_latency.sum() / ylens.sum()\n if teacher_logits is not None:\n kl_loss = distillation(logits, teacher_logits, ylens, temperature=5.0)\n loss = loss * (1 - self.distil_weight) + kl_loss * self.distil_weight\n return loss, acc, ppl, loss_quantity, loss_latency", "def _stop(self):", "def out(self, inputs):", "def calculate_beam_xy(self):\n info = []\n\n # Import relevant info\n pixel_size = self.info.pixel_size\n for i in [j.final for j in self.final_objects]:\n try:\n info.append(\n [\n i,\n i[\"beamX\"],\n i[\"beamY\"],\n i[\"wavelength\"],\n i[\"distance\"],\n (i[\"a\"], i[\"b\"], i[\"c\"], i[\"alpha\"], i[\"beta\"], i[\"gamma\"]),\n ]\n )\n except IOError as 
e:\n print(\"IOTA ANALYSIS ERROR: BEAMXY failed! \", e)\n pass\n\n # Calculate beam center coordinates and distances\n beamX = [i[1] for i in info]\n beamY = [j[2] for j in info]\n beam_dist = [\n math.hypot(i[1] - np.median(beamX), i[2] - np.median(beamY)) for i in info\n ]\n beam_dist_std = np.std(beam_dist)\n img_list = [\n [i[0], i[1], i[2], i[3], i[4], i[5], j]\n for i, j in list(zip(info, beam_dist))\n ]\n\n # Separate out outliers\n outliers = [i for i in img_list if i[3] > 2 * beam_dist_std]\n clean = [i for i in img_list if i[3] <= 2 * beam_dist_std]\n cbeamX = [i[1] for i in clean]\n cbeamY = [j[2] for j in clean]\n obeamX = [i[1] for i in outliers]\n obeamY = [j[2] for j in outliers]\n\n # Calculate median wavelength, detector distance and unit cell params from\n # non-outliers only\n wavelengths = [i[3] for i in clean]\n distances = [i[4] for i in clean]\n cells = [i[5] for i in clean]\n\n wavelength = np.median(wavelengths)\n det_distance = np.median(distances)\n a = np.median([i[0] for i in cells])\n b = np.median([i[1] for i in cells])\n c = np.median([i[2] for i in cells])\n\n # Calculate predicted L +/- 1 misindexing distance for each cell edge\n aD = det_distance * math.tan(2 * math.asin(wavelength / (2 * a)))\n bD = det_distance * math.tan(2 * math.asin(wavelength / (2 * b)))\n cD = det_distance * math.tan(2 * math.asin(wavelength / (2 * c)))\n\n return (\n beamX,\n beamY,\n cbeamX,\n cbeamY,\n obeamX,\n obeamY,\n beam_dist,\n [i[4] for i in info],\n aD,\n bD,\n cD,\n pixel_size,\n )", "def stop_output(out):\n if out is not None:\n out.stop()\n pass", "def stop(self):\n self.stop_aperture()" ]
[ "0.59246117", "0.53664815", "0.5361966", "0.52839315", "0.5274596", "0.5259131", "0.52420837", "0.51933676", "0.51914835", "0.51529115", "0.5128894", "0.5128894", "0.5128894", "0.5128894", "0.50962025", "0.50304735", "0.5025527", "0.49930224", "0.49781093", "0.49603343", "0.4948776", "0.49414974", "0.49263877", "0.49154374", "0.49076658", "0.49032843", "0.48940265", "0.4893402", "0.48926657", "0.48851514" ]
0.5679036
1
RE(run_loop_measurement(t=1, name='1bl_PEI_10mM', loops=7, pump_t=210, total_t=720, jump_x=10)) Take measurements in the loop. Sample has to be aligned before starting the script and theta angle at 0 deg (flat sample).
def run_loop_measurement(t=0.5, name='test', loops=4, pump_t=180, total_t=600, jump_x=10): incident_angles = [0.1, 0.4] waxs_arc = [20, 0] user = "TP" condition = ( ( -1 < waxs.arc.position ) and ( waxs.arc.position < 1 ) and (waxs_arc[0] == 20) ) if condition: waxs_arc = waxs_arc[::-1] ranges = { 0.1 : [-16, 16, 33], 0.4 : [-25, 25, 51], } try: ai0 = RE.md['ai_0'] except: yield from bp.count([]) ai0 = db[-1].start['ai_0'] print('Failed to acces RE.md') print(f'\n\nSample flat at theta = {ai0}') proposal_id('2023_2', '311564_Pettersson') #det_exposure_time(t, t) t_initial = time.time() for i in range(loops): t_start = time.time() print('Cycle number',i+1,'started at', (t_start - t_initial)/60) # Wait initial time for pumping to finish print(f'Start pumping now, going to wait for {pump_t} s\n') while (time.time() - t_start) < pump_t: print(f'Pumping time: {(time.time() - t_start):.1f} s') yield from bps.sleep(10) # Go over SAXS and WAXS t_measurement = ( time.time() - t_initial ) / 60 for wa in waxs_arc: yield from bps.mv(waxs, wa) dets = [pil900KW] if waxs.arc.position < 15 else [pil1M, pil900KW] for ai in incident_angles: yield from bps.mv(piezo.th, ai0 + ai) yield from bps.mvr(piezo.x, - jump_x) t2 = 2 * t if ai == 0.4 else t det_exposure_time(t2, t2) try: y_range = ranges[ai] except: y_range = [-10, 10, 11] sample_name = f'{name}{get_scan_md()}_time{t_measurement:.1f}_ai{ai}' sample_id(user_name=user, sample_name=sample_name) print(f"\n\n\n\t=== Sample: {sample_name} ===") yield from bp.rel_scan(dets, piezo.y, *y_range, md=dict(ai=ai)) yield from bps.mv(waxs, waxs_arc[0], piezo.th, ai0) # Wait until the total loop time passes if i + 1 < loops: print(f'Waiting for the loop to last {total_t} s in total\n') sleep_count = 0 while (time.time() - t_start) < total_t: sleep_count += 1 if (sleep_count % 10 == 0): print(f'Total time: {(time.time() - t_start):.1f} s') yield from bps.sleep(1) sample_id(user_name="test", sample_name="test") det_exposure_time(0.5, 0.5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_multiobjective(inputs, parameters = None):\n def thickness(x, t, chord):\n y = af.Naca00XX(chord, t, [x], return_dict = 'y')\n thickness_at_x = y['u'] - y['l']\n return thickness_at_x \n\n if parameters != None:\n eng = parameters[0]\n import_matlab = False\n else:\n eng = None\n import_matlab = True\n \n sma = inputs['sma']\n linear = inputs['linear']\n sigma_o = 100e6\n R = inputs['R']\n \n airfoil = \"naca0012\"\n chord = 1.#0.6175\n t = 0.12*chord\n\n J = {'x':0.75, 'y':0.}\n \n #Adding the area key to the dictionaries\n sma['area'] = math.pi*(0.000381/2.)**2\n linear['area'] = 0.001\n \n # Design constants \n #arm length to center of gravity\n r_w = 0.10\n\n\n #Aicraft weight (mass times gravity)\n W = 0.0523*9.8 #0.06*9.8\n alpha = 0.\n V = 10 #m/s\n altitude = 10000. #feet\n \n # Temperature\n T_0 = 273.15 + 30\n T_final = inputs['T_f']\n \n #Initial martensitic volume fraction\n MVF_init = 1.\n \n # Number of steps and cycles\n n = 200\n n_cycles = 0\n #~~~~~~~~~~~~~~~~~~~~~bb~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n #Parameters to select how to output stuff\n all_outputs = True\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if all_outputs:\n eps_s, eps_l, theta, sigma, MVF, T, eps_t, theta, F_l, k, L_s = flap_multiobjective(airfoil, \n chord, J, sma, linear, sigma_o, \n W, r_w, V, altitude, alpha, T_0, \n T_final, MVF_init, n, R, all_outputs = True,\n import_matlab = import_matlab, eng=eng,\n n_cycles = n_cycles)\n\n return theta, sigma, T, MVF, eps_s, L_s", "def main():\n\n r = np.arange(-90, 90, 2)\n tilt = 22.5\n\n with ptcmd.PTCMD(baud=57600) as controller:\n\n controller.home()\n\n for pan_angle in r:\n\n current_position = controller.get_position()\n pan_steps, tilt_steps, pan_rate, tilt_rate = system.move_to_angle(pan_angle, tilt, current_position[0], current_position[1], base_rate = 1000)\n controller.sync(pan_rate, pan_steps, tilt_rate, tilt_steps)\n controller.wait('all')\n\n time.sleep(2)\n\n distances = []\n for i in range(0,12):\n time.sleep(1)\n distances.append(controller.get_analog_sensor(1))\n\n distances.sort()\n distance = sum(distances[1:-1]) / len(distances[1:-1])\n\n x = math.sin(pan_angle*math.pi/180) * distance\n y = math.cos(pan_angle*math.pi/180) * distance\n\n print(distances, distance, pan_angle, x, y)\n\n plt.plot(x, y, 'b+')\n\n plt.show()", "def run(inputs, parameters = None):\n def thickness(x, t, chord):\n y = af.Naca00XX(chord, t, [x], return_dict = 'y')\n thickness_at_x = y['u'] - y['l']\n return thickness_at_x \n\n if parameters != None:\n eng = parameters[0]\n import_matlab = False\n else:\n eng = None\n import_matlab = True\n \n sma = inputs['sma']\n linear = inputs['linear']\n R = inputs['R']\n\n sigma_o = 100e6\n\n \n airfoil = \"naca0012\"\n chord = 1.#0.6175\n\n J = {'x':0.75, 'y':0.}\n \n #Adding the area key to the dictionaries\n sma['area'] = math.pi*(0.000381/2.)**2\n linear['area'] = 0.001\n \n # Design constants \n #arm length to center of gravity\n r_w = 0.10\n \n #Aicraft weight (mass times gravity)\n W = 0.0523*9.8 #0.06*9.8\n alpha = 0.\n V = 10 #m/s\n altitude = 10000. 
#feet\n \n # Temperature\n T_0 = 273.15 + 30.\n T_final = 273.15 + 140.\n \n #Initial martensitic volume fraction\n MVF_init = 1.\n \n # Number of steps and cycles\n n = 200\n n_cycles = 0\n #~~~~~~~~~~~~~~~~~~~~~bb~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n #Parameters to select how to output stuff\n all_outputs = True\n save_data = True\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if all_outputs:\n eps_s, eps_l, theta, sigma, MVF, T, eps_t, theta, F_l, k, L_s = flap(airfoil, \n chord, J, sma, linear, sigma_o, \n W, r_w, V, altitude, alpha, T_0, \n T_final, MVF_init, n, R, all_outputs = True,\n import_matlab = import_matlab, eng=eng,\n n_cycles = n_cycles)\n\n import matplotlib.pyplot as plt\n plt.figure()\n plt.plot(np.rad2deg(theta), eps_s, lw=2., label = \"$\\epsilon_s$\")\n plt.plot(np.rad2deg(theta), eps_l, 'b--',lw=2, label = \"$\\epsilon_l$\")\n# plt.scatter(theta, eps_s, c = 'b')\n# plt.scatter(theta, eps_l, c = 'b')\n plt.ylabel('$\\epsilon$', fontsize=24)\n plt.xlabel(r'$\\theta ({}^{\\circ})$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n \n print len(T), len(eps_s), len(eps_l), len(theta), len(eps_t)\n plt.figure()\n plt.plot(np.rad2deg(theta), eps_t, lw=2.)\n# plt.scatter(theta, eps_t, c = 'b')\n plt.ylabel('$\\epsilon_t$', fontsize=24)\n plt.xlabel(r'$\\theta ({}^{\\circ})$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n \n plt.figure()\n plt.plot(np.rad2deg(theta), MVF, lw=2.)\n# plt.scatter(theta, MVF, c = 'b')\n plt.ylabel('$MVF$', fontsize=24)\n plt.xlabel(r'$\\theta ({}^{\\circ})$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n\n plt.figure()\n plt.plot(T, MVF, lw=2.)\n# plt.scatter(T, MVF, c = 'b')\n plt.ylabel('$MVF$', fontsize=24)\n plt.xlabel('$T (K)$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n\n plt.figure()\n plt.plot(T, sigma, lw=2.)\n# plt.scatter(T, sigma, c = 'b')\n plt.ylabel('$\\sigma$', fontsize=24)\n plt.xlabel('$T (K)$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n \n plt.figure()\n plt.plot(T, eps_s, 'b', lw=2., label = \"$\\epsilon_s$\")\n plt.plot(T, eps_l, 'b--',lw=2, label = \"$\\epsilon_l$\")\n# plt.scatter(T, eps_s, c = 'b')\n# plt.scatter(T, eps_l, c = 'b')\n plt.xlabel('$T (K)$', fontsize=20)\n plt.ylabel('$\\epsilon$', fontsize=24)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid()\n \n plt.figure()\n plt.plot(T, np.rad2deg(theta), lw=2.)\n# plt.scatter(T, theta, c = 'b')\n plt.xlabel('$T (K)$', fontsize=20)\n plt.ylabel(r'$\\theta ({}^{\\circ})$', fontsize=20)\n plt.grid()\n \n F_s = []\n for i in range(len(sigma)):\n F_s.append(sigma[i]*sma['area'])\n# sigma_MPa = []\n# for sigma_i in sigma:\n# sigma_MPa.append(sigma_i/1e6)\n plt.figure()\n plt.plot(theta, F_s, 'b', lw=2., label = \"$F_s$\")\n plt.plot(theta, F_l, 'b--', lw=2., label = \"$F_l$\")\n# plt.scatter(theta, F_s, c = 'b')\n# plt.scatter(theta, F_l, c = 'b')\n plt.ylabel('$F (N)$', fontsize=20)\n plt.xlabel(r'$\\theta ({}^{\\circ})$', fontsize=20)\n plt.legend(loc = 'best', fontsize = 'x-large')\n plt.grid() \n else:\n theta, k= flap(airfoil, chord, J, sma, linear, sigma_o, \n W, r_w, V, altitude, alpha, T_0, \n T_final, MVF_init, n, R, all_outputs = False,\n import_matlab = import_matlab, eng=eng,\n n_cycles = n_cycles)\n \n if save_data == True:\n Data = {'theta': theta, 'eps_s': eps_s, 'eps_l': eps_l, \n 'sigma': sigma, 'xi': MVF, 'T': T, 'eps_t': eps_t,\n 'F_l': F_l, 'k': 
k, 'L_s':L_s}\n pickle.dump(Data, open( \"data.p\", \"wb\" ) )\n \n return {'theta': theta, 'k': k}", "def lab_run_med(character_id, time_step):\n pass", "def run1():\n #Reseting motors\n ResetRobot.reset_wheel_motors()\n ResetRobot.reset_attachment_motors()\n CalibrateRobot.calibrate_gyro()\n\n #mission M01 and M02 - space travel and solar panel\n M01_M02()\n \n #Mission M05- Extraction \n M05_M14()\n\n #Back to base before Gerhard (Remove comment if necessary)\n return_to_base1()\n\n # Must delete for competition.. This is to set up forklift to repeat run.\n Robot.attachment_left.on_for_rotations(-100, 8) #Raises Forklift ", "def radiation_measurement_analysis():\n import pint\n ureg = pint.UnitRegistry()\n\n mrem_h = ureg.parse_units('mrem') / ureg.hour\n m = ureg.parse_units('meters')\n s = ureg.parse_units('seconds')\n\n # Measurements of background radiation\n bg_dist = ureg.parse_expression('10 m') # estimate of how far away we are wrt background\n background_rows = [\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.022 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=4.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.021 * mrem_h, capture_time=5.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=11.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=16.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.024 * mrem_h, capture_time=20.0 * s),\n ]\n\n # Measurements of sample radiation\n esp_dist = ureg.parse_expression('1 inch').to(m) / 2 # estimate of how far we are from the sample when very close\n dist0_rows = [\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=0.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=3.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=5.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=9.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=10.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=11.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.057 * mrem_h, capture_time=12.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.058 * mrem_h, capture_time=13.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=14.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=15.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=16.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=20.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=22.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.066 * mrem_h, capture_time=23.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=24.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=25.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=26.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=28.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=30.0 * s),\n ]\n\n dist0_v2_rows = [\n dict(vid=3, distance=esp_dist, rad=0.012 * mrem_h, 
capture_time=0.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.011 * mrem_h, capture_time=1.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=8.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=9.0 * s),\n ]\n\n close_rows = [\n dict(vid=4, distance=0.5 * m, rad=0.013 * mrem_h, capture_time=0.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.014 * mrem_h, capture_time=5.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=7.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.011 * mrem_h, capture_time=15.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=16.0 * s),\n ]\n\n mid_rows = [\n dict(vid=5, distance=1.0 * m, rad=0.014 * mrem_h, capture_time=0.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.015 * mrem_h, capture_time=5.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.013 * mrem_h, capture_time=10.0 * s),\n ]\n\n far_rows = [\n dict(vid=6, distance=2.0 * m, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=6, distance=2.0 * m, rad=0.025 * mrem_h, capture_time=0.1 * s),\n ]\n\n # guess_dist = ureg.parse_expression('0.3 m') # estimate of how far away we are wrt background\n # guess_rows = [\n # dict(vid=9, distance=guess_dist, rad=0.030 * mrem_h, capture_time=0.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.041 * mrem_h, capture_time=2.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.051 * mrem_h, capture_time=3.0 * s),\n # ]\n\n rows = dist0_rows + background_rows + dist0_v2_rows + close_rows + mid_rows + far_rows\n # rows += guess_rows\n\n import pandas as pd\n import numpy as np\n table = pd.DataFrame(rows)\n\n # Ensure comparable units\n units = {\n 'rad': mrem_h,\n 'distance': m,\n 'capture_time': s,\n }\n for key, unit in units.items():\n table[key] = table[key].apply(lambda c: c.to(unit).m)\n table['rad'] = table['rad'].astype(float)\n table['distance'] = table['distance'].astype(float)\n\n # Weight each measurement based on the amount of time the measurement was\n # sustained in the video.\n average_rad_rows = []\n for vid, group in table.groupby('vid'):\n from statsmodels.stats.weightstats import DescrStatsW\n weights = (-1 * group['capture_time'].diff(periods=-1).fillna(0)) / group['capture_time'].iloc[-1]\n table.loc[group.index, 'weight'] = weights\n values = group['rad']\n weighted_stats = DescrStatsW(values, weights=weights, ddof=0)\n dists = group['distance'].unique()\n assert len(dists) == 1\n average_rad_rows.append({\n 'vid': vid,\n 'distance': dists[0],\n 'rad_mean': weighted_stats.mean,\n 'rad_std': weighted_stats.std,\n })\n stats_table = pd.DataFrame(average_rad_rows)\n\n bg_row = stats_table.loc[stats_table['distance'].argmax()]\n fg_row = stats_table.loc[stats_table['distance'].argmin()]\n\n # -------------------\n ADD_DUMMY_VALUES = 0\n if ADD_DUMMY_VALUES:\n # Hack: because we don't have enough samples we can fudge the value\n # knowning that the value should be the background radiation in the\n # limit.\n\n dummy_measurements = []\n extra_support = 1\n for idx in range(3, 3 + extra_support):\n dummy_row = {\n 'vid': -idx,\n 'distance': bg_row['distance'] + idx,\n 'rad_mean': bg_row['rad_mean'],\n 'rad_std': 0.01,\n }\n dummy_measurements.append(dummy_row)\n\n # also add an extra value close to the sample\n rad_bg = bg_row['rad_mean']\n rad_above_bg = fg_row['rad_mean'] - rad_bg\n dummy_row = {\n 'vid': -1,\n 'distance': fg_row['distance'] / 2,\n 'rad_mean': rad_bg + (rad_above_bg * 4),\n 'rad_std': 0.5,\n }\n dummy_measurements.append(dummy_row)\n\n # dummy_row = {\n # 'vid': 
-2,\n # 'distance': fg_row['distance'] / 4,\n # 'rad_mean': rad_bg + (rad_above_bg * 16),\n # }\n # dummy_measurements.append(dummy_row)\n\n dummy_stats = pd.DataFrame(dummy_measurements)\n dummy_stats['weight'] = 0.5\n stats_table['weight'] = 1.0\n stats_table2 = pd.concat([stats_table, dummy_stats]).reset_index(drop=True).sort_values('distance')\n else:\n stats_table2 = stats_table\n # -------------------\n\n import scipy\n scipy.optimize.curve_fit\n\n # Because we know the radiation should follow an inverse square law wrt to\n # distance, we can fit a polynomial of degree 2 (parabola) to interpolate /\n # extrapolate the **inverse** values.\n x = stats_table2['distance'].values\n y = stats_table2['rad_mean'].values\n s = stats_table2['rad_std'].values\n\n # Model the squared falloff directly\n def invsquare(x, a, b):\n return a * (1 / (0.01 + x ** 2)) + b\n # bg_row['rad_mean']\n # Use curve_fit to constrain the first coefficient to be zero\n try:\n coef = scipy.optimize.curve_fit(invsquare, x, y, sigma=s, method='trf')[0]\n except Exception as ex:\n coef = None\n print(f'ex={ex}')\n\n # Also fit one to the raw weighted points as a sanity check\n # inv_poly2 = Polynomial.fit(table['distance'], 1 / table['rad'], w=table['weight'], deg=2)\n\n import kwplot\n sns = kwplot.autosns()\n plt = kwplot.autoplt()\n # ax = sns.boxplot(data=table, x='distance', y='rad', width=0.1)\n\n # Add in points to show each observation\n ax = sns.relplot(x=\"distance\", y=\"rad\", data=table, size=4, color=\".3\",\n linewidth=0, alpha=0.5, palette='deep')\n\n ax = plt.gca()\n ax.set_xlabel('distance from sample ({})'.format(str(units['distance'])))\n ax.set_ylabel('radiation dosage ({})'.format(str(units['rad'])))\n\n max_meters = 10\n\n extrap_x = np.linspace(0, max_meters, 1000)\n if coef is not None:\n extrap_y1 = invsquare(extrap_x, *coef)\n # extrap_y2 = 1 / inv_poly2(extrap_x)\n ax.plot(stats_table2['distance'].values, stats_table2['rad_mean'].values, 'rx')\n ax.plot(stats_table['distance'].values, stats_table['rad_mean'].values, 'bo')\n ax.plot(extrap_x, extrap_y1, '--')\n ax.set_ylim(0.001, 0.1)\n ax.set_yscale('log')\n # ax.plot(extrap_x, extrap_y2, '--')", "def run(self):\n openShutter = True\n actuateXed = False\n image_type = \"PPUMP\"\n\n wl = float(self.eo_config.get(\"PPUMP_WL\", 550))\n meas_flux = self.measured_flux(wl)\n seqno = 0\n for tokens in self.instructions:\n exptime = float(tokens[1])\n nframes = int(tokens[2])\n shifts = int(tokens[3])\n for iframe in range(nframes):\n self.image_clears()\n self.bias_image(seqno)\n self.take_image(seqno, exptime, openShutter, actuateXed,\n image_type)\n seqno += 1", "def amet_memoryWise(self):\r\n # set up logging files to monitor the calculation\r\n logging.basicConfig(filename = os.path.join(self.path,'history_amet_python.log'),\r\n filemode = 'w+', level = logging.DEBUG,\r\n format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n # initialize the time span\r\n # define sigma level\r\n A, B = self.defineSigmaLevels()\r\n # use example input file to load the basic dimensions information\r\n datapath_var = os.path.join(self.path, 'MERRA2_400.inst3_3d_asm_Nv.20160101.nc4.nc')\r\n var_key = Dataset(datapath_var)\r\n lat = var_key.variables['lat'][:]\r\n lon = var_key.variables['lon'][:]\r\n # calculate the reference levels based on A & B and standard surface pressure\r\n half_level = A + B * 101325\r\n level = (half_level[1:] + half_level[:-1]) / 2\r\n # create space for the output\r\n # AMET in the entire column\r\n E = 
np.zeros((len(lat),len(lon)), dtype=float)\r\n cpT = np.zeros((len(lat),len(lon)), dtype=float)\r\n Lvq = np.zeros((len(lat),len(lon)), dtype=float)\r\n gz = np.zeros((len(lat),len(lon)), dtype=float)\r\n uv2 = np.zeros((len(lat),len(lon)), dtype=float)\r\n logging.info(\"Start retrieving variables T,q,u,v,sp\")\r\n # The shape of each variable is (8,72,361,576)\r\n T = var_key.variables['T'][:]\r\n q = var_key.variables['QV'][:]\r\n sp = var_key.variables['PS'][:] #(8,361,576)\r\n u = var_key.variables['U'][:]\r\n v = var_key.variables['V'][:]\r\n logging.info(\"Extracting variables successfully!\") \r\n # compute gz\r\n z_model = self.calc_gz(var_key)\r\n # get the basic shape\r\n tt, hh, yy, xx = q.shape\r\n AMET = amet.met()\r\n E, cpT, Lvq, gz, uv2 = AMET.calc_met(T, q, sp, u, v, z_model, A, B,\r\n tt, hh, len(lat), len(lon), lat, self.lat_unit)\r\n\r\n return np.mean(E)", "def run():\n step = 0\n o2r = 4 #orange to red delay time\n r2g = 2 #red to green delay time\n A_4235 = 0\n B_4235 = 1\n C_4235 = 2\n AB1_4235 = 3\n AB2_4235 = 4\n AC1_4235 = 5\n AC2_4235 = 6\n BA1_4235 = 7\n BA2_4235 = 8\n BC1_4235 = 9\n BC2_4235 = 10\n CA1_4235 = 11\n CA2_4235 = 12\n CB1_4235 = 13\n CB2_4235 = 14\n A_4219 = 0\n B_4219 = 1\n C_4219 = 2\n D_4219 = 3\n E_4219 = 4\n F_4219 = 5\n G_4219 = 6\n AB1_4219 = 7\n AB2_4219 = 8\n AC1_4219 = 9\n AC2_4219 = 10\n AD1_4219 = 11\n AD2_4219 = 12\n AE1_4219 = 13\n AE2_4219 = 14\n AF1_4219 = 16\n AF2_4219 = 17\n AG1_4219 = 18\n AG2_4219 = 19\n BA1_4219 = 20\n BA2_4219 = 21\n BC1_4219 = 22\n BC2_4219 = 23\n BD1_4219 = 24\n BD2_4219 = 25\n BE1_4219 = 26\n BE2_4219 = 27\n BF1_4219 = 28\n BF2_4219 = 29\n BG1_4219 = 30\n BG2_4219 = 31\n CA1_4219 = 32\n CA2_4219 = 33\n CB1_4219 = 34\n CB2_4219 = 35\n CD1_4219 = 36\n CD2_4219 = 37\n CE1_4219 = 38\n CE2_4219 = 39\n CF1_4219 = 40\n CF2_4219 = 41\n CG1_4219 = 42\n CG2_4219 = 43\n DA1_4219 = 44\n DA2_4219 = 45\n DB1_4219 = 46\n DB2_4219 = 47\n DC1_4219 = 48\n DC2_4219 = 49\n DE1_4219 = 50\n DE2_4219 = 51\n DF1_4219 = 52\n DF2_4219 = 53\n DG1_4219 = 54\n DG2_4219 = 55\n EA1_4219 = 56\n EA2_4219 = 57\n EB1_4219 = 58\n EB2_4219 = 59\n EC1_4219 = 60\n EC2_4219 = 61\n ED1_4219 = 62\n ED2_4219 = 63\n EF1_4219 = 64\n EF2_4219 = 65\n EG1_4219 = 66\n EG2_4219 = 67\n FA1_4219 = 68\n FA2_4219 = 69\n FB1_4219 = 70\n FB2_4219 = 71\n FC1_4219 = 72\n FC2_4219 = 73\n FD1_4219 = 74\n FD2_4219 = 75\n FE1_4219 = 76\n FE2_4219 = 77\n FG1_4219 = 78\n FG2_4219 = 79\n GA1_4219 = 80\n GA2_4219 = 81\n GB1_4219 = 82\n GB2_4219 = 83\n GC1_4219 = 84\n GC2_4219 = 85\n GD1_4219 = 86\n GD2_4219 = 87\n GE1_4219 = 88\n GE2_4219 = 89\n GF1_4219 = 90\n GF2_4219 = 91\n A_4220 = 0\n B_4220 = 1\n C_4220 = 2\n D_4220 = 3\n E_4220 = 4\n AB1_4220 = 5\n AB2_4220 = 6\n AC1_4220 = 7\n AC2_4220 = 8\n AD1_4220 = 9\n AD2_4220 = 10\n AE1_4220 = 11\n AE2_4220 = 12\n BA1_4220 = 13\n BA2_4220 = 14\n BC1_4220 = 15\n BC2_4220 = 16\n BD1_4220 = 17\n BD2_4220 = 18\n BE1_4220 = 19\n BE2_4220 = 20\n CA1_4220 = 21\n CA2_4220 = 22\n CB1_4220 = 23\n CB2_4220 = 24\n CD1_4220 = 25\n CD2_4220 = 26\n CE1_4220 = 27\n CE2_4220 = 28\n DA1_4220 = 29\n DA2_4220 = 30\n DB1_4220 = 31\n DB2_4220 = 32\n DC1_4220 = 33\n DC2_4220 = 34\n DE1_4220 = 35\n DE2_4220 = 36\n EA1_4220 = 37\n EA2_4220 = 38\n EB1_4220 = 39\n EB2_4220 = 40\n EC1_4220 = 41\n EC2_4220 = 42\n ED1_4220 = 43\n ED2_4220 = 44\n A_4221 = 0\n B_4221 = 1\n C_4221 = 2\n D_4221 = 3\n E_4221 = 4\n F_4221 = 5\n AB1_4221 = 6\n AB2_4221 = 7\n AC1_4221 = 8\n AC2_4221 = 9\n AD1_4221 = 10\n AD2_4221 = 11\n AE1_4221 = 12\n AE2_4221 = 13\n AF1_4221 
= 14\n AF2_4221 = 15\n BA1_4221 = 16\n BA2_4221 = 17\n BC1_4221 = 18\n BC2_4221 = 19\n BD1_4221 = 20\n BD2_4221 = 21\n BE1_4221 = 22\n BE2_4221 = 23\n BF1_4221 = 24\n BF2_4221 = 25\n CA1_4221 = 26\n CA2_4221 = 27\n CB1_4221 = 28\n CB2_4221 = 29\n CD1_4221 = 30\n CD2_4221 = 31\n CE1_4221 = 32\n CE2_4221 = 33\n CF1_4221 = 34\n CF2_4221 = 35\n DA1_4221 = 36\n DA2_4221 = 37\n DB1_4221 = 38\n DB2_4221 = 39\n DC1_4221 = 40\n DC2_4221 = 41\n DE1_4221 = 42\n DE2_4221 = 43\n DF1_4221 = 44\n DF2_4221 = 45\n EA1_4221 = 46\n EA2_4221 = 47\n EB1_4221 = 48\n EB2_4221 = 49\n EC1_4221 = 50\n EC2_4221 = 51\n ED1_4221 = 52\n ED2_4221 = 53\n EF1_4221 = 54\n EF2_4221 = 55\n FA1_4221 = 56\n FA2_4221 = 57\n FB1_4221 = 58\n FB2_4221 = 59\n FC1_4221 = 60\n FC2_4221 = 61\n FD1_4221 = 62\n FD2_4221 = 63\n FE1_4221 = 64\n FE2_4221 = 65\n \n #while traci.simulation.getMinExpectedNumber() > 0:\n while step < 600:\n traci.simulationStep()\n if step == 0:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",6)\n if step == 6:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 10:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 12:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",75)\n if step == 87:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 91:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 93:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 108:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 112:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 114:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",12)\n if step == 126:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 130:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 132:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",32)\n if step == 164:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n 
traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 168:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 170:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",50)\n if step == 220:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 224:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 226:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 241:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 245:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 247:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",14)\n if step == 261:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 265:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 267:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",13)\n if step == 280:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 284:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 286:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",74)\n if step == 360:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 364:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 366:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 381:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 385:\n 
traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 387:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",10)\n if step == 397:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 401:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 403:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",16)\n if step == 419:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 423:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 425:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",74)\n if step == 499:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 503:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", AB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 505:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",15)\n if step == 520:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 524:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BC2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 526:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", C_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",8)\n if step == 534:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 538:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", CB2_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 540:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", B_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",16)\n if step == 556:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA1_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",o2r)\n if step == 560:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", BA2_4235)\n 
traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",r2g)\n if step == 562:\n traci.trafficlight.setPhase(\"cluster_1707799581_314056954_5931861577\", A_4235)\n traci.trafficlight.setPhaseDuration(\"cluster_1707799581_314056954_5931861577\",38)\n if step == 0:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 6)\n if step == 6:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 10:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 12:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 20)\n if step == 32:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 36:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 38:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 43:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 47:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 49:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 31)\n if step == 80:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 84:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 86:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 
G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 12)\n if step == 98:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 102:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 104:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 29)\n if step == 133:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 137:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 139:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 20)\n if step == 159:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 163:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 165:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 170:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 174:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 176:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 38)\n if step == 214:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n 
traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 218:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 220:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 13)\n if step == 233:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 237:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 239:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 34)\n if step == 273:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 277:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 279:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 22)\n if step == 301:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 305:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 307:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 312:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 316:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 
r2g)\n if step == 318:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 31)\n if step == 349:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 353:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 355:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 18)\n if step == 373:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 377:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 379:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 36)\n if step == 415:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 419:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 421:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 19)\n if step == 440:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 444:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 446:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 5)\n if step == 451:\n 
traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 455:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 457:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", F_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 29)\n if step == 486:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 490:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", FG2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 492:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", G_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 18)\n if step == 510:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 514:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", GA2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 516:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", A_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 41)\n if step == 557:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 561:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", AD2_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 563:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", D_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 19)\n if step == 582:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 586:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", DE2_4219)\n 
traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", r2g)\n if step == 588:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", E_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", 8)\n if step == 596:\n traci.trafficlight.setPhase(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", EF1_4219)\n traci.trafficlight.setPhaseDuration(\"cluster_25977365_314059191_314060044_314061754_314061758_314062509_314062525\", o2r)\n if step == 0:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 17)\n if step == 17:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 21:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 23:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 9)\n if step == 32:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 36:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 38:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 67)\n if step == 105:\n traci.trafficlight.setPhase(\"gneJ41\", AB1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 109:\n traci.trafficlight.setPhase(\"gneJ41\", AB2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 111:\n traci.trafficlight.setPhase(\"gneJ41\", B_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 19)\n if step == 130:\n traci.trafficlight.setPhase(\"gneJ41\", BD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 134:\n traci.trafficlight.setPhase(\"gneJ41\", BD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 136:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 16)\n if step == 152:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 156:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 158:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 11)\n if step == 169:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 173:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 175:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 63)\n if step == 238:\n traci.trafficlight.setPhase(\"gneJ41\", AD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 242:\n traci.trafficlight.setPhase(\"gneJ41\", AD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 244:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 13)\n if step == 257:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 261:\n traci.trafficlight.setPhase(\"gneJ41\", 
DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 263:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 9)\n if step == 272:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 276:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 278:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 95)\n if step == 373:\n traci.trafficlight.setPhase(\"gneJ41\", AB1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 377:\n traci.trafficlight.setPhase(\"gneJ41\", AB2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 379:\n traci.trafficlight.setPhase(\"gneJ41\", B_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 19)\n if step == 398:\n traci.trafficlight.setPhase(\"gneJ41\", BD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 402:\n traci.trafficlight.setPhase(\"gneJ41\", BD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 404:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 24)\n if step == 428:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 432:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 434:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 13)\n if step == 447:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 451:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 453:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 56)\n if step == 509:\n traci.trafficlight.setPhase(\"gneJ41\", AB1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 513:\n traci.trafficlight.setPhase(\"gneJ41\", AB2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 515:\n traci.trafficlight.setPhase(\"gneJ41\", B_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 19)\n if step == 534:\n traci.trafficlight.setPhase(\"gneJ41\", BD1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 538:\n traci.trafficlight.setPhase(\"gneJ41\", BD2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 540:\n traci.trafficlight.setPhase(\"gneJ41\", D_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 22)\n if step == 562:\n traci.trafficlight.setPhase(\"gneJ41\", DE1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 566:\n traci.trafficlight.setPhase(\"gneJ41\", DE2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 568:\n traci.trafficlight.setPhase(\"gneJ41\", E_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 20)\n if step == 588:\n traci.trafficlight.setPhase(\"gneJ41\", EA1_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", o2r)\n if step == 592:\n traci.trafficlight.setPhase(\"gneJ41\", EA2_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", r2g)\n if step == 594:\n traci.trafficlight.setPhase(\"gneJ41\", A_4220)\n traci.trafficlight.setPhaseDuration(\"gneJ41\", 6)\n if step == 0:\n 
traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 9)\n if step == 9:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 13:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 15:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 34:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 38:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 40:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 20)\n if step == 60:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 64:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 66:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 24)\n if step == 90:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 94:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 96:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 9)\n if step == 105:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 109:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 111:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 130:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 134:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n 
traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 136:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 30)\n if step == 166:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 170:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 172:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 28)\n if step == 200:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 204:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 206:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 225:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 229:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 231:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 8)\n if step == 239:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 243:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 245:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 32)\n if step == 277:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 281:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 283:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 27)\n if step == 310:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 
o2r)\n if step == 314:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 316:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 15)\n if step == 331:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 335:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 337:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 14)\n if step == 351:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 355:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 357:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 22)\n if step == 379:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 383:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 385:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 24)\n if step == 409:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 413:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 415:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 11)\n if step == 426:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 430:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 432:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 14)\n if step == 446:\n 
traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 450:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 452:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 30)\n if step == 482:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 486:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 488:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 26)\n if step == 514:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 518:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 520:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 8)\n if step == 528:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 532:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", EF2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 534:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", F_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 18)\n if step == 552:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 556:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", FA2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 558:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", A_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 19)\n if step == 577:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 581:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", AD2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 583:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", 
D_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 9)\n if step == 592:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE1_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", o2r)\n if step == 596:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", DE2_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", r2g)\n if step == 598:\n traci.trafficlight.setPhase(\"cluster_25953432_313863435_313863521_314053282\", E_4221)\n traci.trafficlight.setPhaseDuration(\"cluster_25953432_313863435_313863521_314053282\", 2)\n\n step += 1\n\n traci.close()\n sys.stdout.flush()", "def timesteps_experiment():\n\n print(\"TIMESTEPS EXPERIMENT\")\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'timestep_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'time_steps'\n changing_param_value = [1, 2, 4, 8, 16, 32, 64, 128, 256]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n # set constant parameters\n set_params(epochs=20)\n set_params(dropout=0.3)\n set_params(use_word_emb=1)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(use_word_emb = value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id = new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n if value == changing_param_value[0]:\n set_params(preproc_data_id=new_model_id)", "def run_performance():\n # Create a Struct data instance from config\n inputs = Struct(config)\n inputs.throttle = throttle\n # Get oxidizer properties at the given temperature\n n2o = n2o_properties(inputs.ox.T_tank)\n # Our integration variables are oxidizer mass and liquid oxidizer volume\n Mox = n2o.rho_l*(inputs.ox.liquid_V) + n2o.rho_g*(inputs.ox.tank_V-inputs.ox.liquid_V)\n if inputs.options.output_on:\n print(\"Initial oxidizer mass: {} kg.\".format(Mox))\n\n start = time.perf_counter() # Start timer for integration\n\n time, record = integration(inputs) # Time = time for integration, record = output data\n F_thrust = record.F_thrust\n p_cc = record.p_cc\n p_oxtank = record.p_oxtank\n p_oxpresstank = record.p_oxpresstank\n p_fueltank = record.p_fueltank\n p_fuelpresstank = record.p_fuelpresstank\n p_oxmanifold = record.p_oxmanifold\n T_oxtank = record.T_oxtank\n T_cc = record.T_cc\n area_core = record.area_core\n OF = record.OF_i\n 
gamma_ex = record.gamma_ex\n m_dot_ox = record.m_dot_ox\n m_dot_fuel = record.m_dot_fuel\n p_crit = record.p_crit\n m_dot_ox_crit = record.m_dot_ox_crit\n M_e = record.M_e\n p_exit = record.p_exit\n p_shock = record.p_shock\n\n time_elapsed = start-time.perf_counter() # Stop the timer and print elapsed time\n if inputs.options.output_on:\n print(\"Time elapsed for this timestep: {} sec.\".format(time_elapsed))", "def main():\n\n para_list = [para]\n loss_list = []\n\n for i in range(30):\n para_list.append(diff_fun(loss_fun, para_list[i]))\n loss_list.append(loss_fun(para_list[i]))\n\n env = QEnv()\n env.backend(BackendName.LocalBaiduSim2)\n\n q = env.Q.createList(n)\n\n state_prepare(q, 0)\n universal_cir(q, 0, para_list[-1])\n\n MeasureZ(*env.Q.toListPair())\n taskResult = env.commit(shots, fetchMeasure=True)\n print(taskResult['counts'])", "def water_delay(block_size):\n\n\tdirectory = \"/local/scratch/sam5g13/Sam_5th-yr_Project/test_data\"\n\tfile_name = \"{}/tip4p2005_50_TOTEST.npy\".format(directory)\n\tgnuplot = r'/usr/bin/gnuplot'\n\n\n\tfile_data = np.load(file_name, mmap_mode='r')\n\n\t_, _, _, gamma, _ = file_data \n\n\tgamma_sample = blocksav(gamma, block_size)\n\n\tgamma_file = \"{}/tip4p2005_50_blocksize_{}_gamma.txt\".format(directory, block_size)\n\twith open(gamma_file, 'w') as outfile:\n\t\tnp.savetxt(outfile, gamma_sample)\n\n\tgamma_file_name = \"{}/tip4p2005_50_blocksize_{}_gamma.txt\".format(directory, block_size)\n\n\tcorrelations = subprocess.check_output([\"corr\", gamma_file_name])\n\t\n\tmutual_information = subprocess.check_output([\"mutual\", gamma_file_name])\n\n\tcorrelation_array = np.array(correlations.split()[5:], dtype=float)\n\tmutual_information_array = np.array(mutual_information.split()[2:], dtype=float)\n\n\tidx_odd = range(1,199,2)\n\tidx_even = range(0,200,2)\n\n\tidx_odd1 = range(1,43,2)\n\tidx_even1 = range(0,44,2)\n\n\t#correlation_values = correlation_array[idx_odd]\n\tmutual_information_values = mutual_information_array[idx_odd1]\n\tprint 'LOOK HERE...........................................', mutual_information_array[idx_odd1], len(mutual_information_array[idx_odd1])\n\n\t\"\"\"\n\tdelay_length = 0\n\n\tfor o in range(len(correlation_values) - 1):\n\t\tprint o, correlation_values[o], correlation_values[o+1]\n\t\tif correlation_values[o] > correlation_values[o+1]:\n\t\t\tdelay_length = o \n\t\telse: break\n\t\n\tdelay_length = delay_length + 1\n\n\tprint \"The delay length is\", delay_length\n\t\"\"\"\n\n\tmutual_info_length = 0\n\n\tfor o in range(len(mutual_information_values) - 1):\n\t\t#print o, correlation_values[o], correlation_values[o+1]\n\t\tif mutual_information_values[o] > mutual_information_values[o+1]:\n\t\t\tmutual_info_length = o \n\t\telse: break\n\t\n\tmutual_info_length = mutual_info_length + 1\n\t\n\tprint \"The mutual info length is\", mutual_info_length\n\n\t#assert \tdelay_length == mutual_info_length, \"The minimums of the mutual information and the correlations are not equal! 
%d %d\" % (delay_length, mutual_info_length)\n\t\n\tproduce_delays = subprocess.check_output([\"delay\", gamma_file_name, \"-d\" + str(mutual_info_length)])\n\n\t\n\tdelay_file = \"{}/tip4p2005_50_blocksize_{}_gamma_delay_{}.txt\".format(directory, block_size, mutual_info_length)\n\tf = open(delay_file, 'w')\n\tf.write(produce_delays)\n\tf.close()\n\n\t\"\"\"\n\n\tprint produce_delays\n\tprint len(produce_delays), len(mutual_information_values)\n\tplt.figure(\"produce_delays vs mutual information\")\n\tplt.xlabel(\"produce_delays\")\n\tplt.ylabel(\"Mutual information\")\n\tplt.plot(produce_delays, mutual_information_values)\n\tplt.show()\n\t\n\t\"\"\"\n\t\n\tembedding = subprocess.check_output([\"false_nearest\", gamma_file_name])\n\n\tembedding_dimension = int(raw_input(\"What embedding dimension would you like to use? \"))\n\t\n\trun_calc = subprocess.check_output(['gnuplot', '-e', \"filename='{}/tip4p2005_50_blocksize_{}_gamma_delay_{}.txt';ofilename='tip4p2005_50_blocksize_{}_gamma_delay_{}_graph.png'\".format(directory, block_size, mutual_info_length, block_size, mutual_info_length ),\"plot.gnu\"])\n\n\n\t\"\"\"Imports the time series and specifies each aspect used in building the recurrence matrix\"\"\"\n\n\tsettings = Settings(time_series = gamma_sample, embedding_dimension = embedding_dimension, time_delay = mutual_info_length, similarity_measure = EuclideanMetric, neighbourhood = FixedRadius(radius = 13), min_diagonal_line_length = 2, min_vertical_line_length = 2)\n\n\t\"\"\"Performs the computation and prints out all the results\"\"\"\n\n\trqacomputation = RQAComputation.create(settings, verbose = True)\n\n\trqaresult = rqacomputation.run()\n\n\tprint rqaresult\n\n\t\"\"\"Creates the Recurrence matrix for viewing\"\"\"\n\n\trpcomputation = RecurrencePlotComputation.create(settings)\n\n\trpresult = rpcomputation.run()\n\n\tImageGenerator.save_recurrence_plot(rpresult.recurrence_matrix, 'recurrence_plot.png')", "def test_run_time(self):\n\n wrapper = Wrapper()\n name = 'simplemesh'\n corner_points = ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0))\n extrude_length = 1\n nex = 3\n ney = 3\n nez = 1\n create_quad_mesh(name, wrapper, corner_points,\n extrude_length, nex, ney, nez)\n\n wrapper.CM[CUBA.NAME] = name\n wrapper.CM_extensions[CUBAExt.GE] = (CUBAExt.INCOMPRESSIBLE,\n CUBAExt.LAMINAR_MODEL)\n wrapper.SP[CUBA.TIME_STEP] = 1\n wrapper.SP[CUBA.NUMBER_OF_TIME_STEPS] = 1\n wrapper.SP[CUBA.DENSITY] = 1.0\n wrapper.SP[CUBA.DYNAMIC_VISCOSITY] = 1.0\n wrapper.BC[CUBA.VELOCITY] = {'inflow': ('fixedValue', (0.1, 0, 0)),\n 'outflow': 'zeroGradient',\n 'walls': ('fixedValue', (0, 0, 0)),\n 'frontAndBack': 'empty'}\n wrapper.BC[CUBA.PRESSURE] = {'inflow': 'zeroGradient',\n 'outflow': ('fixedValue', 0),\n 'walls': 'zeroGradient',\n 'frontAndBack': 'empty'}\n\n mesh_inside_wrapper = wrapper.get_dataset(name)\n\n wrapper.run()\n\n # sum data pointwise\n old_vel = 0.0\n old_pres = 0.0\n for point in mesh_inside_wrapper.iter(item_type=CUBA.POINT):\n velo = point.data[CUBA.VELOCITY]\n old_vel += math.sqrt(velo[0]*velo[0] + velo[1]*velo[1] +\n velo[2]*velo[2])\n old_pres += point.data[CUBA.PRESSURE]\n\n wrapper.SP[CUBA.DENSITY] = 5.0\n\n wrapper.run()\n\n # sum data pointwise\n new_vel = 0.0\n new_pres = 0.0\n for point in mesh_inside_wrapper.iter(item_type=CUBA.POINT):\n velo = point.data[CUBA.VELOCITY]\n new_vel += math.sqrt(velo[0]*velo[0] + velo[1]*velo[1] +\n velo[2]*velo[2])\n new_pres += point.data[CUBA.PRESSURE]\n\n self.assertNotAlmostEqual(old_vel, new_vel, 5)\n 
self.assertNotAlmostEqual(old_pres, new_pres, 5)", "def exercise1a():\n\n # Defination of muscles\n parameters = MuscleParameters()\n pylog.warning(\"Loading default muscle parameters\")\n pylog.info(parameters.showParameters())\n pylog.info(\"Use the parameters object to change the muscle parameters\")\n\n # Create muscle object\n muscle = Muscle(parameters)\n\n # Instatiate isometric muscle system\n sys = IsometricMuscleSystem()\n\n # Add the muscle to the system\n sys.add_muscle(muscle)\n \n # Set the initial condition\n x0 = [0.0, sys.muscle.L_OPT]\n # x0[0] --> muscle stimulation intial value\n # x0[1] --> muscle contracticle length initial value\n \n # Set the time for integration\n t_start = 0.0\n t_stop = 0.5\n time_step = 0.001\n time = np.arange(t_start, t_stop, time_step)\n \n # Evalute for a single muscle stimulation\n muscle_stimulation = np.arange(0,1.,0.2)\n \n # Several muscle stretch\n muscle_stretches = np.arange(0,0.3,0.01)\n \n active_active = []\n\n for stim in muscle_stimulation:\n active_forces = []\n passive_forces = []\n total = [] \n lengths = []\n for stretch in muscle_stretches:\n # Run the integration\n result = sys.integrate(x0=x0,\n time=time,\n time_step=time_step,\n stimulation=stim,\n muscle_length=stretch)\n active_forces.append(result.active_force[-1])\n passive_forces.append(result.passive_force[-1])\n total.append(result.active_force[-1]+result.passive_force[-1])\n lengths.append(result.l_ce[-1])\n active_active.append(active_forces)\n \n # Plotting\n plt.figure('Isometric muscle experiment 1')\n plt.plot(lengths, active_forces)\n plt.plot(lengths, passive_forces)\n plt.plot(lengths, total)\n plt.title('Isometric muscle experiment stimulation')\n plt.xlabel('Muscle stretch')\n plt.ylabel('Muscle force')\n plt.legend(('Active','Passive','Total'))\n plt.grid()\n plt.show()\n \n # Plotting\n plt.figure('Isometric muscle experiment 2')\n for i in range(len(muscle_stimulation)):\n plt.plot(lengths, active_active[i])\n plt.title('Isometric muscle experiment')\n plt.xlabel('Muscle stretch')\n plt.ylabel('Muscle force')\n plt.legend(muscle_stimulation)\n plt.grid()\n plt.show()\n \n # Plotting\n #plt.figure('Isotonic muscle experiment')\n #plt.plot(result.time, result.v_ce)\n #plt.title('Isotonic muscle experiment')\n #plt.xlabel('Time [s]')\n #plt.ylabel('Muscle contractilve velocity')\n #plt.grid()\n \n #muscle with longer l_opt\n muscle.L_OPT = 0.5\n muscle_stimulation = 1.\n lce = []\n totalF = []\n activeF=[]\n passiveF=[]\n for stretch in muscle_stretches:\n # Run the integration\n result = sys.integrate(x0=x0,\n time=time,\n time_step=time_step,\n stimulation=muscle_stimulation,\n muscle_length=stretch)\n activeF.append(result.active_force[-1])\n passiveF.append(result.passive_force[-1])\n lce.append(result.l_ce[-1])\n totalF.append(result.active_force[-1]+result.passive_force[-1])\n plt.figure('muscle with l_opt=0.5') \n plt.title('muscle with l_opt=0.5')\n plt.plot(lce, activeF)\n plt.plot(lce, passiveF)\n plt.plot(lce, totalF)\n plt.xlabel('Muscle Stretch')\n plt.ylabel('Force')\n plt.ylim((0,4000))\n plt.legend(('Active Force','Passive Force','Total Force'))\n\n plt.grid()\n \n \n \n #muscle with shorter l_opt\n t_start = 0.0\n t_stop = 1\n time_step = 0.005\n\n time = np.arange(t_start, t_stop, time_step)\n muscle_stretches = np.arange(0,0.3,0.01 )\n muscle.L_OPT = 0.075\n muscle_stimulation = 1.\n lce = []\n totalF = []\n activeF=[]\n passiveF=[]\n plt.figure('muscle with l_opt=0.075') \n\n for stretch in muscle_stretches:\n # Run the 
integration\n result = sys.integrate(x0=x0,\n time=time,\n time_step=time_step,\n stimulation=muscle_stimulation,\n muscle_length=stretch)\n activeF.append(result.active_force[-1])\n passiveF.append(result.passive_force[-1])\n lce.append(result.l_ce[-1])\n totalF.append(result.active_force[-1]+result.passive_force[-1])\n plt.title('muscle with l_opt=0.075')\n plt.plot(lce, activeF)\n plt.plot(lce, passiveF)\n plt.plot(lce, totalF)\n plt.xlabel('Muscle Stretch')\n plt.ylabel('Force')\n plt.ylim((0,4000))\n plt.legend(('Active Force','Passive Force','Total Force'))\n plt.grid()", "def exercise1d():\n\n # Defination of muscles\n muscle_parameters = MuscleParameters()\n print(muscle_parameters.showParameters())\n\n mass_parameters = MassParameters()\n print(mass_parameters.showParameters())\n\n # Create muscle object\n muscle = Muscle(muscle_parameters)\n\n # Create mass object\n mass = Mass(mass_parameters)\n\n pylog.warning(\"Isotonic muscle contraction to be implemented\")\n\n # Instatiate isotonic muscle system\n sys = IsotonicMuscleSystem()\n\n # Add the muscle to the system\n sys.add_muscle(muscle)\n\n # Add the mass to the system\n sys.add_mass(mass)\n\n # You can still access the muscle inside the system by doing\n # >>> sys.muscle.L_OPT # To get the muscle optimal length\n\n # Evalute for a single load\n load = 100.\n\n # Evalute for a single muscle stimulation\n muscle_stimulation = 1.\n\n # Set the initial condition\n x0 = [0.0, sys.muscle.L_OPT,\n sys.muscle.L_OPT + sys.muscle.L_SLACK, 0.0]\n \n # x0[0] - -> activation\n # x0[1] - -> contractile length(l_ce)\n # x0[2] - -> position of the mass/load\n # x0[3] - -> velocity of the mass/load\n \n\n # Set the time for integration\n t_start = 0.0\n t_stop = 0.5\n time_step = 0.001\n time_stabilize = 0.2\n\n time = np.arange(t_start, t_stop, time_step)\n \n loads = np.arange(20, 351, 10)\n \n velocities = []\n\n for index, load in enumerate(loads):\n \n # Run the integration\n result = sys.integrate(x0=x0,\n time=time,\n time_step=time_step,\n time_stabilize=time_stabilize,\n stimulation=muscle_stimulation,\n load=load) \n\n if (result.l_mtc[-1] < sys.muscle.L_OPT + sys.muscle.L_SLACK):\n velocities.append(np.max(result.v_ce))\n print('max')\n else:\n velocities.append(np.min(result.v_ce))\n print('min')\n\n\n #Muscle contracile Velocity - Tension (load) relationship\n \n plt.figure('Isotonic muscle experiment')\n plt.title('Isotonic muscle experiment')\n plt.xlabel('Muscle Contractile Velocity [m/s]')\n plt.ylabel('Tension (load) [N]')\n plt.plot(velocities, loads)\n plt.grid()\n \n #For different stimulations 1.f\n \n muscle_stimulation = np.arange(0,1.1,0.2)\n plt.figure('Isotonic muscle exp with different stimulations')\n plt.title('Isotonic muscle experiment with different stimulations')\n\n for stim in muscle_stimulation:\n velocities = []\n for index, load in enumerate(loads):\n # Run the integration\n result = sys.integrate(x0=x0,\n time=time,\n time_step=time_step,\n time_stabilize=time_stabilize,\n stimulation=stim,\n load=load) \n\n if (result.l_mtc[-1] < sys.muscle.L_OPT + sys.muscle.L_SLACK):\n velocities.append(np.max(result.v_ce))\n else:\n velocities.append(np.min(result.v_ce))\n plt.xlabel('Muscle Contractile Velocity [m/s]')\n plt.ylabel('Tension (load) [N]')\n plt.plot(velocities, loads)\n \n plt.legend(('0','0.2','0.4','0.6','0.8','1.0'))\n plt.grid()", "def test_control_loop_example():\n example.control.print_error_measurement()\n matplotlib.use('agg')\n 
example.control.visualise_error_measurement(fig_file='cs_compare.pdf')\n assert path.isfile('cs_compare.pdf')\n example.control.sil_comparison()", "def step6_run_all(flow_dataset_npz=\"flow_dataset.npz\"):\n global objs, predictions, ticks, timestamps, is_slider, is_spinner, is_note_end, sv, slider_ticks, dist_multiplier, divisor, note_distance_basis, slider_length_base, slider_types, slider_type_rotation, slider_cos, slider_sin, slider_cos_each, slider_sin_each, slider_type_length, slider_lengths, tick_diff, note_distances, maps, labels, special_train_data, special_train_labels, early_stop, loss_ma, extvar, plot_noise\n\n objs, predictions, ticks, timestamps, is_slider, is_spinner, is_note_end, sv, slider_ticks, dist_multiplier = read_map_predictions(\n \"temp/rhythm_data.npz\")\n\n # get divisor from GAN_PARAMS\n divisor = GAN_PARAMS[\"divisor\"]\n\n # get basis\n note_distance_basis = GAN_PARAMS[\"note_distance_basis\"]\n\n # get next_from_slider_end\n next_from_slider_end = GAN_PARAMS[\"next_from_slider_end\"]\n\n # should be slider length each tick, which is usually SV * SMP * 100 / 4\n # e.g. SV 1.6, timing section x1.00, 1/4 divisor, then slider_length_base = 40\n slider_length_base = sv / divisor\n\n # weight for each type of sliders\n slider_type_probs = [0.25, 0.25, 0.25, 0.05, 0.05, 0.03, 0.03, 0.01,\n 0.01, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.015, 0.015, 0.01]\n slider_types = np.random.choice(\n len(slider_type_probs), is_slider.shape, p=slider_type_probs).astype(int)\n\n # these data must be kept consistent with the sliderTypes in load_map.js\n slider_type_rotation = np.array([0, -0.40703540572409336, 0.40703540572409336, -0.20131710837464062, 0.20131710837464062,\n -0.46457807316944644, 0.46457807316944644, 1.5542036732051032, -\n 1.5542036732051032, 0, 0, 0.23783592745745077, -0.23783592745745077,\n 0.5191461142465229, -0.5191461142465229, -0.16514867741462683, 0.16514867741462683, 3.141592653589793])\n\n # this is vector length! 
I should change the variable name probably...\n slider_type_length = np.array([1.0, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97,\n 0.64, 0.64, 0.94, 0.94, 0.94, 0.94, 0.94, 0.94, 0.96, 0.96, 0])\n\n slider_cos = np.cos(slider_type_rotation)\n slider_sin = np.sin(slider_type_rotation)\n\n slider_cos_each = slider_cos[slider_types]\n slider_sin_each = slider_sin[slider_types]\n\n slider_lengths = np.array([slider_type_length[int(\n k)] * slider_length_base[i] for i, k in enumerate(slider_types)]) * slider_ticks\n\n tick_diff = np.concatenate([[100], ticks[1:] - ticks[:-1]])\n\n if next_from_slider_end:\n tick_diff = np.concatenate(\n [[100], tick_diff[1:] - np.floor(slider_ticks * is_slider)[:-1]])\n\n # Timing section reset == tick_diff < 0\n # Use 1 as default value\n tick_diff = np.where(tick_diff < 0, 1, tick_diff)\n\n note_distances = np.clip(tick_diff, 1, divisor * 2) * \\\n (note_distance_basis / divisor)\n\n # Load the flow dataset saved in part 4\n with np.load(flow_dataset_npz) as flow_dataset:\n maps = flow_dataset[\"maps\"]\n labels = np.ones(maps.shape[0])\n\n order2 = np.argsort(np.random.random(maps.shape[0]))\n special_train_data = maps[order2]\n special_train_labels = labels[order2]\n\n early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)\n\n # Start model training\n\n loss_ma = [90, 90, 90]\n extvar = {\"begin\": 10}\n\n plot_noise = np.random.random((1, GAN_PARAMS[\"g_input_size\"]))\n\n if GAN_PARAMS[\"max_epoch\"] == 0:\n osu_a = put_everything_in_the_center()\n else:\n osu_a = generate_map()\n\n data = objs, predictions, ticks, timestamps, is_slider, is_spinner, is_note_end, sv, slider_ticks, dist_multiplier, slider_types, slider_length_base\n return osu_a, data", "def test_scenario(timestep_per_pi, int_method):\n\n #determine BC and IC\n x0 = 0.0 #init pos\n v0 = 1.0 #init vel\n t0 = 0.0 #start-time\n tn = 4.0*np.pi #end-time\n tau = timestep_per_pi*np.pi #timesteps\n n = (tn-t0)/tau + 1 #number of timesteps\n \n time = np.linspace(t0, tn, n) #time-array\n\n #acceleration of point particle with k=m=1\n acc1 = lambda x,v,t: -1.0*x #function must take three arguments!\n\n pos, vel, time = integrate_time(func=acc1,\n init=(x0,v0),\n timearray=time,\n method=int_method)\n\n #analytical solutions\n pos_an = np.sin(time)\n vel_an = np.cos(time)\n\n return time, pos, pos_an, vel, vel_an", "def test_case():\n \"\"\"\n airspeed_altitude(80000.0, 5000.0, 30.12, 19.2,\n &altitude, &ias, &tas);\n\n printf(\"Alt: %.0f; IAS: %.1f; TAS: %.1f\", altitude, ias, tas);\n \"\"\"\n\n diff_press = 50*1e2 # 50 mbar\n print(f\"Alt: {altitude(80000, 30.12)}; IAS: {pitot_ias(5000)}; TAS: {tas(pitot_ias(5000), 19.2, altitude(80000, 30.12), 30.12)}\")", "def calc_SC_module(tilt_angle_deg, IAM_b_vector, IAM_d_vector, I_direct_vector, I_diffuse_vector, Tamb_vector_C, n0, c1, c2,\n mB0_r, mB_max_r, mB_min_r, C_eff_Jperm2K, t_max, aperture_area_m2, dP1, dP2, dP3, dP4, Cp_fluid_JperkgK, Tin_C, Leq, l_ext, l_int, Nseg):\n\n # local variables\n msc_max_kgpers = mB_max_r * aperture_area_m2 / 3600 # maximum mass flow [kg/s]\n\n # Do the calculation of every time step for every possible flow condition\n # get states where highly performing values are obtained.\n specific_flows_kgpers = [np.zeros(8760), (np.zeros(8760) + mB0_r) * aperture_area_m2 / 3600,\n (np.zeros(8760) + mB_max_r) * aperture_area_m2 / 3600,\n (np.zeros(8760) + mB_min_r) * aperture_area_m2 / 3600, np.zeros(8760), np.zeros(8760)] # in kg/s\n specific_pressure_losses_Pa = [np.zeros(8760), (np.zeros(8760) + dP2) * 
aperture_area_m2, (np.zeros(8760) + dP3) * aperture_area_m2,\n (np.zeros(8760) + dP4) * aperture_area_m2, np.zeros(8760), np.zeros(8760)] # in Pa\n\n # generate empty lists to store results\n temperature_out = [np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760)]\n temperature_in = [np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760)]\n temperature_mean = [np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760)]\n supply_out_kW = [np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760)]\n supply_losses_kW = [np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760)]\n auxiliary_electricity_kW = [np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760), np.zeros(8760)]\n supply_out_pre = np.zeros(8760)\n supply_out_total_kW = np.zeros(8760)\n mcp_kWperK = np.zeros(8760)\n\n # calculate absorbed radiation\n tilt = radians(tilt_angle_deg)\n q_rad_vector = np.vectorize(calc_q_rad)(n0, IAM_b_vector, I_direct_vector, IAM_d_vector, I_diffuse_vector,\n tilt) # absorbed solar radiation in W/m2 is a mean of the group\n for flow in range(6):\n mode_seg = 1 # mode of segmented heat loss calculation. only one mode is implemented.\n TIME0 = 0\n DELT = 1 # timestep 1 hour\n delts = DELT * 3600 # convert time step in seconds\n Tfl = np.zeros([3, 1]) # create vector to store value at previous [1] and present [2] time-steps\n DT = np.zeros([3, 1])\n Tabs = np.zeros([3, 1])\n STORED = np.zeros([600, 1])\n TflA = np.zeros([600, 1])\n TflB = np.zeros([600, 1])\n TabsB = np.zeros([600, 1])\n TabsA = np.zeros([600, 1])\n q_gain_Seg = np.zeros([101, 1]) # maximum Iseg = maximum Nseg + 1 = 101\n\n for time in range(8760):\n Mfl_kgpers = specific_flows_kgpers[flow][time] # [kg/s]\n if time < TIME0 + DELT / 2:\n # set output values to the appropriate initial values\n for Iseg in range(101, 501): # 400 points with the data\n STORED[Iseg] = Tin_C\n else:\n # write average temperature of all segments at the end of previous time-step\n # as the initial temperature of the present time-step\n for Iseg in range(1, Nseg + 1): # 400 points with the data\n STORED[100 + Iseg] = STORED[200 + Iseg] # thermal capacitance node temperature\n STORED[300 + Iseg] = STORED[400 + Iseg] # absorber node temperature\n\n # calculate stability criteria\n if Mfl_kgpers > 0:\n stability_criteria = Mfl_kgpers * Cp_fluid_JperkgK * Nseg * (DELT * 3600) / (C_eff_Jperm2K * aperture_area_m2)\n if stability_criteria <= 0.5:\n print ('ERROR: stability criteria' + str(stability_criteria) + 'is not reached. 
aperture_area: '\n + str(aperture_area_m2) + 'mass flow: ' + str(Mfl_kgpers))\n\n # calculate average fluid temperature and average absorber temperature at the beginning of the time-step\n Tamb_C = Tamb_vector_C[time]\n q_rad_Wperm2 = q_rad_vector[time]\n Tfl[1] = 0\n Tabs[1] = 0\n for Iseg in range(1, Nseg + 1):\n Tfl[1] = Tfl[1] + STORED[100 + Iseg] / Nseg # mean fluid temperature\n Tabs[1] = Tabs[1] + STORED[300 + Iseg] / Nseg # mean absorber temperature\n\n ## first guess for Delta T\n if Mfl_kgpers > 0:\n Tout_C = Tin_C + (q_rad_Wperm2 - (c1 + 0.5) * (Tin_C - Tamb_C)) / (Mfl_kgpers * Cp_fluid_JperkgK / aperture_area_m2)\n Tfl[2] = (Tin_C + Tout_C) / 2 # mean fluid temperature at present time-step\n else:\n Tout_C = Tamb_C + q_rad_Wperm2 / (c1 + 0.5)\n Tfl[2] = Tout_C # fluid temperature same as output\n DT[1] = Tfl[2] - Tamb_C # difference between mean absorber temperature and the ambient temperature\n\n # calculate q_gain with the guess for DT[1]\n q_gain_Wperm2 = calc_q_gain(Tfl, Tabs, q_rad_Wperm2, DT, Tin_C, Tout_C, aperture_area_m2, c1, c2, Mfl_kgpers,\n delts, Cp_fluid_JperkgK, C_eff_Jperm2K, Tamb_C)\n\n A_seg_m2 = aperture_area_m2 / Nseg # aperture area per segment\n # multi-segment calculation to avoid temperature jump at times of flow rate changes.\n for Iseg in range(1, Nseg + 1):\n # get temperatures of the previous time-step\n TflA[Iseg] = STORED[100 + Iseg]\n TabsA[Iseg] = STORED[300 + Iseg]\n if Iseg > 1:\n Tin_Seg_C = Tout_Seg_C\n else:\n Tin_Seg_C = Tin_C\n\n if Mfl_kgpers > 0 and mode_seg == 1: # same heat gain/ losses for all segments\n Tout_Seg_K = ((Mfl_kgpers * Cp_fluid_JperkgK * (Tin_Seg_C + 273.15)) / A_seg_m2 -\n (C_eff_Jperm2K * (Tin_Seg_C + 273.15)) / (2 * delts) + q_gain_Wperm2 +\n (C_eff_Jperm2K * (TflA[Iseg] + 273.15) / delts)) / (Mfl_kgpers * Cp_fluid_JperkgK / A_seg_m2 + C_eff_Jperm2K / (2 * delts))\n Tout_Seg_C = Tout_Seg_K - 273.15 # in [C]\n TflB[Iseg] = (Tin_Seg_C + Tout_Seg_C) / 2\n else: # heat losses based on each segment's inlet and outlet temperatures.\n Tfl[1] = TflA[Iseg]\n Tabs[1] = TabsA[Iseg]\n q_gain_Wperm2 = calc_q_gain(Tfl, Tabs, q_rad_Wperm2, DT, Tin_Seg_C, Tout_C, A_seg_m2, c1, c2,\n Mfl_kgpers, delts, Cp_fluid_JperkgK, C_eff_Jperm2K, Tamb_C)\n Tout_Seg_C = Tout_C\n\n if Mfl_kgpers > 0:\n TflB[Iseg] = (Tin_Seg_C + Tout_Seg_C) / 2\n Tout_Seg_C = TflA[Iseg] + (q_gain_Wperm2 * delts) / C_eff_Jperm2K\n else:\n TflB[Iseg] = Tout_Seg_C\n\n #TflB[Iseg] = Tout_Seg\n q_fluid_Wperm2 = (Tout_Seg_C - Tin_Seg_C) * Mfl_kgpers * Cp_fluid_JperkgK / A_seg_m2\n q_mtherm_Whperm2 = (TflB[Iseg] - TflA[Iseg]) * C_eff_Jperm2K / delts # total heat change rate of thermal capacitance\n q_balance_error = q_gain_Wperm2 - q_fluid_Wperm2 - q_mtherm_Whperm2\n if abs(q_balance_error) > 1:\n time = time # re-enter the iteration when energy balance not satisfied\n q_gain_Seg[Iseg] = q_gain_Wperm2 # in W/m2\n\n # resulting net energy output\n q_out_kW = (Mfl_kgpers * Cp_fluid_JperkgK * (Tout_Seg_C - Tin_C)) / 1000 #[kW]\n Tabs[2] = 0\n # storage of the mean temperature\n for Iseg in range(1, Nseg + 1):\n STORED[200 + Iseg] = TflB[Iseg]\n STORED[400 + Iseg] = TabsB[Iseg]\n Tabs[2] = Tabs[2] + TabsB[Iseg] / Nseg\n\n # outputs\n temperature_out[flow][time] = Tout_Seg_C\n temperature_in[flow][time] = Tin_C\n supply_out_kW[flow][time] = q_out_kW\n temperature_mean[flow][time] = (Tin_C + Tout_Seg_C) / 2 # Mean absorber temperature at present\n\n # q_gain = 0\n # TavgB = 0\n # TavgA = 0\n # for Iseg in range(1, Nseg + 1):\n # q_gain = q_gain + q_gain_Seg[Iseg] * A_seg_m2 # 
[W]\n # TavgA = TavgA + TflA[Iseg] / Nseg\n # TavgB = TavgB + TflB[Iseg] / Nseg\n #\n # # OUT[9] = q_gain/Area_a # in W/m2\n # q_mtherm = (TavgB - TavgA) * C_eff * aperture_area / delts\n # q_balance_error = q_gain - q_mtherm - q_out\n\n # OUT[11] = q_mtherm\n # OUT[12] = q_balance_error\n if flow < 4:\n auxiliary_electricity_kW[flow] = np.vectorize(calc_Eaux_SC)(specific_flows_kgpers[flow], specific_pressure_losses_Pa[flow],\n Leq, l_int, aperture_area_m2) # in kW\n if flow == 3:\n q1 = supply_out_kW[0]\n q2 = supply_out_kW[1]\n q3 = supply_out_kW[2]\n q4 = supply_out_kW[3]\n E1 = auxiliary_electricity_kW[0]\n E2 = auxiliary_electricity_kW[1]\n E3 = auxiliary_electricity_kW[2]\n E4 = auxiliary_electricity_kW[3]\n # calculate optimal mass flow and the corresponding pressure loss\n specific_flows_kgpers[4], specific_pressure_losses_Pa[4] = calc_optimal_mass_flow(q1, q2, q3, q4, E1, E2, E3, E4, 0,\n mB0_r, mB_max_r, mB_min_r, 0,\n dP2, dP3, dP4, aperture_area_m2)\n if flow == 4:\n # calculate pumping electricity when operatres at optimal mass flow\n auxiliary_electricity_kW[flow] = np.vectorize(calc_Eaux_SC)(specific_flows_kgpers[flow], specific_pressure_losses_Pa[flow],\n Leq, l_int, aperture_area_m2) # in kW\n dp5 = specific_pressure_losses_Pa[flow]\n q5 = supply_out_kW[flow]\n m5 = specific_flows_kgpers[flow]\n # set points to zero when load is negative\n specific_flows_kgpers[5], specific_pressure_losses_Pa[5] = calc_optimal_mass_flow_2(m5, q5, dp5)\n\n if flow == 5: # optimal mass flow\n supply_losses_kW[flow] = np.vectorize(calc_qloss_network)(specific_flows_kgpers[flow], l_ext, aperture_area_m2,\n temperature_mean[flow], Tamb_vector_C, msc_max_kgpers)\n supply_out_pre = supply_out_kW[flow].copy() + supply_losses_kW[flow].copy()\n auxiliary_electricity_kW[flow] = np.vectorize(calc_Eaux_SC)(specific_flows_kgpers[flow], specific_pressure_losses_Pa[flow],\n Leq, l_int, aperture_area_m2) # in kW\n supply_out_total_kW = supply_out_kW[flow].copy() + 0.5 * auxiliary_electricity_kW[flow].copy() - supply_losses_kW[flow].copy() # eq.(58) _[J. 
Fonseca et al., 2016]\n mcp_kWperK = specific_flows_kgpers[flow] * (Cp_fluid_JperkgK / 1000) # mcp in kW/K\n\n result = [supply_losses_kW[5], supply_out_total_kW, auxiliary_electricity_kW[5], temperature_out[5], temperature_in[5], mcp_kWperK]\n q_rad_per_panel = q_rad_vector*aperture_area_m2\n return result", "def main():\n\t#Necessary Parameters for Simulation\n\tAmplitudes = ['230','260','290']\n\tConditions = ['No EES','EES','EES+A08','EES+A08+ProIncrease']\n\n\n\n\t#eesAmplitude = \"230\"\n\teesAmplitudeName = \"230\"\n\tdelay = \"2\"\n\ttoAddname = \"\"\n\tspecies = \"rat\"\n\t#Paramters initialization\n\ttotSimTime = rp.get_tot_sim_time()\n\tgaitCyclesFileName = rp.get_gait_cycles_file()\n\tmuscles = rp.get_muscles()\n\ttemplateFile = \"templateFrwSimRORaReal.txt\"\n\tw1 = 0.011\n\tw2 = -0.005\n\n\ttemplateFile = \"A08.txt\"\n\n\ttls.modify_network_structure(templateFile,templateFile,delay,[w1,w2])\n\n\teesFrequencies = range(0,41,40)\n\tnProc = 4\n\tseed = \"1\"\n\n\tnSim = len(eesFrequencies)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\n\t# run simulations\n\tfor j,eesAmplitude in enumerate(Amplitudes):\n\t\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t\tfor condition in Conditions:\n\t\t\t\t#name = \"Tonic_FFS_\"+inputFileName+\"_freq_\"+str(eesFrequency)\n\t\t\t\tinputFileName = condition\n\t\t\t\tinputFile = \"generatedStructures/\"+inputFileName+\".txt\"\n\t\t\t\tname = \"Tonic_FFS_\"+condition+\"_freq_\"+str(eesFrequency)\n\t\t\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\t\t\tif not resultFile:\n\t\t\t\t\tprogram = ['python','./scripts/runForSimMuscleSpindles_RORa.py',\\\n\t\t\t\t\t\tstr(eesFrequency),eesAmplitude,inputFile,name,\"--simTime\",str(totSimTime),\"--seed\",seed,\"--noPlot\"]\n\n\t\t\t\tif not resultFile: gt.run_subprocess(program)\n\n\t\t\t\tcount+=1\n\t\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\n\n\n\t\"\"\" create plots \"\"\"\n\terrParams = dict(lw=0.5, capsize=1, capthick=0.5)\n\twith open(gaitCyclesFileName, 'r') as pickle_file:\n\t\theelStrikes = pickle.load(pickle_file)\n\t\tfootOffs = pickle.load(pickle_file)\n\n\n\t# Figure 5 plot all gait cycles- afferent and efferents\n\t#if not phasicStim:\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_firingRates.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_firingRates.pdf\")\n\tfig, ax = plt.subplots(2, 4,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\n\t\t#name = \"Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\t# get data\n\t\tprint 
name\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tprint resultFile\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\n\t\t# get gait cycles\n\t\tif not 'heelStrikeSamples' in locals():\n\t\t\tnSamples = len(meanFr[muscles[0]][\"Mn\"])\n\t\t\tdtMeanFr = float(totSimTime)/nSamples\n\t\t\theelStrikeSamples = [int(x) for x in heelStrikes*1000./dtMeanFr]\n\t\t\tfootOffSamples = [int(x) for x in footOffs*1000./dtMeanFr]\n\t\t\tsamples = range(nSamples)\n\t\t\tstance = np.zeros(nSamples).astype(bool)\n\t\t\tfor strike,off in zip(heelStrikeSamples,footOffSamples):\n\t\t\t\tif strike>nSamples: break\n\t\t\t\tstance[strike:off]=True\n\n\t\tfor j,muscle in enumerate(muscles):\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'],color=colors[i])\n\t\t\tax[j,0].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].plot(meanFr[muscle]['IaInt'],color=colors[i])\n\t\t\tax[j,1].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,2].plot(meanFr[muscle]['Mn'],color=colors[i])\n\t\t\tax[j,2].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,3].plot(estimatedEmg[muscle]['Mn'],color=colors[i])\n\t\t\tax[j,3].fill_between(samples, 0, 200, where=stance, facecolor='#b0abab', alpha=0.25)\n\n\n\tfor j,muscle in enumerate(muscles):\n\t\tax[j,0].set_ylim([0,200])\n\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,1].set_ylim([0,200])\n\t\tax[j,1].set_title(\"IaInt firing rate - \"+muscle)\n\t\tax[j,1].set_xlabel(\"Time (ms)\")\n\t\tax[j,1].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,2].set_ylim([0,200])\n\t\tax[j,2].set_title(\"Mn firing rate - \"+muscle)\n\t\tax[j,2].set_xlabel(\"Time (ms)\")\n\t\tax[j,2].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,3].set_ylim([0,200])\n\t\tax[j,3].set_title(\"EMG - \"+muscle)\n\t\tax[j,3].set_xlabel(\"Time (ms)\")\n\t\tax[j,3].set_ylabel(\"Emg amplitude (a.u.)\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)\n\n\n# FIgure 5 plot 2 single gait cycles- afferent and efferents + mn phasicity score\n\tif species == \"rat\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\telif species == \"human\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_single_firingRates.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_single_firingRates.pdf\")\n\tfig, ax = plt.subplots(2, 6,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\tbar_width = 5\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\t# get 
data\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\n\t\t# compute stats\n\t\tiaIntModDepth = {}\n\t\tactiveMnFr={}\n\t\tfor muscle in muscles:\n\t\t\tiaIntModDepth[muscle]=[]\n\t\t\tactiveMnFr[muscle]=[]\n\t\tfor j in xrange(len(heelStrikeSamples)-1):\n\t\t\tif heelStrikeSamples[j+1]>nSamples-50: break\n\t\t\tif heelStrikeSamples[j]<50:continue # to skip artefacts\n\t\t\tfor muscle in muscles:\n\t\t\t\tiaIntModDepth[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['IaInt'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].max()-meanFr[muscle]['IaInt'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].min())\n\t\t\t\tmnActivityDuringCycle = meanFr[muscle]['Mn'][heelStrikeSamples[j]:heelStrikeSamples[j+1]]\n\t\t\t\tactiveMnFr[muscle].append(\\\n\t\t\t\t\tmnActivityDuringCycle[mnActivityDuringCycle>=0.8*mnActivityDuringCycle.max()].mean())\n\t\t\t\t\t# mnActivityDuringCycle[mnActivityDuringCycle>=1.5*mnActivityDuringCycle.std()].mean())\n\t\t\t\t\t# mnActivityDuringCycle[mnActivityDuringCycle>=np.percentile(mnActivityDuringCycle,90)].mean())\n\t\tiaIntModDepthStats = {}\n\t\tactiveMnFrStats = {}\n\t\tfor muscle in muscles:\n\t\t\tiaIntModDepthStats[muscle] = {\"mean\":np.mean(iaIntModDepth[muscle]),\n\t\t\t\t\"sem\":np.std(iaIntModDepth[muscle])/(np.sqrt(len(iaIntModDepth[muscle])-1))}\n\t\t\tactiveMnFrStats[muscle] = {\"mean\":np.mean(activeMnFr[muscle]),\n\t\t\t\t\"sem\":np.std(activeMnFr[muscle])/(np.sqrt(len(activeMnFr[muscle])-1))}\n\n\t\t# get gait cycles to plot\n\t\tif not 'startPlot' in locals():\n\t\t\tstartPlot = heelStrikeSamples[startGaitCycleN-1]\n\t\t\tstopPlot = heelStrikeSamples[startGaitCycleN+nCycles-1]\n\t\t\tif stopPlot>nSamples: stopPlot=nSamples\n\t\t\treducedSamples = range(stopPlot-startPlot)\n\t\t\treducedStance = stance[startPlot:stopPlot]\n\n\t\tfor j,muscle in enumerate(muscles):\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,0].fill_between(reducedSamples, 0, 200, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].plot(meanFr[muscle]['IaInt'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,1].fill_between(reducedSamples, 0, 250, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,2].bar(eesFrequency,iaIntModDepthStats[muscle][\"mean\"],bar_width,yerr=iaIntModDepthStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\txValsScatter = np.linspace(0,bar_width*0.9,len(iaIntModDepth[muscle]))+eesFrequency-bar_width*0.45\n\t\t\tax[j,2].scatter(xValsScatter,iaIntModDepth[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\n\t\t\tax[j,3].plot(meanFr[muscle]['Mn'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,3].fill_between(reducedSamples, 0, 40, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,4].bar(eesFrequency,activeMnFrStats[muscle][\"mean\"],bar_width,yerr=activeMnFrStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\tax[j,4].scatter(xValsScatter,activeMnFr[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\t\t\tax[j,5].plot(estimatedEmg[muscle]['Mn'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,5].fill_between(reducedSamples, -50, 50, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\n\tfor j,muscle 
in enumerate(muscles):\n\t\tax[j,0].set_ylim([0,200])\n\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,1].set_ylim([0,250])\n\t\tax[j,1].set_title(\"IaInt firing rate - \"+muscle)\n\t\tax[j,1].set_xlabel(\"Time (ms)\")\n\t\tax[j,1].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,2].set_ylim([0,250])\n\t\tax[j,2].set_title(\"Mean IaInr Fr while active\")\n\t\tax[j,2].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\tax[j,2].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,3].set_ylim([0,40])\n\t\tax[j,3].set_title(\"Mn firing rate - \"+muscle)\n\t\tax[j,3].set_xlabel(\"Time (ms)\")\n\t\tax[j,3].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,4].set_ylim([0,40])\n\t\tax[j,4].set_title(\"Mean Mn Fr while active\")\n\t\tax[j,4].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\tax[j,4].set_ylabel(\"Firing rate (Imp/s)\")\n\t\tax[j,5].set_ylim([-50,50])\n\t\tax[j,5].set_title(\"EMG - \"+muscle)\n\t\tax[j,5].set_xlabel(\"Time (ms)\")\n\t\tax[j,5].set_ylabel(\"Emg amplitude (a.u.)\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)\n\n\n\n\n\n\t# FIgure 2-7 plot\n\tif species == \"rat\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\telif species == \"human\":\n\t\tstartGaitCycleN = 3\n\t\tnCycles = 1\n\n\t#if not phasicStim:\n\tfigName = time.strftime(\"/%Y_%m_%d_Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_afferentStats.pdf\")\n\t#else: figName = time.strftime(\"/%Y_%m_%d_Phasic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_afferentStats.pdf\")\n\tfig, ax = plt.subplots(2, 4,figsize=(16,9))\n\tcmap = plt.get_cmap('winter')\n\tcolors = cmap(np.linspace(0.1,0.9,len(eesFrequencies)))\n\tbar_width = 5\n\n\tmeanPerEraserApIaf = []\n\toffsetMeanFr = 0\n\toffsetMeanModDepth = 0\n\n\tfor i,eesFrequency in enumerate(eesFrequencies):\n\t\t#if not phasicStim:\n\t\tname = \"FS_EES_230uA_\"+str(eesFrequency)+\"Hz_Delay_2ms_Tonic_FFS_Control_freq_\"+str(eesFrequency)\n\t\t#name = \"Tonic_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\t#else: name = \"Phasic_\"+emgVsKinMod+\"_FFS_species_\"+toAddname+inputFileName+species+\"_muscles_\"+\"\".join(muscles)+\"_delay_\"+str(delay)+\"_amp_\"+str(eesAmplitudeName)+\"_freq_\"+str(eesFrequency)\n\t\tif species == \"human\":name += hp.get_dataset()\n\n\t\tresultFile = gt.find(\"*\"+name+\".p\",pathToResults)\n\t\tif len(resultFile)>1: print \"Warning: multiple result files found!!!\"\n\t\twith open(resultFile[0], 'r') as pickle_file:\n\t\t\testimatedEmg = pickle.load(pickle_file)\n\t\t\tmeanFr = pickle.load(pickle_file)\n\t\t\tmeanPerEraserApIaf.append(pickle.load(pickle_file))\n\n\t\t# compute stats\n\t\tiaModDepth = {}\n\t\tiaMeanFr={}\n\t\tfor muscle in muscles:\n\t\t\tiaModDepth[muscle]=[]\n\t\t\tiaMeanFr[muscle]=[]\n\t\tfor j in xrange(len(heelStrikeSamples)-1):\n\t\t\tif heelStrikeSamples[j+1]>nSamples-50: break\n\t\t\tif heelStrikeSamples[j]<50:continue # to skip artefacts\n\t\t\tfor muscle in 
muscles:\n\t\t\t\tiaModDepth[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].max()-meanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].min())\n\t\t\t\tiaMeanFr[muscle].append(\\\n\t\t\t\t\tmeanFr[muscle]['Iaf'][heelStrikeSamples[j]:heelStrikeSamples[j+1]].mean())\n\t\tiaModDepthStats = {}\n\t\tiaMeanFrStats = {}\n\t\tfor muscle in muscles:\n\t\t\tiaModDepthStats[muscle] = {\"mean\":np.mean(iaModDepth[muscle]),\n\t\t\t\t\"sem\":np.std(iaModDepth[muscle])/(np.sqrt(len(iaModDepth[muscle])-1))}\n\t\t\tiaMeanFrStats[muscle] = {\"mean\":np.mean(iaMeanFr[muscle]),\n\t\t\t\t\"sem\":np.std(iaMeanFr[muscle])/(np.sqrt(len(iaMeanFr[muscle])-1))}\n\n\t\t# get gait cycles to plot\n\t\tif not 'startPlot' in locals():\n\t\t\tstartPlot = heelStrikeSamples[startGaitCycleN-1]\n\t\t\tstopPlot = heelStrikeSamples[startGaitCycleN+nCycles-1]\n\t\t\tif stopPlot>nSamples: stopPlot=nSamples\n\t\t\treducedSamples = range(stopPlot-startPlot)\n\t\t\treducedStance = stance[startPlot:stopPlot]\n\n\t\tfor j,muscle in enumerate(muscles):\n\n\t\t\tax[j,0].plot(meanFr[muscle]['Iaf'][startPlot:stopPlot],color=colors[i])\n\t\t\tax[j,0].fill_between(reducedSamples, 0, 125, where=reducedStance, facecolor='#b0abab', alpha=0.25)\n\t\t\tax[j,1].bar(eesFrequency,iaMeanFrStats[muscle][\"mean\"],bar_width,yerr=iaMeanFrStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\txValsScatter = np.linspace(0,bar_width*0.9,len(iaMeanFr[muscle]))+eesFrequency-bar_width*0.45\n\t\t\tax[j,1].scatter(xValsScatter,iaMeanFr[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\n\t\t\tax[j,2].bar(eesFrequency,iaModDepthStats[muscle][\"mean\"],bar_width,yerr=iaModDepthStats[muscle][\"sem\"],\\\n\t\t\t\tcolor=colors[i],error_kw=errParams)\n\t\t\tax[j,2].scatter(xValsScatter,iaModDepth[muscle], marker='o',edgecolor='black', linewidth='0.1', color=\"#dddde3\", s=7, zorder=3, alpha=0.7)\n\t\t\tax[j,3].bar(eesFrequency,meanPerEraserApIaf[-1],5,color=colors[i])\n\n\t\t\tax[j,0].set_ylim([0,125])\n\t\t\tax[j,0].set_title(\"Ia fibers firing rate - \"+muscle)\n\t\t\tax[j,0].set_xlabel(\"Time (ms)\")\n\t\t\tax[j,0].set_ylabel(\"Firing rate (Imp/s)\")\n\t\t\tax[j,1].set_ylim([0,125])\n\t\t\tax[j,1].set_title(\"Mean Ia firing rate \")\n\t\t\tax[j,1].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\t\tax[j,1].set_ylabel(\"(imp/s)\")\n\t\t\tax[j,2].set_ylim([0,80])\n\t\t\tax[j,2].set_title(\"modulation depth\")\n\t\t\tax[j,2].set_xlabel(\"Stimulation amplitude (uA)\")\n\t\t\tax[j,2].set_ylabel(\"(imp/s)\")\n\t\t\tax[j,3].set_ylim([0,100])\n\t\t\tax[j,3].set_title(\"Percentage erased APs\")\n\t\t\tax[j,3].set_xlabel(\"Stimulation frequency (Hz)\")\n\t\t\tax[j,3].set_ylabel(\"Percentage\")\n\tplt.savefig(pathToResults+figName, format=\"pdf\",transparent=True)", "def test_simulationRun(self):\n self.opt = { 'temperature' : 300.0, 'friction' : 1, 'dt' : 0.00002,\n 'nIter' : 2, 'nstepsNC' : 2, 'nstepsMD' : 1, 'nprop' : 1,\n 'nonbondedMethod' : 'NoCutoff', 'constraints': 'HBonds',\n 'trajectory_interval' : 1, 'reporter_interval' : 1,\n 'outfname' : 'mc-test',\n 'platform' : None,\n 'constraints' : 'HBonds',\n 'mc_per_iter' : 2 }\n\n structure = self.full_struct\n class SetRotationMove(RandomLigandRotationMove):\n def __init__(self, structure, resname='LIG'):\n super(SetRotationMove, self).__init__(structure, resname)\n\n def move(self, context):\n \"\"\"Function that performs a random rotation about the\n center of mass of the ligand.\n 
\"\"\"\n #TODO: check if we need to deepcopy\n positions = context.getState(getPositions=True).getPositions(asNumpy=True)\n\n self.positions = positions[self.atom_indices]\n self.center_of_mass = self.getCenterOfMass(self.positions, self.masses)\n reduced_pos = self.positions - self.center_of_mass\n\n # Define random rotational move on the ligand\n #set rotation so that test is reproducible\n set_rotation_matrix = np.array([[-0.62297988, -0.17349253, 0.7627558 ],\n [ 0.55082352, -0.78964857, 0.27027502],\n [ 0.55541834, 0.58851973, 0.58749893]])\n\n\n #set_rotation_matrix = np.array([[1, 0, 0],\n # [0, 1, 0],\n # [0, 0, 1]])\n\n #multiply lig coordinates by rot matrix and add back COM translation from origin\n rot_move = np.dot(reduced_pos, set_rotation_matrix) * positions.unit + self.center_of_mass\n\n # Update ligand positions in nc_sim\n for index, atomidx in enumerate(self.atom_indices):\n positions[atomidx] = rot_move[index]\n context.setPositions(positions)\n positions = context.getState(getPositions=True).getPositions(asNumpy=True)\n self.positions = positions[self.atom_indices]\n return context\n\n\n self.model = SetRotationMove(structure, resname='ALA')\n #self.model = RandomLigandRotationMove(structure, resname='ALA')\n\n self.model.atom_indices = range(22)\n self.model.topology = structure[self.model.atom_indices].topology\n self.model.positions = structure[self.model.atom_indices].positions\n self.model.calculateProperties()\n\n self.mover = MoveEngine(self.model)\n #Initialize the SimulationFactory object\n sims = SimulationFactory(structure, self.mover, **self.opt)\n #print(sims)\n system = sims.generateSystem(structure, **self.opt)\n simdict = sims.createSimulationSet()\n alch_system = sims.generateAlchSystem(system, self.model.atom_indices)\n self.nc_sim = sims.generateSimFromStruct(structure, self.mover, alch_system, ncmc=True, **self.opt)\n self.model.calculateProperties()\n self.initial_positions = self.nc_sim.context.getState(getPositions=True).getPositions(asNumpy=True)\n mc_sim = Simulation(sims, self.mover, **self.opt)\n #monkeypatch to access acceptance value\n def nacceptRejectMC(self, temperature=300, **opt):\n \"\"\"Function that chooses to accept or reject the proposed move.\n \"\"\"\n md_state0 = self.current_state['md']['state0']\n md_state1 = self.current_state['md']['state1']\n log_mc = (md_state1['potential_energy'] - md_state0['potential_energy']) * (-1.0/self.nc_sim.context._integrator.kT)\n randnum = math.log(np.random.random())\n\n if log_mc > randnum:\n self.accept += 1\n print('MC MOVE ACCEPTED: log_mc {} > randnum {}'.format(log_mc, randnum) )\n self.md_sim.context.setPositions(md_state1['positions'])\n else:\n self.reject += 1\n print('MC MOVE REJECTED: log_mc {} < {}'.format(log_mc, randnum) )\n self.md_sim.context.setPositions(md_state0['positions'])\n self.log_mc = log_mc\n self.md_sim.context.setVelocitiesToTemperature(self.opt['temperature'])\n mc_sim.acceptRejectMC = nacceptRejectMC\n nacceptRejectMC.__get__(mc_sim)\n mc_sim.acceptRejectMC = types.MethodType(nacceptRejectMC, mc_sim)\n mc_sim.runMC(self.opt['nIter'])\n #get log acceptance\n print(mc_sim.log_mc)\n #if mc is working, should be around -24.1\n assert mc_sim.log_mc <= -23.8 and mc_sim.log_mc >= -24.3", "def run_analysis(self):\n ### skip some snapshots for testing purposes\n nskip = 199\n read_char.skip_snapshots(self.hfile, self.ifile, nskip)\n ### read in the first two steps (required for velocity related computations\n xs_old, ys_old, lx_old, ly_old, tstep_old, natoms_old = 
read_char.read_snapshot(self.hfile, self.ifile)\n x_old = xs_old*lx_old\n y_old = ys_old*ly_old\n xs,ys,lx,ly,tstep,natoms = read_char.read_snapshot(self.hfile, self.ifile)\n x = xs*lx\n y = ys*ly\n ### loop over all steps of the input file\n for step in range(nskip+1,self.nsteps-1):\n print step\n ### read in coordinates (as required)\n xs_new,ys_new,lx_new,ly_new,tstep_new,natoms_new = read_char.read_snapshot(self.hfile, self.ifile)\n x_new = xs_new*lx_new\n y_new = ys_new*ly_new\n ### compute further current per/atom quantities\n phi = misc_tools.compute_orientation(x,y,lx,ly,self.npol)\n vx,vy = misc_tools.compute_velocity(x_old,y_old, x_new, y_new, lx, ly, tstep_old, tstep_new, natoms)\n ### start desired analysis methods\n # density\n if self.density_flag:\n self.density.compute(step,x,y,lx,ly,natoms, plot = 'False')\n # number fluctuations\n if self.nf_flag:\n self.numberfluctuation.compute(step,xs,ys, plot = 'False')\n # voronoi density\n if self.voronoi_flag:\n self.voronoidensity.compute(step,x,y,lx,ly,natoms, plot = 'False')\n # velocity / worticity\n if self.velocity_flag:\n self.velocityworticity.compute(step,x,y,vx,vy,natoms,lx,ly, plot = 'False')\n # orientation / velocity\n if self.orientvel_flag:\n self.orientvel.compute(step,x,y,vx,vy,phi,natoms, plot = 'False')\n # defect points\n if self.pointdefects_flag:\n self.pointdefects.compute(step,x,y,phi,lx,ly,natoms)\n ### move coordinate arrays\n xs_old = np.copy(xs)\n ys_old = np.copy(ys)\n x_old = np.copy(x)\n y_old = np.copy(y)\n tstep_old = tstep\n xs = np.copy(xs_new)\n ys = np.copy(ys_new)\n x = np.copy(x_new)\n y = np.copy(y_new)\n tstep = tstep_new\n return", "def main(adate,aprefix):\n script_start_time = datetime.now() #Script Timer\n\n if len(adate)!=8:\n raise ValueError('Please enter a date in the format yyyymmdd') \n\n # Specify Date to Process\n year = int(adate[0:4])\n month = int(adate[4:6])\n day = int(adate[6:8])\n start_date = datetime(year,month,day)\n\n # Loop through 24 hours\n dsout = False\n for jj in range(0,24):\n wrf_file = make_wrf_file(start_date + pd.to_timedelta(jj,unit='h'), forecast_offset)\n print('Processing: ' + wrf_file)\n ds = False\n try:\n ds = xr.open_dataset(directory + wrf_file, engine='pynio')\n except:\n print('Could not open ' + wrf_file)\n if(isinstance(ds,xr.Dataset)):\n ds = clean_dataset(ds)\n if(isinstance(dsout,xr.Dataset)):\n dsout = xr.concat([dsout, ds],dim='time')\n else:\n dsout = ds\n\n if(isinstance(dsout,xr.Dataset)):\n # Add attributes\n dsout['eastward_wind'].attrs['standard_name'] = 'eastward_wind'\n dsout['eastward_wind'].attrs['comment'] = 'The zonal wind speed (m/s) indicates the u (positive eastward) component of where the wind is going.'\n dsout['northward_wind'].attrs['standard_name'] = 'northward_wind'\n dsout['northward_wind'].attrs['comment'] = 'The meridional wind speed (m/s) indicates the v (positive northward) component of where the wind is going.'\n \n # Add Wind Speed\n wind_speed = np.sqrt(dsout['eastward_wind']**2 + dsout['northward_wind']**2)\n wind_speed.attrs['units'] = 'm s-1'\n wind_speed.attrs['comment'] = 'Wind Speed is calculated from the Zonal and Meridional wind speeds.'\n wind_speed.attrs['long_name'] = 'Wind Speed'\n wind_speed.attrs['standard_name'] = 'wind_speed'\n dsout['wind_speed'] = wind_speed\n \n # Add Wind Direction\n wind_dir = 270 - xr.ufuncs.arctan2(dsout['northward_wind'],dsout['eastward_wind'])*180/np.pi\n #wind_dir = (wind_dir.where(wind_dir<0)+360).combine_first(wind_dir) #Flip negative degrees - Doesn't seem to work\n 
wind_dir = wind_dir % 360 #Use modulo to keep degrees between 0-360\n wind_dir.attrs['units'] = 'degree'\n wind_dir.attrs['comment'] = 'The direction from which winds are coming from, in degrees clockwise from true N.'\n wind_dir.attrs['long_name'] = 'Wind Direction'\n wind_dir.attrs['standard_name'] = 'wind_from_direction'\n dsout['wind_from_direction'] = wind_dir\n\n # Add global metadata\n dsout.attrs['title'] = \"Rutgers WRF 3km model output\"\n dsout.attrs['forecast_offset'] = forecast_offset\n dsout.attrs['source_directory'] = directory\n dsout.attrs['date_created'] = str(datetime.today())\n dsout.attrs['elapsed_time'] = str(datetime.now() - script_start_time)\n dsout.attrs['creator_name'] = \"Sage Lichtenwalner\"\n dsout.attrs['creator_email'] = \"[email protected]\"\n dsout.attrs['creator_url'] = \"https://rucool.marine.rutgers.edu\"\n dsout.attrs['institution'] = \"Rutgers University Center for Ocean Observing Leadership (RU COOL)\"\n dsout.attrs['summary'] = \"Wind data extracted from the RU-WRF model. The model is run daily at 00Z with forecast files saved every hour. Times in this file are UTC based on the forecast time. The forecast_offset specifies how many hours of model spin up are allowed before the data is included in this virtual time-series archive for a given day. For example, a value of 6 means the first 6 hours of data for a day are actually extracted from the previous day's model run.\"\n dsout.attrs['project'] = \"RU COOL BPU Wind Energy Project\"\n dsout.attrs['Conventions'] = 'CF-1.6'\n \n # Setup xarray output encoding\n encoding = make_encoding(dsout)\n \n # Output final datafile\n output_datafile = '%s_%d%02d%02d.nc' % (aprefix, start_date.year, start_date.month, start_date.day)\n dsout.to_netcdf(output_datafile, encoding=encoding) \n print('Outputted ' + output_datafile)\n else:\n print('No data found, skipping.')", "def main():\n\t# \"\"\"\n\t# \tMain function of test python module\n\t# \"\"\"\n\t# random.seed(os.urandom(345634)) # initialize random generator\n\t# t = np.linspace(0.0, 24.0, 96.0) # define the time axis of a day, here we use 96 values every quarter of an hour\n\t# # standard load profile -- input\n\t# q = extra.read_slp(t,\n\t# 'Profielen-Elektriciteit-2015-versie-1.00 Folder/profielen Elektriciteit 2015 versie 1.00.csv') # read the sample standard load profile, can be any length, can be resized given a low/high resolution time axis\n\t# q = q / np.sum(q) # normalization of standard load profile\n\t# # process duration\n\t# duration_axis = np.linspace(0.0, 24.0, 96.0)\n\t# (p_d, E_p) = extra.app_time(duration_axis, 10, 2, 0.0,\n\t# 24.0) # function that define the pdf of duration of a process\n\t# # process consumption\n\t# consumption_axis = np.linspace(0.0, 3.5, 96.0)\n\t# (p_k, E_k) = extra.app_consumption(consumption_axis, 10, 2, 0.0,\n\t# 3.5) # function that define the pdf of duration of a process\n\t# # pdf of starting time\n\t# p_t_0 = lpd.infer_t_0(q, p_d, E_k) # computes the pdf of starting time of processes\n\t# p_t_0 = p_t_0 / np.sum(p_t_0) # normalization of the pdf to sum up to zero\n #\n\t# \"\"\"\n\t# 1st Approach, starting time of processes is a discrete propapibility density function\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# D = 2000\n\t# synthetic_profile = lpd.synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# synthetic_profile_1 = lpd.synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# # expected value of D processes\n\t# q_e_e = lpd.infer_q_e(t, p_t_0, p_d, E_k, D)\n\t# # plot\n\t# 
plt.step(t, synthetic_profile, \"g-\")\n\t# plt.step(t, q_e_e, \"b--\")\n #\n\t# \"\"\"\n\t# 2nd Approach, starting time of processes is a continuous propapibility density function\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# ts, cs = lpd.continous_synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# plt.step(ts / len(t) * t[-1], cs, where='post', c='r')\n\t# plt.xlim(0, 24.0)\n\t# plt.legend([\"synthetic\", \"expected\", \"continuous\"], loc=0)\n\t# plt.show()\n #\n\t# \"\"\"\n\t# Time discretization\n\t# \"\"\"\n\t# n_intervals = 24 * 1 # discretized in minutes\n\t# discrete_timeaxis = np.linspace(0.0, 24.0, n_intervals + 1)\n\t# discrete_consumption = lpd.signal_discretization(discrete_timeaxis, t, ts, cs)\n\t# plt.step(ts / len(t) * t[-1], cs, where='post', c='r')\n\t# plt.step(discrete_timeaxis, discrete_consumption, where='post', c='k', ls='--', lw=2)\n\t# plt.legend([\"continuous\", \"discretized\"], loc=0)\n\t# plt.show()\n #\n #\n\t# \"\"\"\n\t# Repeated day synthetic profile creation\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# D = 2000\n\t# n = 10\n\t# slp = lpd.synthetic_profile_repeated(D, t, p_d, consumption_axis, p_k, p_t_0, n)\n\t# plt.step(range(len(slp)), slp, \"g-\")\n\t# plt.show()\n\tt = np.linspace(0.0, 24.0, 96.0)\n\tload_profile = extra.read_slp(t, 'Profielen-Elektriciteit-2015-versie-1.00 Folder/profielen Elektriciteit 2015 versie 1.00.csv')\n\tslp = synthetic.create_synthetic_load(load_profile, 5.0, 5)\n\tplt.step(range(len(slp)), slp)\n\tplt.show()", "def run(self):\n qubit = self.qubit\n\n routine = self.routine\n\n # Saving some typing for parameters that are only read ;)\n allowed_delta_f = self.get_param_value(\"allowed_delta_f\")\n f_unit = self.get_param_value(\"f_unit\")\n f_factor = self.get_param_value(\"f_factor\")\n delta_f_unit = self.get_param_value(\"delta_f_unit\")\n delta_f_factor = self.get_param_value(\"delta_f_factor\")\n max_iterations = self.get_param_value(\"max_iterations\")\n transition = self.get_param_value(\"transition_name\")\n\n # Finding the ramsey experiment in the pipulse calibration\n pipulse_calib = routine.routine_steps[-1]\n ramsey = pipulse_calib.routine_steps[-1]\n\n # Transition frequency from last Ramsey\n freq = qubit[f\"{transition}_freq\"]()\n\n # Retrieving the frequency difference\n max_waiting_seconds = self.get_param_value(\"max_waiting_seconds\")\n for i in range(max_waiting_seconds):\n try:\n routine.delta_f = (\n ramsey.analysis.proc_data_dict[\n \"analysis_params_dict\"][\n qubit.name][\"exp_decay\"][\"new_qb_freq\"] -\n ramsey.analysis.proc_data_dict[\n \"analysis_params_dict\"][\n qubit.name][\"exp_decay\"][\"old_qb_freq\"])\n break\n except KeyError:\n log.warning(\n \"Could not find frequency difference between current \"\n \"and last Ramsey measurement, delta_f not updated\")\n break\n except AttributeError:\n # FIXME: Unsure if this can also happen on real set-up\n log.warning(\n \"Analysis not yet run on last Ramsey measurement, \"\n \"frequency difference not updated\")\n time.sleep(1)\n\n # Progress update\n if self.get_param_value('verbose'):\n print(f\"Iteration {routine.iteration}, {transition}-freq \"\n f\"{freq / f_factor} {f_unit}, frequency \"\n f\"difference = {routine.delta_f / delta_f_factor} \"\n f\"{delta_f_unit}\")\n\n # Check if the absolute frequency difference is small enough\n if np.abs(routine.delta_f) < allowed_delta_f:\n # Success\n if self.get_param_value('verbose'):\n print(f\"{transition}-frequency found to be \"\n f\"{freq / f_factor} {f_unit} 
within \"\n f\"{allowed_delta_f / delta_f_factor} \"\n f\"{delta_f_unit} of previous value.\")\n\n elif routine.iteration < max_iterations:\n # No success yet, adding a new rabi-ramsey and decision step\n if self.get_param_value('verbose'):\n print(f\"Allowed error (\"\n f\"{allowed_delta_f / delta_f_factor} \"\n f\"{delta_f_unit}) not yet achieved, adding new\"\n \" round of PiPulse calibration...\")\n\n routine.add_next_pipulse_step()\n\n step_settings = {'qubits': self.qubits}\n routine.add_step(\n FindFrequency.Decision,\n 'decision',\n step_settings,\n )\n\n routine.iteration += 1\n return\n\n else:\n # No success yet, reached max iterations\n msg = (f\"{self.step_label} routine finished for {qubit.name}, \"\n \"desired precision not necessarily achieved within the \"\n f\"maximum number of iterations ({max_iterations}).\")\n log.warning(msg)\n\n if self.get_param_value('verbose'):\n print(msg)\n\n if self.get_param_value('verbose'):\n # Printing termination update\n print(f\"FindFrequency routine finished: \"\n f\"{transition}-frequencies for {qubit.name} \"\n f\"is {freq / f_factor} {f_unit}.\")", "def main():\n\n #for row_start in np.arange(0, 128):\n # MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [row_start], [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, [], '', ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '_cycle01020304_row'+str(row_start).zfill(3), Imin=12, Imax=136)\n\n Marcov_Chain_MLE(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '', 160.0, [90.0, 70.0, 50.0, 30.0], 0.0, 0.5)\n plt.show()\n #MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71, 55, 46, 35], [0.002, 0.01, 0.04, 0.2], 4, [], '', ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01', '../Data/chip14/MLC_programming_Chip14_Col33_10msPULSE_VG1p8_VD2p0_VAsource_VBdrain_02', '../Data/chip14/MLC_programming_Chip14_Col33_40msPULSE_VG1p8_VD2p0_VAsource_VBdrain_03', '../Data/chip14/MLC_programming_Chip14_Col33_200msPULSE_VG1p8_VD2p0_VAsource_VBdrain_04'], '../Plots/chip14/', 'VG1p8_VD2p0', '_cycle01020304_all')\n\n #IDS_VGS(14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAsource_VBdrain', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAsource_VBdrain_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Fresh_vs_MLC01020304_VG1p8_VD2p0_IDS-VGS_VaS-VbD_', range(0, 128), 'Fresh vs MLC-1-2-3-4 (VG=1.8, VD=2.0)\\nMLC-{1, 2, 3, 4}: {2ms, 10ms, 40ms, 200ms} WL pulses, IDSAT 
threshold = {90, 70, 50, 30}uA, forward' , 150, ['fresh', 'MLC-01', 'MLC-02', 'MLC-03', 'MLC-04']) \n #IDS_VGS(14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAdrain_VBsource', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Fresh_vs_MLC01020304_VG1p8_VD2p0_IDS-VGS_VaD-VbS_', range(0, 128), 'Fresh vs MLC-1-2-3-4 (VG=1.8, VD=2.0)\\nMLC-{1, 2, 3, 4}: {2ms, 10ms, 40ms, 200ms} WL pulses, IDSAT threshold = {90, 70, 50, 30}uA, reversed', 150, ['fresh', 'MLC-01', 'MLC-02', 'MLC-03', 'MLC-04']) \n\n #hist_IDS_VGS(0, 14, 33, 16, 2, 'ULVT', 128, ['../Data/chip14/Fresh_Chip14_Col33_Ids_Vgs_VAdrain_VBsource', '../Data/chip14/MLC_Chip14_Col33_2msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_01', '../Data/chip14/MLC_Chip14_Col33_10msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_02', '../Data/chip14/MLC_Chip14_Col33_40msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_03', '../Data/chip14/MLC_Chip14_Col33_200msPULSE_VG1p8_VD2p0_Ids_Vgs_VAdrain_VBsource_04'], ['b', 'y', 'r', 'k', 'g'], '../Plots/chip14/', 'Hist-IDSAT_MLC-rv1-01020304_reverse-read_', range(0, 128), 'MLC programming {2ms, 10ms, 40ms, 200ms} pulses, VGS=1.8, VDS=2.0 for level=1-2-3-4\\nhistogram of read-IDSAT (VGS=VDS=0.8V)', 0, 150, 0, 150, 1000)\n #\n #t_label = []\n #for t in np.arange(0, 0.002*(71) + 0.0001, 0.002):\n # t_label.append(str(t))\n #\n ##MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [21], [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01_row-21', Imin=82, Imax=142)\n\n #for row_start in np.arange(0, 128):\n # MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, [row_start], [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01_row_'+str(row_start).zfill(3), Imin=80, Imax=142)\n\n #MLC_IDSAT_algorithm_rv1(14, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(0, 128), [71], [0.002], 1, np.arange(0, 0.002*(71)+0.0001, 0.002), t_label, ['../Data/chip14/MLC_programming_Chip14_Col33_2msPULSE_VG1p8_VD2p0_VAsource_VBdrain_01'], '../Plots/chip14/', 'VG1p8_VD2p0', '_rv1_cycle01', Imin=80, Imax=142)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 
1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col30_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col30_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col30_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col30_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 10, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 
'VG1p5_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle01', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle0102', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle0102', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle010203', 50, 160, 1)\n 
#MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle010203', 50, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle010203', 20, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 20, 160, 1)\n\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 1.7, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 1.7, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p7', '_cycle01020304', 40, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.5, 2.0, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', 
'../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p0', '_cycle01020304', 10, 160, 1)\n #MLC_IDSAT_characterization(11, 33, 16, 2, 'ULVT', 1.8, 2.0, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col33_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col33_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col33_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col33_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 10, 160, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle01', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle0102', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle0102', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle010203', 50, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p4', 
'_cycle010203', 20, 125, 1)\n\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col18_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p0', '_cycle01020304', 40, 125, 1)\n #MLC_IDSAT_characterization(11, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col18_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col18_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col18_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p4', '_cycle01020304', 10, 125, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80], [0.01, 0.01], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', 
'_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80, 240], [0.01, 0.01, 0.01], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col24_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 80, 240, 180], [0.01, 0.01, 0.01, 0.04], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col24_HCI_80x10ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col24_HCI_240x10ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col24_HCI_180x40ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 
0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle0102', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle010203', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', 
'3.6'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 20, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01020304', 5, 150, 1)\n #MLC_IDSAT_characterization(11, 27, 20, 2, 'ULVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col27_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col27_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col27_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col27_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', 
'0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle0102', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle0102', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle0102', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], 
['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle010203', 50, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle010203', 15, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle010203', 15, 150, 1)\n\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 1.8, 128, range(0, 32) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD1p8', '_cycle01020304', 40, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 1.8, 128, range(32, 64) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD1p8', '_cycle01020304', 20, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.5, 2.2, 128, range(64, 96) , [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p5_VD2p2', '_cycle01020304', 5, 150, 1)\n #MLC_IDSAT_characterization(11, 28, 20, 2, 'LVT', 1.8, 2.2, 128, range(96, 128), [40, 20, 12, 36], [0.01, 0.04, 0.2, 0.2], 4, [0, 0.4, 0.8, 1.6, 2.0, 
2.6, 3.2, 3.8, 4.4, 4.8, 12], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6', '3.6', '10.8'], ['../Data/chip11/Chip11_Col28_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip11/Chip11_Col28_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip11/Chip11_Col28_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03', '../Data/chip11/Chip11_Col28_HCI_36x200ms_stress_VG_ConstPulse_VAsource_VBdrain_04'], '../Plots/chip11/', 'VG1p8_VD2p2', '_cycle01020304', 5, 150, 1)\n\n\n # (L, Nfin, VT_flavor, Nrow, Imax)\n col_list = [(36, 1, 'ULVT', 32 , 60 ), (36, 1, 'LVT', 32 , 50 ), (36, 1, 'SVT', 32 , 45 ),\n (36, 1, 'ULVT', 128, 60 ), (36, 1, 'LVT', 128, 50 ), (36, 1, 'SVT', 128, 45 ),\n (20, 1, 'ULVT', 32 , 75 ), (20, 1, 'LVT', 32 , 60 ), (20, 1, 'SVT', 32 , 50 ),\n (20, 1, 'ULVT', 128, 75 ), (20, 1, 'LVT', 128, 60 ), (20, 1, 'SVT', 128, 50 ),\n (16, 1, 'ULVT', 32 , 80 ), (16, 1, 'LVT', 32 , 65 ), (16, 1, 'SVT', 32 , 60 ),\n (16, 1, 'ULVT', 128, 80 ), (16, 1, 'LVT', 128, 65 ), (16, 1, 'SVT', 128, 60 ),\n (36, 2, 'ULVT', 32 , 115), (36, 2, 'LVT', 32 , 95 ), (36, 2, 'SVT', 32 , 85 ),\n (36, 2, 'ULVT', 128, 115), (36, 2, 'LVT', 128, 95 ), (36, 2, 'SVT', 128, 85 ), \n (20, 2, 'ULVT', 32 , 135), (20, 2, 'LVT', 32 , 115), (20, 2, 'SVT', 32 , 100),\n (20, 2, 'ULVT', 128, 135), (20, 2, 'LVT', 128, 120), (20, 2, 'SVT', 128, 100),\n (16, 2, 'ULVT', 32 , 150), (16, 2, 'LVT', 32 , 125), (16, 2, 'SVT', 32 , 115),\n (16, 2, 'ULVT', 128, 150), (16, 2, 'LVT', 128, 125), (16, 2, 'SVT', 128, 115)]\n\n #MLC_IDSAT_algorithm_rv1(11, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(0, 128), [135+20], [0.2], 1, np.arange(0, 0.01*16+0.0001, 0.01), '', ['../Data/chip11/MLC_programming_Chip11_Col21_2msPULSE_VG1p8_VD2p4_VAsource_VBdrain_01'], '../Plots/chip11/', 'VG1p8_VD2p4', '_rv1_cycle01_EfficientPython')\n\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', '0.9-1.2-1.5-1.8', 2.4, 128, range(0, 128), [59+16, 72+40, 80+31, 68+23], [0.2, 0.2, 0.2, 0.2], 4, [0, 15, 15.1, 37.5, 37.6, 59.8, 59.9, 78.1], ['0', '15', '', '37.4', '', '59.6', '', '77.8'], ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03', '../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG-0p9-1p2-1p5-1p8_VD2p4', '_rv1_cycle01020304')\n\n t_ratio_lst = [(0, 0.17), (0.16, 0.34), (0.33, 0.505), (0.495, 0.67), (0.66, 0.84), (0.83, 1)]\n\n #t_label = []\n #for t in np.arange(0, 0.2*(59+16) + 0.0001, 0.2):\n # t_label.append(str(t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(0, 128), [59+16], [0.2], 1, np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(row_start, row_start+8), [59+16], [0.2], 1, np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 0.9, 2.4, 128, range(row_start, row_start+8), [59+16], [0.2], 1, 
np.arange(0, 0.2*(59+16)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG0p9_VD2p4_VAsource_VBdrain_01'], '../Plots/chip12/', 'VG0p9_VD2p4', '_rv1_cycle01_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(59+16), t_ratio[1]*0.2*(59+16)])\n # segment += 1\n\n #t_label = []\n #for t in np.arange(0, 0.2*(72+40) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(0, 128), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(row_start, row_start+8), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.2, 2.4, 128, range(row_start, row_start+8), [72+40], [0.2], 1, np.arange(0, 0.2*(72+40)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p2_VD2p4_VAsource_VBdrain_02'], '../Plots/chip12/', 'VG1p2_VD2p4', '_rv1_cycle02_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(72+40), t_ratio[1]*0.2*(72+40)])\n # segment += 1\n\n\n #t_label = []\n #for t in np.arange(0, 0.2*(80+31) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + 0.2*(72+40) + t))\n ##MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(0, 128), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(row_start, row_start+8), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.5, 2.4, 128, range(row_start, row_start+8), [80+31], [0.2], 1, np.arange(0, 0.2*(80+31)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p5_VD2p4_VAsource_VBdrain_03'], '../Plots/chip12/', 'VG1p5_VD2p4', '_rv1_cycle03_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(80+31), t_ratio[1]*0.2*(80+31)])\n # segment += 1\n\n\n #t_label = []\n #for t in np.arange(0, 0.2*(68+23) + 0.0001, 0.2):\n # t_label.append(str(0.2*(59+16) + 0.2*(72+40) + 0.2*(80+31) + t))\n #MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(0, 128), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04')\n #for row_start in np.arange(0, 128, 8):\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(row_start, row_start+8), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, 
['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04_row'+str(row_start)+'_to_'+str(row_start+7))\n # segment=0\n # for t_ratio in t_ratio_lst:\n # MLC_IDSAT_algorithm_rv1(12, 21, 36, 2, 'ULVT', 1.8, 2.4, 128, range(row_start, row_start+8), [68+23], [0.2], 1, np.arange(0, 0.2*(68+23)+0.0001, 0.2), t_label, ['../Data/chip12/MLC_programming_Chip12_Col21_200msPULSE_VG1p8_VD2p4_VAsource_VBdrain_04'], '../Plots/chip12/', 'VG1p8_VD2p4', '_rv1_cycle04_row'+str(row_start)+'_to_'+str(row_start+7)+'_'+str(segment), [t_ratio[0]*0.2*(68+23), t_ratio[1]*0.2*(68+23)])\n # segment += 1\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col18_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle010203', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col18_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle010203', 16, 110)\n\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col24_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle010203', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col24_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle010203', 14, 133)\n\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', '0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col30_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle010203', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 20, 12], [0.01, 0.04, 0.2], 3, [0, 0.4, 0.8, 1.6, 2.0, 2.6, 3.2, 3.8, 4.4], ['0', '0.4', 
'0.4', '1.2', '1.2', '1.8', '2.4', '3.0', '3.6'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02', '../Data/chip10/Chip10_Col30_HCI_12x200ms_stress_VG_ConstPulse_VAsource_VBdrain_03'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle010203', 20, 140)\n\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle01', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle01', 16, 110)\n\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle01', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle01', 14, 133)\n\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle01', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40], [0.01], 1, [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.5], ['0', '0.05', '0.1', '0.15', '0.2', '0.25', '0.3', '0.35', '0.4', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle01', 20, 140)\n\n\n\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.0, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle0102', 38, 112)\n #MLC_IDSAT_characterization(10, 18, 36, 2, 'ULVT', 1.8, 2.4, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col18_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col18_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p4', '_cycle0102', 16, 110)\n # \n 
#MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 1.8, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD1p8', '_cycle0102', 44, 133)\n #MLC_IDSAT_characterization(10, 24, 20, 2, 'ULVT', 1.8, 2.2, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col24_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col24_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p2', '_cycle0102', 14, 133)\n # \n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 1.7, 32, range(0, 16) , [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD1p7', '_cycle0102', 50, 135)\n #MLC_IDSAT_characterization(10, 30, 16, 2, 'ULVT', 1.8, 2.0, 32, range(16, 32), [40, 20], [0.01, 0.04], 2, [0, 0.2, 0.4, 0.8, 1.0, 1.2, 1.4, 1.6, 1.7], ['0', '0.2', '0.4', '0.4', '0.6', '0.8', '1.0', '1.2', 'recover'], ['../Data/chip10/Chip10_Col30_HCI_40x10ms_stress_VG_ConstPulse_VAsource_VBdrain_01', '../Data/chip10/Chip10_Col30_HCI_20x40ms_stress_VG_ConstPulse_VAsource_VBdrain_02'], '../Plots/chip10/', 'VG1p8_VD2p0', '_cycle0102', 20, 140)", "def take_measurements(timeout=6):\n print(\"Taking measurements for\", timeout, \"seconds.\")\n start = time.monotonic()\n while time.monotonic() - start < timeout:\n for i, (toggle, polarity) in enumerate(vectors):\n result = trackpad.measure_adc(toggle, polarity)\n print(\"meas{}: {}\".format(i, result - compensation[i]), end=\"\\t\")\n print()", "async def loop():\n # ArmDevice.storage.joints_pos = await get_positions() # Use this if encoders are wired up.\n # ArmDevice.storage.joints_pos = simulate_positions() # Use this for testing without position feedback.\n log.debug(\"command: {}\".format(ArmDevice.storage.command))\n ArmDevice.storage.controller.user_command(ArmDevice.storage.mode, *ArmDevice.storage.command)\n ArmDevice.storage.speeds = ArmDevice.storage.controller.update_duties(ArmDevice.storage.joints_pos)\n\n # publish speeds/duty cycles here\n log.debug(\"joints_pos: {}\".format(ArmDevice.storage.joints_pos))\n log.debug(\"speeds: {}\".format(ArmDevice.storage.speeds))\n await send_duties()" ]
[ "0.5746817", "0.5685441", "0.5681871", "0.5642942", "0.5569622", "0.55403596", "0.5525413", "0.55203384", "0.5511839", "0.545532", "0.5445678", "0.5411594", "0.54065305", "0.54041046", "0.53374296", "0.53127813", "0.52985483", "0.52948815", "0.5291056", "0.52908057", "0.52744836", "0.52736574", "0.52512556", "0.5249316", "0.52330613", "0.5225748", "0.52236986", "0.52210903", "0.5211191", "0.52041715" ]
0.7498953
0
LZW compression. dict_of_abc is the input data dictionary; each dictionary index holds a list, and new entries are added to it as compression proceeds. list_of_data is the sequence of numbers to compress.
def do_LZW_Compression(dict_of_abc, list_of_data):

    # the difference between None and [] matters here because extend() is called on the list

    result = []
    P = []
    C = []  # C is always a single element taken from the input
    PC = []

    # how it works, video explanation: https://www.youtube.com/watch?v=MQ4ObKv2L_M

    for i in range(len(list_of_data)):
        """
        Loop over all input elements
        """

        C = []
        C.append(list_of_data[i])

        # PC is always the combination of P and C
        PC = []
        PC.extend(P)
        PC.extend(C)

        index_founded = dict_cointains_list(dict_of_abc, PC)
        if index_founded == -1:
            # if PC is not in the dictionary, add it and set P = C
            dict_of_abc[len(dict_of_abc) + 1] = PC
            # output the key of P in the dictionary
            result.append(dict_cointains_list(dict_of_abc, P))
            P = C
        else:
            # if PC is in the dictionary, set P = PC for the next iteration
            P = PC
    # append the code for the last element
    result.append(dict_cointains_list(dict_of_abc, P))
    return dict_of_abc, result
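A minimal usage sketch for the compression routine above (added for illustration, not part of the original record): it assumes dict_of_abc is pre-seeded with one single-symbol list per input value, keyed from 1, and that the dict_cointains_list helper shown among the other snippets in this dump is available.

# Hypothetical usage: seed the alphabet, then compress a short sequence.
data = [1, 2, 1, 2, 1, 2, 3]
# initial alphabet: {1: [1], 2: [2], 3: [3]}
abc = {i + 1: [v] for i, v in enumerate(sorted(set(data)))}
abc, codes = do_LZW_Compression(abc, data)
print(codes)  # with this seeding the output is [1, 2, 4, 4, 3]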
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_LZW_DeCompression(dict_of_abc, list_of_data):\n \n #https://www.youtube.com/watch?v=MQM_DsX-LBI\n \n out = []\n predchozi_out = []\n for i in range(len(list_of_data)):\n new = []\n new.extend(predchozi_out)\n if list_of_data[i] in dict_of_abc:\n o = dict_of_abc[list_of_data[i]]\n out.extend(o)\n predchozi_out = o\n \n #pokud je o list, beru z nej pouze prvni prvek\n if len(o) > 1:\n new.append(o[0])\n else:\n new.extend(o)\n\n index_founded = dict_cointains_list(dict_of_abc, new)\n if index_founded == -1:\n #pokud new neni ve slovniku, pridam ho tam\n dict_of_abc[len(dict_of_abc) +1] = new\n\n return dict_of_abc, out", "def getUniChemData(self, inchiKeyList):\n mapD = {\n 1: {\"name\": \"chembl\", \"baseUrl\": \"https://www.ebi.ac.uk/chembl/\", \"entryUrl\": \"https://www.ebi.ac.uk/chembldb/compound/inspect/\"},\n 3: {\"name\": \"pdb\", \"baseUrl\": \"http://www.ebi.ac.uk/pdbe/\", \"entryUrl\": \"http://www.ebi.ac.uk/pdbe-srv/pdbechem/chemicalCompound/show/\"},\n 2: {\"name\": \"drugbank\", \"baseUrl\": \"http://drugbank.ca/\", \"entryUrl\": \"http://www.drugbank.ca/drugs/\"},\n 5: {\"name\": \"pubchem_dotf\", \"baseUrl\": \"http://pubchem.ncbi.nlm.nih.gov/sources/sources.cgi\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/substance/\"},\n 4: {\"name\": \"gtopdb\", \"baseUrl\": \"http://www.guidetopharmacology.org\", \"entryUrl\": \"http://www.guidetopharmacology.org/GRAC/LigandDisplayForward?ligandId=\"},\n 11: {\"name\": \"ibm\", \"baseUrl\": \"http://www-935.ibm.com/services/us/gbs/bao/siip/nih/\", \"entryUrl\": \"http://www-935.ibm.com/services/us/gbs/bao/siip/nih/?sid=\"},\n 6: {\"name\": \"kegg_ligand\", \"baseUrl\": \"http://www.genome.jp/kegg/ligand.html\", \"entryUrl\": \"http://www.genome.jp/dbget-bin/www_bget?\"},\n 9: {\"name\": \"zinc\", \"baseUrl\": \"http://zinc15.docking.org\", \"entryUrl\": \"http://zinc15.docking.org/substances/\"},\n 8: {\"name\": \"nih_ncc\", \"baseUrl\": \"http://nihsmr.evotec.com/evotec/\", \"entryUrl\": \"\"},\n 10: {\"name\": \"emolecules\", \"baseUrl\": \"https://www.emolecules.com/\", \"entryUrl\": \"https://www.emolecules.com/cgi-bin/more?vid=\"},\n 12: {\"name\": \"atlas\", \"baseUrl\": \"http://www.ebi.ac.uk/gxa/home\", \"entryUrl\": \"http://www.ebi.ac.uk/gxa/query?conditionQuery=\"},\n 7: {\"name\": \"chebi\", \"baseUrl\": \"http://www.ebi.ac.uk/chebi/downloadsForward.do\", \"entryUrl\": \"http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI%3A\"},\n 14: {\n \"name\": \"fdasrs\",\n \"baseUrl\": \"http://fdasis.nlm.nih.gov/srs/srs.jsp\",\n \"entryUrl\": \"http://fdasis.nlm.nih.gov/srs/ProxyServlet?mergeData=true&objectHandle=DBMaint&APPLICATION_NAME=fdasrs&actionHandle=default&nextPage=jsp/srs/ResultScreen.jsp&TXTSUPERLISTID=\",\n },\n 15: {\"name\": \"surechembl\", \"baseUrl\": \"https://www.surechembl.org/search/\", \"entryUrl\": \"https://www.surechembl.org/chemical/\"},\n 21: {\"name\": \"pubchem_tpharma\", \"baseUrl\": \"http://www.thomson-pharma.com/\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/substance/\"},\n 22: {\"name\": \"pubchem\", \"baseUrl\": \"http://pubchem.ncbi.nlm.nih.gov\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/compound/\"},\n 27: {\"name\": \"recon\", \"baseUrl\": \"https://vmh.uni.lu\", \"entryUrl\": \"https://vmh.uni.lu/\"},\n 28: {\"name\": \"molport\", \"baseUrl\": \"https://www.molport.com/shop/index\", \"entryUrl\": \"https://www.molport.com/shop/molecule-link/\"},\n 31: {\n \"name\": \"bindingdb\",\n \"baseUrl\": \"https://www.bindingdb.org/bind/index.jsp\",\n \"entryUrl\": 
\"http://www.bindingdb.org/bind/chemsearch/marvin/MolStructure.jsp?monomerid=\",\n },\n 41: {\"name\": \"swisslipids\", \"baseUrl\": \"http://www.swisslipids.org/\", \"entryUrl\": \"http://www.swisslipids.org/\"},\n 29: {\"name\": \"nikkaji\", \"baseUrl\": \"http://jglobal.jst.go.jp/en/\", \"entryUrl\": \"http://jglobal.jst.go.jp/en/redirect?Nikkaji_No=\"},\n 32: {\"name\": \"comptox\", \"baseUrl\": \"https://comptox.epa.gov/dashboard/\", \"entryUrl\": \"https://comptox.epa.gov/dashboard/\"},\n 33: {\"name\": \"lipidmaps\", \"baseUrl\": \"http://www.lipidmaps.org\", \"entryUrl\": \"http://www.lipidmaps.org/data/LMSDRecord.php?LMID=\"},\n 35: {\"name\": \"carotenoiddb\", \"baseUrl\": \"http://carotenoiddb.jp/index.html\", \"entryUrl\": \"http://carotenoiddb.jp/Entries/\"},\n 36: {\"name\": \"metabolights\", \"baseUrl\": \"http://www.ebi.ac.uk/metabolights/\", \"entryUrl\": \"http://www.ebi.ac.uk/metabolights/\"},\n 37: {\"name\": \"brenda\", \"baseUrl\": \"https://www.brenda-enzymes.org/index.php\", \"entryUrl\": \"https://www.brenda-enzymes.org/ligand.php?brenda_ligand_id=\"},\n 17: {\"name\": \"pharmgkb\", \"baseUrl\": \"https://www.pharmgkb.org\", \"entryUrl\": \"https://www.pharmgkb.org/drug/\"},\n 18: {\"name\": \"hmdb\", \"baseUrl\": \"http://www.hmdb.ca\", \"entryUrl\": \"http://www.hmdb.ca/metabolites/\"},\n 24: {\n \"name\": \"nmrshiftdb2\",\n \"baseUrl\": \"http://nmrshiftdb.nmr.uni-koeln.de/portal/media-type/html/user/anon/page/default.psml/js_pane/P-Home\",\n \"entryUrl\": \"http://nmrshiftdb.org/molecule/\",\n },\n 25: {\"name\": \"lincs\", \"baseUrl\": \"http://www.lincsproject.org/\", \"entryUrl\": \"http://identifiers.org/lincs.smallmolecule/\"},\n 39: {\"name\": \"chemicalbook\", \"baseUrl\": \"https://www.chemicalbook.com\", \"entryUrl\": \"https://www.chemicalbook.com/ChemicalProductProperty_EN_\"},\n 20: {\"name\": \"selleck\", \"baseUrl\": \"http://www.selleckchem.com\", \"entryUrl\": \"http://www.selleckchem.com/products/\"},\n 23: {\"name\": \"mcule\", \"baseUrl\": \"https://mcule.com\", \"entryUrl\": \"https://mcule.com/\"},\n 26: {\"name\": \"actor\", \"baseUrl\": \"https://actor.epa.gov\", \"entryUrl\": \"http://actor.epa.gov/actor/chemical.xhtml?casrn=\"},\n 34: {\"name\": \"drugcentral\", \"baseUrl\": \"http://drugcentral.org\", \"entryUrl\": \"http://drugcentral.org/drugcard/\"},\n 38: {\"name\": \"rhea\", \"baseUrl\": \"http://www.rhea-db.org\", \"entryUrl\": \"http://www.rhea-db.org/searchresults?q=CHEBI:\"},\n }\n oD = {}\n try:\n for ky in inchiKeyList:\n unc = unichem_client # pylint: disable=no-member\n # unc.set_format(\"json\")\n uDL = unc.get(ky)\n if uDL:\n qD = {}\n for uD in uDL:\n if \"src_id\" in uD and int(uD[\"src_id\"]) in mapD:\n qD[mapD[int(uD[\"src_id\"])][\"name\"]] = uD[\"src_compound_id\"]\n if qD:\n oD[ky] = qD\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n return oD", "def dict_cointains_list(dict_of_abc, item_list):\n\n values = list(dict_of_abc.values())\n\n #projdu vsecky listy ve slovniku\n for i in range(len(values)):\n #predpokladam ze ve slovniku je\n finded = True\n \n for j in range(len(values[i])):\n if len(item_list) == len(values[i]):\n # kontrola po jednotlivych hodnotach\n # logicky soucin - pokud jednou False, navzdy False\n finded = finded and item_list[j] == values[i][j]\n else:\n finded = False\n\n if finded:\n # cyklus indexuje od 0, slovnik ale indexujeme-klicujeme od 1\n return i + 1 \n\n return -1", "def generate_data_list(self, data_list):\n sol = []\n for i in data_list:\n 
sol.append({\n \"data_sig\": i[0],\n \"data\": pickle.loads(i[1]),\n \"attachment\": i[2],\n \"likes\":pickle.loads(i[3]),\n \"dislikes\":pickle.loads(i[4]),\n \"owner\": i[5]\n })\n return sol", "def getDataForLBMPZonalComparison(self):\n\n\t\t# Variables\n\t\tzonal_data = self.getDayAheadMarketLBMPZonal()\n\t\tkeys = zonal_data.keys()\n\t\tfinal_data = []\n\t\tvalues = []\n\t\touter_dictionary = {}\n\t\tinner_dictionary = {}\n\n\t\t# Populating final data array and returning it\n\t\tfor key in keys:\n\t\t\tfor data in zonal_data[key]:\n\t\t\t\tinner_dictionary['x'] = data.keys()[0]\n\t\t\t\tinner_dictionary['y'] = data[data.keys()[0]]['LBMP ($/MWHr)']\n\t\t\t\tvalues.append(inner_dictionary)\n\t\t\t\tinner_dictionary = {}\n\t\t\touter_dictionary['values'] = values\n\t\t\tvalues = []\n\t\t\touter_dictionary['key'] = key\n\t\t\tfinal_data.append(outer_dictionary)\n\t\t\touter_dictionary = {}\n\n\t\treturn final_data", "def get_compressed(self, value):\r\n output = []\r\n lz_data = (value >> 8) & 0xFF\r\n lz_counter = value & 0xFF\r\n # Define the relative offset on LZ Window\r\n lz_offset = ((lz_counter & 0xF0) << 4) | lz_data\r\n # Define the LZ Counter for repeat data N times\r\n lz_counter = (lz_counter & 0xF) + 0x2\r\n # Start Repeat Loop\r\n while (lz_counter >= 0):\r\n # Seek the window on LZ Offset and get the LZ Data\r\n self.__lzwindow__.seek(lz_offset, FROM_START)\r\n lz_data = (lz_data & 0xFF00) + \\\r\n int.from_bytes(self.__lzwindow__.read(1), byteorder='big')\r\n # Write the LZ data to the output\r\n output.append((lz_data & 0xFF).to_bytes(1, byteorder='big'))\r\n # Seek the LZ Window on current LZ Window Counter value and write the current LZ Data (LZBuffer)\r\n self.__lzwindow__.seek(self.__lzwindowcounter__, FROM_START)\r\n self.__lzwindow__.write((lz_data & 0xFF).to_bytes(1, byteorder='big'))\r\n # Increment LZ Window Counter\r\n self.__lzwindowcounter__ = (\r\n self.__lzwindowcounter__ + 0x1) & self.__lzwindowmax__\r\n # Increment LZ Offset\r\n lz_offset = (lz_offset + 0x1) & self.__lzwindowmax__\r\n # Decrement number of data to decompress\r\n self.__maxlen__ -= 0x1\r\n # Decrement LZ Loop counter\r\n lz_counter -= 0x1\r\n return output", "def test_compress_offset_less_len1(self):\n text = 'ababab'\n actual = LZ77.compress(text)\n expected = bytearray([32]) + bytearray(b'ab') + bytearray([0, 18])\n self.assertEqual(actual, expected)", "def test_decompress_offset_less_len1(self):\n b_array = bytearray([32]) + bytearray(b'ab') + bytearray([0, 18])\n actual = LZ77.decompress(b_array)\n expected = 'ababab'\n self.assertEqual(actual, expected)", "def test_compress_offset_less_len2(self):\n text = 'abcdabcdab'\n actual = LZ77.compress(text)\n expected = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 52])\n self.assertEqual(actual, expected)", "def dict_check34(_str1):\n\t_len=len(_str1)\n\t_list1=[(i,j,k) for i in range(5) for j in range(5) for k in range(5)]\n\t_list0=[]\n\tfor i in range(len(_list1)):\n\t\t#Take different length\n\t\t_current=_list1[i]\n\t\tif _len>=sum(_current) and sum(_list1[i])!=0:\n\t\t\t_list2=[]\n\t\t\t_n1=_current[0]\n\t\t\t_n2=_current[1]\n\t\t\t_n3=_current[2]\n\t\t\t_list2.append(_str1[:_n1])\n\t\t\t_list2.append(_str1[_n1:_n1+_n2])\n\t\t\t_list2.append(_str1[_n1+_n2:_n1+_n2+_n3])\n\t\telse:\n\t\t\tcontinue\n\t\tn=0\n\t\tfor j in range(3):\n\t\t\tif _list2[j] in _dict_ori or _list2[j]==\"\":\n\t\t\t\tn+=1\n\t\tif n==3:\n\t\t\t_list0.append(_list2)\n\treturn(_list0)", "def decompress_encoded_list( nums ):\n decompressed = []\n\n for i in 
range( 0, len(nums), 2 ):\n freq = nums[i]\n val = nums[i+1]\n decompressed.extend( [val] * freq )\n\n return decompressed", "def test_decompress_offset_less_len2(self):\n b_array = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 52])\n actual = LZ77.decompress(b_array)\n expected = 'abcdabcdab'\n self.assertEqual(actual, expected)", "def apply_compression(pc2_table, keys_56bits):\n keys_48bits = \"\"\n for index in pc2_table:\n keys_48bits += keys_56bits[index - 1]\n return keys_48bits", "def test_decompress_2(self):\n b_array = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n actual = LZ77.decompress(b_array)\n expected = 'abcdefdeabc'\n self.assertEqual(actual, expected)", "def test_compress_2(self):\n text = 'abcdefdeabc'\n actual = LZ77.compress(text)\n expected = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n self.assertEqual(actual, expected)", "def get_keys_from_list():\n json_data = request.get_json()\n\n d = dict()\n d['elements'] = list()\n settings.setOptionsFile(get_info('uid'))\n fn = settings.getHistoROOTFileName()\n rfn = settings.getReferenceROOTFileName()\n# open root file stored in the root database\n f = ROOT.TFile(fn)\n# open reference root file stored in the root database\n rf = ROOT.TFile(rfn)\n\n for values in json_data.itervalues():\n for k in values:\n subd = dict()\n subd[\"index\"] = k[\"index\"]\n if fn != k[\"file\"]: \n fn = k[\"file\"]\n settings.setHistoROOTFileName(fn)\n f = ROOT.TFile(fn)\n print \"histogram :>>>>>: \",k[\"histogram\"]\n subd[\"data\"] = eval(cppyy.gbl.getDictionary(f,k[\"histogram\"]))\n if rfn != k[\"referenceFile\"]: \n rfn = k[\"referenceFile\"]\n settings.setReferenceROOTFileName(rfn)\n rf = ROOT.TFile(rfn)\n subd[\"refdata\"] = eval(cppyy.gbl.getDictionary(rf,k[\"reference\"]))\n d['elements'].append(subd)\n\n f.Close()\n rf.Close()\n\n return jsonify(d)", "def listz2diclist(listz):\n\ttmp=[]\n\tfor elem in listz:\n\t\tdic = elem.warez2dic()\n\t\ttmp.append(dic)\n\treturn tmp", "def test_compress_lossless_maps(self):\n height_map = 384\n width_map = 384\n \n # The quantization bin widths are small\n # so that the comparison between the\n # theoretical (minimum) coding cost and\n # the coding cost computed by the function\n # is precise enough.\n bin_widths_test = numpy.array([0.5, 0.25], dtype=numpy.float32)\n laplace_scales = numpy.array([0.5, 3.], dtype=numpy.float32)\n \n # Note that the binary probabilities saved at\n # \"lossless/pseudo_data/binary_probabilities_compress_maps_0.npy\"\n # and those saved at\n # \"lossless/pseudo_data/binary_probabilities_compress_maps_1.npy\"\n # are specific to the three Laplace distributions\n # below. 
This means that the binary probabilities\n # must be modified if `laplace_scales` is modified.\n paths_to_binary_probabilities = [\n 'lossless/pseudo_data/binary_probabilities_compress_maps_0.npy',\n 'lossless/pseudo_data/binary_probabilities_compress_maps_1.npy'\n ]\n \n centered_data_0 = numpy.random.laplace(loc=0.,\n scale=laplace_scales[0].item(),\n size=(1, height_map, width_map, 1)).astype(numpy.float32)\n centered_data_1 = numpy.random.laplace(loc=0.,\n scale=laplace_scales[1].item(),\n size=(1, height_map, width_map, 1)).astype(numpy.float32)\n centered_data = numpy.concatenate((centered_data_0, centered_data_1),\n axis=3)\n expanded_centered_quantized_data = tls.quantize_per_map(centered_data, bin_widths_test)\n centered_quantized_data = numpy.squeeze(expanded_centered_quantized_data,\n axis=0)\n tiled_bin_widths = numpy.tile(numpy.reshape(bin_widths_test, (1, 1, 2)),\n (height_map, width_map, 1))\n ref_int16 = tls.cast_float_to_int16(centered_quantized_data/tiled_bin_widths)\n (rec_int16_0, nb_bits_each_map_0) = \\\n lossless.compression.compress_lossless_maps(ref_int16,\n paths_to_binary_probabilities[0])\n numpy.testing.assert_equal(ref_int16,\n rec_int16_0,\n err_msg='The test fails as the lossless compression alters the signed integers.')\n (rec_int16_1, nb_bits_each_map_1) = \\\n lossless.compression.compress_lossless_maps(ref_int16,\n paths_to_binary_probabilities[1])\n numpy.testing.assert_equal(ref_int16,\n rec_int16_1,\n err_msg='The test fails as the lossless compression alters the signed integers.')\n \n # The equation below is derived from the\n # theorem 8.3.1 in the book\n # \"Elements of information theory\", 2nd edition,\n # written by Thomas M. Cover and Joy A. Thomas.\n theoretical_entropies = -numpy.log2(bin_widths_test) + (numpy.log(2.*laplace_scales) + 1.)/numpy.log(2.)\n print('B0 denotes the binary probabilities saved at \"{}\".'.format(paths_to_binary_probabilities[0]))\n print('B1 denotes the binary probabilities saved at \"{}\".'.format(paths_to_binary_probabilities[1]))\n print('\\n1st centered-quantized latent variable feature map.')\n print('Theoretical coding cost: {} bits.'.format(theoretical_entropies[0]*height_map*width_map))\n print('Coding cost computed by the function via B0: {} bits.'.format(nb_bits_each_map_0[0]))\n print('Coding cost computed by the function via B1: {} bits.'.format(nb_bits_each_map_1[0]))\n print('\\n2nd centered-quantized latent variable feature map.')\n print('Theoretical coding cost: {} bits.'.format(theoretical_entropies[1]*height_map*width_map))\n print('Coding cost computed by the function via B0: {} bits.'.format(nb_bits_each_map_0[1]))\n print('Coding cost computed by the function via B1: {} bits.'.format(nb_bits_each_map_1[1]))", "def test_decompress_1(self):\n b_array = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 49])\n actual = LZ77.decompress(b_array)\n expected = 'abcdabc'\n self.assertEqual(actual, expected)", "def test_compress(self):\n form_field = MultiLingualFormField()\n compressed_data = form_field.compress([\"test-nb\", \"test-en\"])\n self.assertEqual(MultiLingualTextStructure, type(compressed_data))\n self.assertEqual(compressed_data['nb'], \"test-nb\")\n self.assertEqual(compressed_data['en'], \"test-en\")", "def zipped_data(b_data):\n col_names = [\"Name\", \"Version\", \"Date and Time\"]\n bundle_data = []\n for bundle in b_data:\n zipped = zip(col_names, bundle)\n bundle_data.append(dict(zipped))\n return bundle_data", "def construct_zi_dict(train_info_list, test_info_list):\r\n zi_dict, 
train_dataset_list, test_dataset_list = dict(), list(), list()\r\n for user, age, gender, education, querys in train_info_list:\r\n for query in querys:\r\n for zi in query:\r\n if zi not in zi_dict:\r\n zi_dict[zi] = 0\r\n zi_dict[zi] += 1\r\n for user, querys in test_info_list:\r\n for query in querys:\r\n for zi in query:\r\n if zi not in zi_dict:\r\n zi_dict[zi] = 0\r\n zi_dict[zi] += 1\r\n zi_list = sorted(zi_dict.iteritems(), key=lambda x: x[1], reverse=True)\r\n zi2index = dict([(zi[0], [zi[1], idx]) for idx, zi in enumerate(zi_list)])\r\n index2zi = dict([(idx, [zi[0], zi[1]]) for idx, zi in enumerate(zi_list)])\r\n \r\n return zi2index, index2zi", "def _lzw(self, tile: bytes) -> np.ndarray:\n decoded = self._reshape(np.frombuffer(imagecodecs.lzw_decode(tile), self.dtype))\n self._unpredict(decoded)\n return np.rollaxis(decoded, 2, 0)", "def test_compress_4_idenctical_char(self):\n text = 'bbbb'\n actual = LZ77.compress(text)\n expected = bytearray([32]) + bytearray(b'bb') + bytearray([0, 16])\n self.assertEqual(actual, expected)", "def zzX_from_dict(f, l):\n if l == 1:\n return zzx_from_dict(f)\n elif not f:\n return zzX_zero(l)\n\n coeffs = {}\n\n for monom, coeff in f.iteritems():\n head, tail = monom[0], monom[1:]\n\n if len(tail) == 1:\n tail = tail[0]\n\n if coeffs.has_key(head):\n coeffs[head][tail] = INT_TYPE(int(coeff))\n else:\n coeffs[head] = { tail : INT_TYPE(int(coeff)) }\n\n n, h = max(coeffs.iterkeys()), []\n\n for k in xrange(n, -1, -1):\n coeff = coeffs.get(k)\n\n if coeff is not None:\n h.append(zzX_from_dict(coeff, l-1))\n else:\n h.append(zzX_zero(l-1))\n\n return zzX_strip(h)", "def getData_goodmaps(liste_dictionnaires = [], liste_categories = [], liste_phonemes = [],liste_cartes=[]):\n if liste_dictionnaires!=[] and liste_categories!=[] and liste_phonemes!=[]:\n tableau = np.array(liste_dictionnaires[0][liste_categories[0]][liste_phonemes[0]])\n nb_exemple,nb_carte,lign,col=tableau.shape\n else:\n return [],[],[],[]\n\n Mat = []\n Reference = []\n\n\n for inddict,dict in enumerate(liste_dictionnaires):\n for indcat,cat in enumerate(liste_categories):\n for indpho,pho in enumerate(liste_phonemes):\n for ex in range(nb_exemple):\n goodmaps = []\n for map in liste_cartes:\n goodmaps.append(np.array(dict[cat][pho][ex][map]).flatten())\n Mat.append(np.array(goodmaps).flatten())\n Reference.append([inddict,indcat ,indpho])\n Reference = np.array(Reference)\n Y_c_inc = change_reference(Reference[:,1])\n Y_r_v = Reference[:,2]\n Y_fr_jap = Reference[:,0]\n return np.array(Mat), np.array(Y_c_inc), np.array(Y_r_v), np.array(Y_fr_jap)", "def get_data(url, seed):\n available_fields = {\n 'boro': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'cd': {'fieldtype': 'C', 'categories': range(1, 19)},\n 'uf1_1': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_2': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_3': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_4': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_5': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_6': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_7': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_8': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_9': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_10': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_11': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_12': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_13': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_14': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_15': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_16': 
{'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_17': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_18': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_19': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_20': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_21': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_22': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'sc24': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc36': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'sc37': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc38': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'sc114': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'uf48': {'fieldtype': 'N'},\n 'sc147': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'uf11': {'fieldtype': 'C', 'categories': range(1, 8)},\n 'sc149': {'fieldtype': 'B', 'codes': [1, 2, None]},\n 'sc173': {'fieldtype': 'C', 'categories': [1, 2, 3, 9]},\n 'sc171': {'fieldtype': 'B', 'codes': [1, 2]},\n 'sc150': {'fieldtype': 'N'},\n 'sc151': {'fieldtype': 'N'},\n 'sc154': {'fieldtype': 'C', 'categories': [1, 2, 3, 9]},\n 'sc157': {'fieldtype': 'C', 'categories': [1, 2, 9]},\n 'sc158': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc185': {'fieldtype': 'B', 'codes': [0, 1, 8]},\n 'sc186': {'fieldtype': 'C', 'categories': [2, 3, 4, 5, 9]},\n 'sc197': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc198': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc187': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc188': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc571': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'sc189': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'sc190': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc191': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc192': {'fieldtype': 'B', 'codes': [0, 1, 8]},\n 'sc193': {'fieldtype': 'C', 'categories': [2, 3, 9]},\n 'sc194': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc196': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc199': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'rec15': {'fieldtype': 'C', 'categories': range(1, 14)},\n 'sc26': {'fieldtype': 'C', 'categories': [12, 13, 15, 16]},\n 'uf23': {'fieldtype': 'N'},\n 'rec21': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'rec62': {'fieldtype': 'C', 'categories': [1, 2, 4, 5]},\n 'rec64': {'fieldtype': 'C', 'categories': [1, 2, 4, 5]},\n 'rec54': {'fieldtype': 'C', 'categories': range(1, 8)},\n 'rec53': {'fieldtype': 'N'},\n 'new_csr': {'fieldtype': 'C', 'categories': [1, 2, 5, 12, 20,\n 21, 22, 23, 30, 31,\n 80, 85, 90, 95]}\n }\n selected_fields = [\n # The borough where the apartment is located\n 'boro',\n\n # Building type: public housing, new construction,\n # \"In Rem\" foreclosure, old construction\n 'sc26',\n\n # Number of bedrooms\n 'sc151',\n\n # Dilapidated / Not Dilapidated\n 'rec21',\n\n # Complete plumbing facilities in unit\n 'rec62',\n\n # Complete kitchen facilities in unit\n 'rec64',\n\n # Maintenance deficiencies\n 'rec53',\n\n # Building age\n 'uf23',\n\n # Rent control/stabilization category\n 'new_csr',\n\n # Neighborhood rating\n 'sc196',\n\n # Wheelchair accessibility of unit\n 'sc38',\n\n # Presence of elevator\n 'sc149',\n\n # Building height\n 'uf11',\n\n # Air conditioning\n 'sc197',\n\n # Walkup\n 'sc171',\n ]\n mini_fields = {k: available_fields[k]\n for k in available_fields\n if k in selected_fields}\n y_field = 'uf17'\n # s = requests.get(url).content\n # raw_df = pd.read_csv(StringIO(s.decode('utf-8')))\n raw_df = pd.read_csv('homework2_data.csv')\n valid_renters, validated_features, validated_rents = \\\n 
preprocess_data(raw_df, mini_fields, y_field)\n X_train, X_test, y_train, y_test = train_test_split(\n validated_features, validated_rents, random_state=seed)\n cats = [k\n for (k, v) in mini_fields.items()\n if v[\"fieldtype\"] == \"C\"]\n catnums = [i\n for (i, x) in enumerate([c in cats\n for c in validated_features.columns])\n if x]\n return X_train, X_test, y_train, y_test, catnums, raw_df", "def data():\n \n # Just in case order matters.\n inplist = expected.keys()\n inplist.sort()\n \n #return ['split/' + inpms for inpms in inplist]\n return inplist", "def compress(uncompressed):\r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((chr(i), i) for i in range(dict_size))\r\n # in Python 3: dictionary = {chr(i): i for i in range(dict_size)}\r\n \r\n w = \"\"\r\n result = []\r\n for c in uncompressed:\r\n wc = w + c\r\n if wc in dictionary:\r\n w = wc\r\n else:\r\n result.append(dictionary[w])\r\n # Add wc to the dictionary.\r\n dictionary[wc] = dict_size\r\n dict_size += 1\r\n w = c\r\n \r\n # Output the code for w.\r\n if w:\r\n result.append(dictionary[w])\r\n return result", "def create_data_set():\n data_set = {}\n for index in range(1024):\n size = random.randint(1, 100) #nosec\n key = str(index).encode(\"utf-8\")\n data_set[key] = get_random_bytes(size)\n return data_set" ]
[ "0.81415105", "0.5463324", "0.5255911", "0.51981646", "0.51777226", "0.5148649", "0.5143358", "0.51358664", "0.5128433", "0.5083292", "0.5054639", "0.5033062", "0.50319797", "0.5004983", "0.49983892", "0.49932218", "0.49870348", "0.498288", "0.49821144", "0.49720147", "0.49679914", "0.49470782", "0.4896989", "0.4894046", "0.48793006", "0.4875982", "0.48729634", "0.48617095", "0.48473468", "0.4841541" ]
0.785483
1
LZW decompression. dict_of_abc is the input data dictionary; each dictionary index holds a list, and new entries are added to it as decompression proceeds. list_of_data is the sequence of numbers to decompress.
def do_LZW_DeCompression(dict_of_abc, list_of_data):

    # https://www.youtube.com/watch?v=MQM_DsX-LBI

    out = []
    predchozi_out = []
    for i in range(len(list_of_data)):
        new = []
        new.extend(predchozi_out)
        if list_of_data[i] in dict_of_abc:
            o = dict_of_abc[list_of_data[i]]
            out.extend(o)
            predchozi_out = o

            # if o is a multi-element list, take only its first element
            if len(o) > 1:
                new.append(o[0])
            else:
                new.extend(o)

            index_founded = dict_cointains_list(dict_of_abc, new)
            if index_founded == -1:
                # if new is not in the dictionary, add it
                dict_of_abc[len(dict_of_abc) + 1] = new

    return dict_of_abc, out
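A matching round-trip sketch (again illustrative, not part of the original record): it decodes the codes produced in the compression example above, starting from a fresh copy of the same single-symbol alphabet, and assumes every code read is already present in the dictionary at that point.

# Hypothetical round trip: decode the codes from the compression sketch above.
codes = [1, 2, 4, 4, 3]
abc = {1: [1], 2: [2], 3: [3]}  # fresh copy of the initial alphabet
abc, decoded = do_LZW_DeCompression(abc, codes)
print(decoded)  # [1, 2, 1, 2, 1, 2, 3] -- the original input sequence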
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_LZW_Compression(dict_of_abc, list_of_data):\n \n # rozdil mezi None a [] je v pouziti metody extend na listu\n \n result = []\n P = []\n C = [] # C je vzdy jeden prvek ze vstupu\n PC = []\n \n #how it works video xplanation https://www.youtube.com/watch?v=MQ4ObKv2L_M\n \n for i in range(len(list_of_data)):\n \"\"\"\n Cyklus pres vsecky vstupni prvky\n \"\"\"\n\n C = []\n C.append(list_of_data[i])\n\n #PC je vzdy kombinace P a C\n PC = []\n PC.extend(P)\n PC.extend(C)\n\n index_founded = dict_cointains_list(dict_of_abc, PC)\n if index_founded == -1:\n #pokud PC neni ve slovniku, pridam ho tam a P = C\n dict_of_abc[len(dict_of_abc) +1] = PC\n #output P key in dictionary\n result.append(dict_cointains_list(dict_of_abc, P))\n P = C\n else:\n #pokud PC je ve slovniku P = PC pro dalsi iteraci\n P = PC\n #pridani posledniho prvku\n result.append(dict_cointains_list(dict_of_abc, P))\n return dict_of_abc, result", "def getUniChemData(self, inchiKeyList):\n mapD = {\n 1: {\"name\": \"chembl\", \"baseUrl\": \"https://www.ebi.ac.uk/chembl/\", \"entryUrl\": \"https://www.ebi.ac.uk/chembldb/compound/inspect/\"},\n 3: {\"name\": \"pdb\", \"baseUrl\": \"http://www.ebi.ac.uk/pdbe/\", \"entryUrl\": \"http://www.ebi.ac.uk/pdbe-srv/pdbechem/chemicalCompound/show/\"},\n 2: {\"name\": \"drugbank\", \"baseUrl\": \"http://drugbank.ca/\", \"entryUrl\": \"http://www.drugbank.ca/drugs/\"},\n 5: {\"name\": \"pubchem_dotf\", \"baseUrl\": \"http://pubchem.ncbi.nlm.nih.gov/sources/sources.cgi\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/substance/\"},\n 4: {\"name\": \"gtopdb\", \"baseUrl\": \"http://www.guidetopharmacology.org\", \"entryUrl\": \"http://www.guidetopharmacology.org/GRAC/LigandDisplayForward?ligandId=\"},\n 11: {\"name\": \"ibm\", \"baseUrl\": \"http://www-935.ibm.com/services/us/gbs/bao/siip/nih/\", \"entryUrl\": \"http://www-935.ibm.com/services/us/gbs/bao/siip/nih/?sid=\"},\n 6: {\"name\": \"kegg_ligand\", \"baseUrl\": \"http://www.genome.jp/kegg/ligand.html\", \"entryUrl\": \"http://www.genome.jp/dbget-bin/www_bget?\"},\n 9: {\"name\": \"zinc\", \"baseUrl\": \"http://zinc15.docking.org\", \"entryUrl\": \"http://zinc15.docking.org/substances/\"},\n 8: {\"name\": \"nih_ncc\", \"baseUrl\": \"http://nihsmr.evotec.com/evotec/\", \"entryUrl\": \"\"},\n 10: {\"name\": \"emolecules\", \"baseUrl\": \"https://www.emolecules.com/\", \"entryUrl\": \"https://www.emolecules.com/cgi-bin/more?vid=\"},\n 12: {\"name\": \"atlas\", \"baseUrl\": \"http://www.ebi.ac.uk/gxa/home\", \"entryUrl\": \"http://www.ebi.ac.uk/gxa/query?conditionQuery=\"},\n 7: {\"name\": \"chebi\", \"baseUrl\": \"http://www.ebi.ac.uk/chebi/downloadsForward.do\", \"entryUrl\": \"http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI%3A\"},\n 14: {\n \"name\": \"fdasrs\",\n \"baseUrl\": \"http://fdasis.nlm.nih.gov/srs/srs.jsp\",\n \"entryUrl\": \"http://fdasis.nlm.nih.gov/srs/ProxyServlet?mergeData=true&objectHandle=DBMaint&APPLICATION_NAME=fdasrs&actionHandle=default&nextPage=jsp/srs/ResultScreen.jsp&TXTSUPERLISTID=\",\n },\n 15: {\"name\": \"surechembl\", \"baseUrl\": \"https://www.surechembl.org/search/\", \"entryUrl\": \"https://www.surechembl.org/chemical/\"},\n 21: {\"name\": \"pubchem_tpharma\", \"baseUrl\": \"http://www.thomson-pharma.com/\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/substance/\"},\n 22: {\"name\": \"pubchem\", \"baseUrl\": \"http://pubchem.ncbi.nlm.nih.gov\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/compound/\"},\n 27: {\"name\": \"recon\", \"baseUrl\": \"https://vmh.uni.lu\", \"entryUrl\": 
\"https://vmh.uni.lu/\"},\n 28: {\"name\": \"molport\", \"baseUrl\": \"https://www.molport.com/shop/index\", \"entryUrl\": \"https://www.molport.com/shop/molecule-link/\"},\n 31: {\n \"name\": \"bindingdb\",\n \"baseUrl\": \"https://www.bindingdb.org/bind/index.jsp\",\n \"entryUrl\": \"http://www.bindingdb.org/bind/chemsearch/marvin/MolStructure.jsp?monomerid=\",\n },\n 41: {\"name\": \"swisslipids\", \"baseUrl\": \"http://www.swisslipids.org/\", \"entryUrl\": \"http://www.swisslipids.org/\"},\n 29: {\"name\": \"nikkaji\", \"baseUrl\": \"http://jglobal.jst.go.jp/en/\", \"entryUrl\": \"http://jglobal.jst.go.jp/en/redirect?Nikkaji_No=\"},\n 32: {\"name\": \"comptox\", \"baseUrl\": \"https://comptox.epa.gov/dashboard/\", \"entryUrl\": \"https://comptox.epa.gov/dashboard/\"},\n 33: {\"name\": \"lipidmaps\", \"baseUrl\": \"http://www.lipidmaps.org\", \"entryUrl\": \"http://www.lipidmaps.org/data/LMSDRecord.php?LMID=\"},\n 35: {\"name\": \"carotenoiddb\", \"baseUrl\": \"http://carotenoiddb.jp/index.html\", \"entryUrl\": \"http://carotenoiddb.jp/Entries/\"},\n 36: {\"name\": \"metabolights\", \"baseUrl\": \"http://www.ebi.ac.uk/metabolights/\", \"entryUrl\": \"http://www.ebi.ac.uk/metabolights/\"},\n 37: {\"name\": \"brenda\", \"baseUrl\": \"https://www.brenda-enzymes.org/index.php\", \"entryUrl\": \"https://www.brenda-enzymes.org/ligand.php?brenda_ligand_id=\"},\n 17: {\"name\": \"pharmgkb\", \"baseUrl\": \"https://www.pharmgkb.org\", \"entryUrl\": \"https://www.pharmgkb.org/drug/\"},\n 18: {\"name\": \"hmdb\", \"baseUrl\": \"http://www.hmdb.ca\", \"entryUrl\": \"http://www.hmdb.ca/metabolites/\"},\n 24: {\n \"name\": \"nmrshiftdb2\",\n \"baseUrl\": \"http://nmrshiftdb.nmr.uni-koeln.de/portal/media-type/html/user/anon/page/default.psml/js_pane/P-Home\",\n \"entryUrl\": \"http://nmrshiftdb.org/molecule/\",\n },\n 25: {\"name\": \"lincs\", \"baseUrl\": \"http://www.lincsproject.org/\", \"entryUrl\": \"http://identifiers.org/lincs.smallmolecule/\"},\n 39: {\"name\": \"chemicalbook\", \"baseUrl\": \"https://www.chemicalbook.com\", \"entryUrl\": \"https://www.chemicalbook.com/ChemicalProductProperty_EN_\"},\n 20: {\"name\": \"selleck\", \"baseUrl\": \"http://www.selleckchem.com\", \"entryUrl\": \"http://www.selleckchem.com/products/\"},\n 23: {\"name\": \"mcule\", \"baseUrl\": \"https://mcule.com\", \"entryUrl\": \"https://mcule.com/\"},\n 26: {\"name\": \"actor\", \"baseUrl\": \"https://actor.epa.gov\", \"entryUrl\": \"http://actor.epa.gov/actor/chemical.xhtml?casrn=\"},\n 34: {\"name\": \"drugcentral\", \"baseUrl\": \"http://drugcentral.org\", \"entryUrl\": \"http://drugcentral.org/drugcard/\"},\n 38: {\"name\": \"rhea\", \"baseUrl\": \"http://www.rhea-db.org\", \"entryUrl\": \"http://www.rhea-db.org/searchresults?q=CHEBI:\"},\n }\n oD = {}\n try:\n for ky in inchiKeyList:\n unc = unichem_client # pylint: disable=no-member\n # unc.set_format(\"json\")\n uDL = unc.get(ky)\n if uDL:\n qD = {}\n for uD in uDL:\n if \"src_id\" in uD and int(uD[\"src_id\"]) in mapD:\n qD[mapD[int(uD[\"src_id\"])][\"name\"]] = uD[\"src_compound_id\"]\n if qD:\n oD[ky] = qD\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n return oD", "def dict_cointains_list(dict_of_abc, item_list):\n\n values = list(dict_of_abc.values())\n\n #projdu vsecky listy ve slovniku\n for i in range(len(values)):\n #predpokladam ze ve slovniku je\n finded = True\n \n for j in range(len(values[i])):\n if len(item_list) == len(values[i]):\n # kontrola po jednotlivych hodnotach\n # logicky soucin - pokud jednou False, 
navzdy False\n finded = finded and item_list[j] == values[i][j]\n else:\n finded = False\n\n if finded:\n # cyklus indexuje od 0, slovnik ale indexujeme-klicujeme od 1\n return i + 1 \n\n return -1", "def generate_data_list(self, data_list):\n sol = []\n for i in data_list:\n sol.append({\n \"data_sig\": i[0],\n \"data\": pickle.loads(i[1]),\n \"attachment\": i[2],\n \"likes\":pickle.loads(i[3]),\n \"dislikes\":pickle.loads(i[4]),\n \"owner\": i[5]\n })\n return sol", "def listz2diclist(listz):\n\ttmp=[]\n\tfor elem in listz:\n\t\tdic = elem.warez2dic()\n\t\ttmp.append(dic)\n\treturn tmp", "def getDataDict(self):\n #code begins here \n return self.__dflst,self.__dfwells", "def getDataForLBMPZonalComparison(self):\n\n\t\t# Variables\n\t\tzonal_data = self.getDayAheadMarketLBMPZonal()\n\t\tkeys = zonal_data.keys()\n\t\tfinal_data = []\n\t\tvalues = []\n\t\touter_dictionary = {}\n\t\tinner_dictionary = {}\n\n\t\t# Populating final data array and returning it\n\t\tfor key in keys:\n\t\t\tfor data in zonal_data[key]:\n\t\t\t\tinner_dictionary['x'] = data.keys()[0]\n\t\t\t\tinner_dictionary['y'] = data[data.keys()[0]]['LBMP ($/MWHr)']\n\t\t\t\tvalues.append(inner_dictionary)\n\t\t\t\tinner_dictionary = {}\n\t\t\touter_dictionary['values'] = values\n\t\t\tvalues = []\n\t\t\touter_dictionary['key'] = key\n\t\t\tfinal_data.append(outer_dictionary)\n\t\t\touter_dictionary = {}\n\n\t\treturn final_data", "def get_data(url, seed):\n available_fields = {\n 'boro': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'cd': {'fieldtype': 'C', 'categories': range(1, 19)},\n 'uf1_1': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_2': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_3': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_4': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_5': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_6': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_7': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_8': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_9': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_10': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_11': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_12': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_13': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_14': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_15': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_16': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_17': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_18': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_19': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_20': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_21': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'uf1_22': {'fieldtype': 'B', 'codes': [1, 9, 8]},\n 'sc24': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc36': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'sc37': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc38': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'sc114': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'uf48': {'fieldtype': 'N'},\n 'sc147': {'fieldtype': 'C', 'categories': [1, 2, 3]},\n 'uf11': {'fieldtype': 'C', 'categories': range(1, 8)},\n 'sc149': {'fieldtype': 'B', 'codes': [1, 2, None]},\n 'sc173': {'fieldtype': 'C', 'categories': [1, 2, 3, 9]},\n 'sc171': {'fieldtype': 'B', 'codes': [1, 2]},\n 'sc150': {'fieldtype': 'N'},\n 'sc151': {'fieldtype': 'N'},\n 'sc154': {'fieldtype': 'C', 'categories': [1, 2, 3, 9]},\n 'sc157': {'fieldtype': 'C', 'categories': [1, 2, 9]},\n 'sc158': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc185': {'fieldtype': 
'B', 'codes': [0, 1, 8]},\n 'sc186': {'fieldtype': 'C', 'categories': [2, 3, 4, 5, 9]},\n 'sc197': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc198': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc187': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc188': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc571': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'sc189': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'sc190': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc191': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc192': {'fieldtype': 'B', 'codes': [0, 1, 8]},\n 'sc193': {'fieldtype': 'C', 'categories': [2, 3, 9]},\n 'sc194': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'sc196': {'fieldtype': 'C', 'categories': [1, 2, 3, 4]},\n 'sc199': {'fieldtype': 'C', 'categories': range(1, 6)},\n 'rec15': {'fieldtype': 'C', 'categories': range(1, 14)},\n 'sc26': {'fieldtype': 'C', 'categories': [12, 13, 15, 16]},\n 'uf23': {'fieldtype': 'N'},\n 'rec21': {'fieldtype': 'B', 'codes': [1, 2, 8]},\n 'rec62': {'fieldtype': 'C', 'categories': [1, 2, 4, 5]},\n 'rec64': {'fieldtype': 'C', 'categories': [1, 2, 4, 5]},\n 'rec54': {'fieldtype': 'C', 'categories': range(1, 8)},\n 'rec53': {'fieldtype': 'N'},\n 'new_csr': {'fieldtype': 'C', 'categories': [1, 2, 5, 12, 20,\n 21, 22, 23, 30, 31,\n 80, 85, 90, 95]}\n }\n selected_fields = [\n # The borough where the apartment is located\n 'boro',\n\n # Building type: public housing, new construction,\n # \"In Rem\" foreclosure, old construction\n 'sc26',\n\n # Number of bedrooms\n 'sc151',\n\n # Dilapidated / Not Dilapidated\n 'rec21',\n\n # Complete plumbing facilities in unit\n 'rec62',\n\n # Complete kitchen facilities in unit\n 'rec64',\n\n # Maintenance deficiencies\n 'rec53',\n\n # Building age\n 'uf23',\n\n # Rent control/stabilization category\n 'new_csr',\n\n # Neighborhood rating\n 'sc196',\n\n # Wheelchair accessibility of unit\n 'sc38',\n\n # Presence of elevator\n 'sc149',\n\n # Building height\n 'uf11',\n\n # Air conditioning\n 'sc197',\n\n # Walkup\n 'sc171',\n ]\n mini_fields = {k: available_fields[k]\n for k in available_fields\n if k in selected_fields}\n y_field = 'uf17'\n # s = requests.get(url).content\n # raw_df = pd.read_csv(StringIO(s.decode('utf-8')))\n raw_df = pd.read_csv('homework2_data.csv')\n valid_renters, validated_features, validated_rents = \\\n preprocess_data(raw_df, mini_fields, y_field)\n X_train, X_test, y_train, y_test = train_test_split(\n validated_features, validated_rents, random_state=seed)\n cats = [k\n for (k, v) in mini_fields.items()\n if v[\"fieldtype\"] == \"C\"]\n catnums = [i\n for (i, x) in enumerate([c in cats\n for c in validated_features.columns])\n if x]\n return X_train, X_test, y_train, y_test, catnums, raw_df", "def decompress_encoded_list( nums ):\n decompressed = []\n\n for i in range( 0, len(nums), 2 ):\n freq = nums[i]\n val = nums[i+1]\n decompressed.extend( [val] * freq )\n\n return decompressed", "def dict_check34(_str1):\n\t_len=len(_str1)\n\t_list1=[(i,j,k) for i in range(5) for j in range(5) for k in range(5)]\n\t_list0=[]\n\tfor i in range(len(_list1)):\n\t\t#Take different length\n\t\t_current=_list1[i]\n\t\tif _len>=sum(_current) and sum(_list1[i])!=0:\n\t\t\t_list2=[]\n\t\t\t_n1=_current[0]\n\t\t\t_n2=_current[1]\n\t\t\t_n3=_current[2]\n\t\t\t_list2.append(_str1[:_n1])\n\t\t\t_list2.append(_str1[_n1:_n1+_n2])\n\t\t\t_list2.append(_str1[_n1+_n2:_n1+_n2+_n3])\n\t\telse:\n\t\t\tcontinue\n\t\tn=0\n\t\tfor j in range(3):\n\t\t\tif _list2[j] in _dict_ori or _list2[j]==\"\":\n\t\t\t\tn+=1\n\t\tif 
n==3:\n\t\t\t_list0.append(_list2)\n\treturn(_list0)", "def get_keys_from_list():\n json_data = request.get_json()\n\n d = dict()\n d['elements'] = list()\n settings.setOptionsFile(get_info('uid'))\n fn = settings.getHistoROOTFileName()\n rfn = settings.getReferenceROOTFileName()\n# open root file stored in the root database\n f = ROOT.TFile(fn)\n# open reference root file stored in the root database\n rf = ROOT.TFile(rfn)\n\n for values in json_data.itervalues():\n for k in values:\n subd = dict()\n subd[\"index\"] = k[\"index\"]\n if fn != k[\"file\"]: \n fn = k[\"file\"]\n settings.setHistoROOTFileName(fn)\n f = ROOT.TFile(fn)\n print \"histogram :>>>>>: \",k[\"histogram\"]\n subd[\"data\"] = eval(cppyy.gbl.getDictionary(f,k[\"histogram\"]))\n if rfn != k[\"referenceFile\"]: \n rfn = k[\"referenceFile\"]\n settings.setReferenceROOTFileName(rfn)\n rf = ROOT.TFile(rfn)\n subd[\"refdata\"] = eval(cppyy.gbl.getDictionary(rf,k[\"reference\"]))\n d['elements'].append(subd)\n\n f.Close()\n rf.Close()\n\n return jsonify(d)", "def test_decompress_offset_less_len1(self):\n b_array = bytearray([32]) + bytearray(b'ab') + bytearray([0, 18])\n actual = LZ77.decompress(b_array)\n expected = 'ababab'\n self.assertEqual(actual, expected)", "def getData_goodmaps(liste_dictionnaires = [], liste_categories = [], liste_phonemes = [],liste_cartes=[]):\n if liste_dictionnaires!=[] and liste_categories!=[] and liste_phonemes!=[]:\n tableau = np.array(liste_dictionnaires[0][liste_categories[0]][liste_phonemes[0]])\n nb_exemple,nb_carte,lign,col=tableau.shape\n else:\n return [],[],[],[]\n\n Mat = []\n Reference = []\n\n\n for inddict,dict in enumerate(liste_dictionnaires):\n for indcat,cat in enumerate(liste_categories):\n for indpho,pho in enumerate(liste_phonemes):\n for ex in range(nb_exemple):\n goodmaps = []\n for map in liste_cartes:\n goodmaps.append(np.array(dict[cat][pho][ex][map]).flatten())\n Mat.append(np.array(goodmaps).flatten())\n Reference.append([inddict,indcat ,indpho])\n Reference = np.array(Reference)\n Y_c_inc = change_reference(Reference[:,1])\n Y_r_v = Reference[:,2]\n Y_fr_jap = Reference[:,0]\n return np.array(Mat), np.array(Y_c_inc), np.array(Y_r_v), np.array(Y_fr_jap)", "def construct_zi_dict(train_info_list, test_info_list):\r\n zi_dict, train_dataset_list, test_dataset_list = dict(), list(), list()\r\n for user, age, gender, education, querys in train_info_list:\r\n for query in querys:\r\n for zi in query:\r\n if zi not in zi_dict:\r\n zi_dict[zi] = 0\r\n zi_dict[zi] += 1\r\n for user, querys in test_info_list:\r\n for query in querys:\r\n for zi in query:\r\n if zi not in zi_dict:\r\n zi_dict[zi] = 0\r\n zi_dict[zi] += 1\r\n zi_list = sorted(zi_dict.iteritems(), key=lambda x: x[1], reverse=True)\r\n zi2index = dict([(zi[0], [zi[1], idx]) for idx, zi in enumerate(zi_list)])\r\n index2zi = dict([(idx, [zi[0], zi[1]]) for idx, zi in enumerate(zi_list)])\r\n \r\n return zi2index, index2zi", "def test_xyz_from_data(self):\n symbols = ('C', 'H', 'H', 'H', 'H')\n isotopes = (12, 1, 1, 1, 1)\n coords = ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))\n xyz_dict0 = converter.xyz_from_data(coords=coords, symbols=symbols, isotopes=isotopes)\n self.assertEqual(xyz_dict0, self.xyz1['dict'])\n xyz_dict1 = converter.xyz_from_data(coords=coords, symbols=symbols) # no specifying isotopes\n self.assertEqual(xyz_dict1, self.xyz1['dict'])\n\n numbers = [6, 1, 1, 1, 1]\n coords = 
[[0.0, 0.0, 0.0],\n [0.6300326, 0.6300326, 0.6300326],\n [-0.6300326, -0.6300326, 0.6300326],\n [-0.6300326, 0.6300326, -0.6300326],\n [0.6300326, -0.6300326, -0.6300326]]\n xyz_dict2 = converter.xyz_from_data(coords=coords, numbers=numbers)\n self.assertEqual(xyz_dict2, self.xyz1['dict'])\n\n numbers = [6, 1, 1, 1, 1]\n coords = [[0.0, 0.0, 0.0],\n [0.6300326, 0.6300326, 0.6300326],\n [-0.6300326, -0.6300326, 0.6300326],\n [-0.6300326, 0.6300326, -0.6300326],\n [0.6300326, -0.6300326, -0.6300326]]\n coords = np.array([np.array(coord, np.float64) for coord in coords], np.float64)\n xyz_dict2 = converter.xyz_from_data(coords=coords, numbers=numbers)\n self.assertEqual(xyz_dict2, self.xyz1['dict'])\n self.assertIsInstance(xyz_dict2['coords'], tuple)\n self.assertIsInstance(xyz_dict2['coords'][0], tuple)", "def test_compress(self):\n form_field = MultiLingualFormField()\n compressed_data = form_field.compress([\"test-nb\", \"test-en\"])\n self.assertEqual(MultiLingualTextStructure, type(compressed_data))\n self.assertEqual(compressed_data['nb'], \"test-nb\")\n self.assertEqual(compressed_data['en'], \"test-en\")", "def test_decompress_2(self):\n b_array = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n actual = LZ77.decompress(b_array)\n expected = 'abcdefdeabc'\n self.assertEqual(actual, expected)", "def test_decompress_offset_less_len2(self):\n b_array = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 52])\n actual = LZ77.decompress(b_array)\n expected = 'abcdabcdab'\n self.assertEqual(actual, expected)", "def zzX_from_dict(f, l):\n if l == 1:\n return zzx_from_dict(f)\n elif not f:\n return zzX_zero(l)\n\n coeffs = {}\n\n for monom, coeff in f.iteritems():\n head, tail = monom[0], monom[1:]\n\n if len(tail) == 1:\n tail = tail[0]\n\n if coeffs.has_key(head):\n coeffs[head][tail] = INT_TYPE(int(coeff))\n else:\n coeffs[head] = { tail : INT_TYPE(int(coeff)) }\n\n n, h = max(coeffs.iterkeys()), []\n\n for k in xrange(n, -1, -1):\n coeff = coeffs.get(k)\n\n if coeff is not None:\n h.append(zzX_from_dict(coeff, l-1))\n else:\n h.append(zzX_zero(l-1))\n\n return zzX_strip(h)", "def zipped_data(b_data):\n col_names = [\"Name\", \"Version\", \"Date and Time\"]\n bundle_data = []\n for bundle in b_data:\n zipped = zip(col_names, bundle)\n bundle_data.append(dict(zipped))\n return bundle_data", "def test_decompress_1(self):\n b_array = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 49])\n actual = LZ77.decompress(b_array)\n expected = 'abcdabc'\n self.assertEqual(actual, expected)", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def data():\n \n # Just in case order matters.\n inplist = expected.keys()\n inplist.sort()\n \n #return ['split/' + inpms for inpms in inplist]\n return inplist", "def test_compress_offset_less_len1(self):\n text = 'ababab'\n actual = LZ77.compress(text)\n expected = bytearray([32]) + bytearray(b'ab') + bytearray([0, 18])\n self.assertEqual(actual, expected)", "def test_hmaps(self):\n #Single element insertion\n self.hll.append(self.data[0])\n assert non_zero_idx_val(hll.hmap) == [(54, 1)]\n #Multiple distinct element insertions\n self.hll.extend(self.data)\n assert 
non_zero_idx_val(hll.hmap) == [(51, 2), (54, 1), (214, 2)]\n self.reset_hll()\n #Element insertions with duplicates\n self.hll.extend(self.data_duplicate)\n assert non_zero_idx_val(hll.hmap) == [(51, 2), (54, 1), (214, 2)]\n self.reset_hll()\n #Element insertions with numerical values\n self.hll.extend(self.num_data)\n assert non_zero_idx_val(hll.hmap) == [(17, 3), (144, 2), (145, 4),\n (182, 2)]\n self.reset_hll()\n #Test the key collision handling (keep max value)\n self.hll.append(self.colliding_data[0])\n assert non_zero_idx_val(hll.hmap) == [(0, 1)] \n self.hll.append(self.colliding_data[1])\n assert non_zero_idx_val(hll.hmap) == [(0, 2)]\n self.reset_hll()\n self.hll.append(self.colliding_data[1])\n assert non_zero_idx_val(hll.hmap) == [(0, 2)] \n self.hll.append(self.colliding_data[0])\n assert non_zero_idx_val(hll.hmap) == [(0, 2)]\n self.reset_hll()", "def get_dict_refine_insee_code(ls_valid_ic):\n dict_refine_ic = {x: (x, x) for x in ls_valid_ic}\n ls_valid_ic_corse = [x for x in ls_valid_ic if re.match('2[AB]', x)]\n for ic in ls_valid_ic_corse:\n dict_refine_ic[ic[:1] + u'0' + ic[2:]] = (ic, ic) # assumed unicity was checked\n dict_ic_ardts = dict(list(itertools.product(map(str,range(13201, 13217)), ['13055']))+\\\n list(itertools.product(map(str,range(69381, 69390)), ['69123']))+\\\n list(itertools.product(map(str,range(75101, 75121)), ['75056'])))\n dict_ic_ardts = {k : (v,k) for k,v in dict_ic_ardts.items()}\n dict_refine_ic.update(dict_ic_ardts)\n return dict_refine_ic", "def apply_compression(pc2_table, keys_56bits):\n keys_48bits = \"\"\n for index in pc2_table:\n keys_48bits += keys_56bits[index - 1]\n return keys_48bits", "def test_compress_offset_less_len2(self):\n text = 'abcdabcdab'\n actual = LZ77.compress(text)\n expected = bytearray([8]) + bytearray(b'abcd') + bytearray([0, 52])\n self.assertEqual(actual, expected)", "def decode_huffman(bit_seq, dc_ac, layer_type):\n\n def diff_value(idx, size):\n if idx >= len(bit_seq) or idx + size > len(bit_seq):\n raise IndexError('There is not enough bits to decode DIFF value '\n 'codeword.')\n fixed = bit_seq[idx:idx + size]\n return int(fixed, 2)\n\n current_idx = 0\n while current_idx < len(bit_seq):\n # 1. Consume next 16 bits as `current_slice`.\n # 2. Try to find the `current_slice` in Huffman table.\n # 3. If found, yield the corresponding key and go to step 4.\n # Otherwise, remove the last element in `current_slice` and go to\n # step 2.\n # 4. Consume next n bits, where n is the category (size) in returned\n # key yielded in step 3. 
Use those info to decode the data.\n remaining_len = len(bit_seq) - current_idx\n current_slice = bit_seq[\n current_idx:\n current_idx + (16 if remaining_len > 16 else remaining_len)\n ]\n err_cache = current_slice\n while current_slice:\n if (current_slice in\n HUFFMAN_CATEGORY_CODEWORD[dc_ac][layer_type].inv):\n key = (HUFFMAN_CATEGORY_CODEWORD[dc_ac][layer_type]\n .inv[current_slice])\n if dc_ac == DC: # DC\n size = key\n if size == 0:\n yield 0\n else:\n yield HUFFMAN_CATEGORIES[size][diff_value(\n current_idx + len(current_slice),\n size\n )]\n else: # AC\n run, size = key\n if key in (EOB, ZRL):\n yield key\n else:\n yield (run, HUFFMAN_CATEGORIES[size][diff_value(\n current_idx + len(current_slice),\n size\n )])\n\n current_idx += len(current_slice) + size\n break\n current_slice = current_slice[:-1]\n else:\n raise KeyError(\n f'Cannot find any prefix of {err_cache} in Huffman table.'\n )", "def create_data_set():\n data_set = {}\n for index in range(1024):\n size = random.randint(1, 100) #nosec\n key = str(index).encode(\"utf-8\")\n data_set[key] = get_random_bytes(size)\n return data_set" ]
[ "0.7625661", "0.5576687", "0.5428306", "0.5259145", "0.52158284", "0.5127516", "0.51260525", "0.51113755", "0.5106959", "0.50973123", "0.507302", "0.5056697", "0.50454503", "0.50228345", "0.5017704", "0.49628225", "0.49544364", "0.4946142", "0.49108753", "0.49073184", "0.49037606", "0.48755017", "0.4860034", "0.4850426", "0.48478657", "0.48361212", "0.48324823", "0.48282993", "0.4826274", "0.4822718" ]
0.81224066
0
dict_of_abc looks, for example, like this
def dict_cointains_list(dict_of_abc, item_list):

    values = list(dict_of_abc.values())

    # walk through every value list stored in the dictionary
    for i in range(len(values)):
        # assume the list is in the dictionary until proven otherwise
        finded = True

        for j in range(len(values[i])):
            if len(item_list) == len(values[i]):
                # compare the lists value by value
                # logical AND - once False, always False
                finded = finded and item_list[j] == values[i][j]
            else:
                finded = False

        if finded:
            # the loop indexes from 0, but the dictionary is keyed from 1
            return i + 1

    return -1
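For illustration, a short usage sketch (not part of the original record; the dictionary below is invented, keyed from 1 as the comments assume):

abc = {1: ['a', 'b'], 2: ['c', 'd', 'e']}
dict_cointains_list(abc, ['c', 'd', 'e'])   # -> 2, the 1-based key of the matching value list
dict_cointains_list(abc, ['a', 'b'])        # -> 1
dict_cointains_list(abc, ['x'])             # -> -1, no value list matches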
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_cases_for_dict(self):\n pass", "def init_dict() -> None:\n for elem in letters:\n ascii_dict[elem] = []\n for elem in numbers:\n ascii_dict[elem] = []\n for elem in symbols:\n ascii_dict[elem] = []", "def dictogram_dictlist(self):\n for key, value in self.word_dict.items():\n self.word_dict[key] = dictogram.Dictogram(value)\n # print(\"self.word_dict\", self.word_dict)", "def secondary_keys_dicts(self):", "def multDic(dic, x):\n pass", "def __init__(self, dictionary):\n self.d = {}\n for word in dictionary:\n abbr = self.getAbbr(word)\n if abbr in self.d:\n self.d[abbr] += word,\n else:\n self.d[abbr] = [word]", "def build_arpabet(self, word_lst):\n arpabet_dict = {}\n for word in word_lst:\n unicode_word = unicode(word)\n\n try:\n arpabet_dict[word] = self.cmu_dict[unicode_word][0]\n except:\n print(unicode_word + \" not found in CMUDict\")\n sys.exit()\n\n return arpabet_dict", "def __init__(self):\r\n self.dct = defaultdict(list)", "def def_dict():\n d1 = c.defaultdict(dict)\n print '\\nInitially first dictionary is: ', dict(d1)\n l = [('a', 1), ('b', 2), ('a', 27), ('c', 3), ('d', 4), ('c', 29), ('e', 5)]\n for a, b in l:\n d1[a] = b\n print '\\nAfter adding elements to the default dictionary, it is: ', dict(d1)\n print '\\nItems in the first dictionary is: ', d1.items()\n st = 'malayalam'\n print '\\n\\nCreating dictionary from the string', st\n d2 = c.defaultdict(int)\n print '\\nInitially second dict is:', dict(d2)\n for i in st:\n d2[i] = st.count(i)\n print \"After updation, the second dictionary becomes: \", dict(d2)", "def dict(dict: Dict[str, Pin], /) -> None:", "def standarization_ofconc(a2_data):\n aux_dic = OrderedDict()\n for i in a2_data:\n evol_tuple = a2_data[i]['conc'].keys()\n if len(evol_tuple) != 1:\n raise RuntimeError('too many tuples for conc')\n evol_tuple = evol_tuple[0]\n aux_dic[i] = a2_data[i]['conc'][evol_tuple]\n return aux_dic", "def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {('center_name', 'ANL'), ('center_project_name', None),\n ('emp_status', 'EMP'), ('barcodesequence', 'AGCGCTCACATC'),\n ('library_construction_protocol',\n 'This analysis was done as in Caporaso et al 2011 Genome '\n 'research. The PCR primers (F515/R806) were developed against '\n 'the V4 region of the 16S rRNA (both bacteria and archaea), '\n 'which we determined would yield optimal community clustering '\n 'with reads of this length using a procedure similar to that '\n 'of ref. 15. [For reference, this primer pair amplifies the '\n 'region 533_786 in the Escherichia coli strain 83972 sequence '\n '(greengenes accession no. prokMSA_id:470367).] 
The reverse '\n 'PCR primer is barcoded with a 12-base error-correcting Golay '\n 'code to facilitate multiplexing of up to 1,500 samples per '\n 'lane, and both PCR primers contain sequencer adapter '\n 'regions.'), ('linkerprimersequence', 'GTGCCAGCMGCCGCGGTAA'),\n ('target_subfragment', 'V4'), ('target_gene', '16S rRNA'),\n ('run_center', 'ANL'), ('run_prefix', 's_G1_L001_sequences'),\n ('run_date', '8/1/12'), ('experiment_center', 'ANL'),\n ('experiment_design_description',\n 'micro biome of soil and rhizosphere of cannabis plants '\n 'from CA'), ('experiment_title', 'Cannabis Soil Microbiome'),\n ('platform', 'Illumina'), ('samp_size', '.25,g'),\n ('sequencing_meth', 'Sequencing by synthesis'),\n ('illumina_technology', 'MiSeq'), ('sample_center', 'ANL'),\n ('pcr_primers',\n 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT'),\n ('study_center', 'CCME')}\n self.assertEqual(set(obs), exp)", "def __init__(self, dictionary):\n self.dict = {}\n for word in dictionary:\n abbr = self.gen_abbr(word)\n if abbr not in self.dict:\n word_set = set([word])\n self.dict[abbr] = word_set\n else:\n self.dict[abbr].add(word)", "def asdict():\n pass", "def test_create_cds_translation_dict_1(self):\n t1 = \"ABCDE\"\n t2 = \"FGHIJ\"\n t3 = \"RSTUV\"\n self.gene_data_1[\"Translation\"] = t1\n self.gene_data_2[\"Translation\"] = t2\n self.gene_data_3[\"Translation\"] = t3\n cdd_list = [self.gene_data_1, self.gene_data_2, self.gene_data_3]\n dict = find_domains.create_cds_translation_dict(cdd_list)\n\n exp_keys = {t1, t2, t3}\n with self.subTest():\n self.assertEqual(dict.keys(), exp_keys)\n with self.subTest():\n self.assertEqual(dict[t1], {self.gene_data_1[\"GeneID\"]})\n with self.subTest():\n self.assertEqual(dict[t2], {self.gene_data_2[\"GeneID\"]})\n with self.subTest():\n self.assertEqual(dict[t3], {self.gene_data_3[\"GeneID\"]})", "def cat_from_letter(cls, cog_letter, dict_output=False):\n functions = dict()\n for char in cog_letter:\n for k, v in cls.COG_letters.items():\n for k_, v_ in v.items():\n if char == k_:\n functions[char] = (k, v_)\n try:\n functions[char]\n except KeyError:\n print(f\"{char} wasn't found! 
Check if it is a valid COG letter.\")\n\n if functions:\n if dict_output:\n return functions\n else:\n output = [(k, v) for k, v in functions.items()]\n return output", "def test_create_cds_translation_dict_2(self):\n t1 = \"ABCDE\"\n t2 = \"FGHIJ\"\n self.gene_data_1[\"Translation\"] = t1\n self.gene_data_2[\"Translation\"] = t2\n self.gene_data_3[\"Translation\"] = t2\n cdd_list = [self.gene_data_1, self.gene_data_2, self.gene_data_3]\n dict = find_domains.create_cds_translation_dict(cdd_list)\n\n exp_keys = {t1, t2}\n with self.subTest():\n self.assertEqual(dict.keys(), exp_keys)\n with self.subTest():\n self.assertEqual(dict[t1], {self.gene_data_1[\"GeneID\"]})\n with self.subTest():\n self.assertEqual(dict[t2], {self.gene_data_2[\"GeneID\"],\n self.gene_data_3[\"GeneID\"]})", "def __init__(self, dictionary):\n self.abbrev_dict = {}\n for s in dictionary:\n if len(s) < 3:\n abbrev = s\n else:\n abbrev = s[0] + str(len(s) - 2) + s[-1]\n \n if abbrev not in self.abbrev_dict:\n self.abbrev_dict[abbrev] = set()\n self.abbrev_dict[abbrev].add(s)", "def test_attr_dict(self):\n obj = awstats_reader.AttrDict([('this','that'), ('thus','those')])\n self.assertEqual(obj.thus, 'those')", "def __generate_dict_of_keys_to_classification__(self):\n dict_of_assigned_citations = {}\n # duplicating citation dataset to filter as matches go on meaning\n # it should result in quicker allocation\n # can be removed to reduce memory load at expense of speed\n list_of_unassigned = []\n for key in self.dict_of_keywords:\n list_of_current_key = []\n for citation_instance in self.array_of_citations:\n if key == citation_instance.get_classification():\n list_of_current_key.append(citation_instance)\n if \"Unassigned\" == citation_instance.get_classification():\n list_of_unassigned.append(citation_instance)\n dict_of_assigned_citations[key] = list_of_current_key\n dict_of_assigned_citations[\"Unassigned\"] = list_of_unassigned\n return dict_of_assigned_citations", "def conversion_context(self):\n d = {}\n wanted = ['a', 'h']\n for x in wanted:\n if x in self.properties:\n d[x] = self.properties[x]\n return d", "def insertable_dict(self):\n\n d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs if p.key not in ('table', 'stats', '_codes')}\n\n x = {('c_' + k).strip('_'): v for k, v in d.items()}\n\n return x", "def do_LZW_Compression(dict_of_abc, list_of_data):\n \n # rozdil mezi None a [] je v pouziti metody extend na listu\n \n result = []\n P = []\n C = [] # C je vzdy jeden prvek ze vstupu\n PC = []\n \n #how it works video xplanation https://www.youtube.com/watch?v=MQ4ObKv2L_M\n \n for i in range(len(list_of_data)):\n \"\"\"\n Cyklus pres vsecky vstupni prvky\n \"\"\"\n\n C = []\n C.append(list_of_data[i])\n\n #PC je vzdy kombinace P a C\n PC = []\n PC.extend(P)\n PC.extend(C)\n\n index_founded = dict_cointains_list(dict_of_abc, PC)\n if index_founded == -1:\n #pokud PC neni ve slovniku, pridam ho tam a P = C\n dict_of_abc[len(dict_of_abc) +1] = PC\n #output P key in dictionary\n result.append(dict_cointains_list(dict_of_abc, P))\n P = C\n else:\n #pokud PC je ve slovniku P = PC pro dalsi iteraci\n P = PC\n #pridani posledniho prvku\n result.append(dict_cointains_list(dict_of_abc, P))\n return dict_of_abc, result", "def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_categories)", "def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_categories)", 
"def navigate_case_dictionary(case_list_for_run, num_cases):", "def mfunc_(d):\r\n _p= {c: k.lower() if c not in ('code', 'label', 'name') else k \r\n for c, k in zip(AGSO_PROPERTIES['props_codes'], d) }\r\n id_= d[ix_].replace('/', '_').replace(\r\n ' ', '_').replace('\"', '').replace(\"'\", '').lower()\r\n return id_, _p", "def test_fields_to_dict(self):\r\n test_data = \\\r\n \"\"\"0\tR27DLI_4812\tR27DLI_600\tR27DLI_727\tU1PLI_403\tU1PLI_8969\tU1PLI_9080\tU1PLI_9526\tW3Cecum_6642\tW3Cecum_8992\r\n1\tU1PLI_7889\r\n2\tW3Cecum_4858\r\n3\tR27DLI_3243\tR27DLI_4562\tR27DLI_6828\tR27DLI_9097\tU1PLI_2780\tU1PLI_67\tU9PSI_10475\tU9PSI_4341\tW3Cecum_5191\"\"\".splitlines() # output from cd-hit\r\n obs = fields_to_dict(test_data)\r\n exp = {\r\n '0': ['R27DLI_4812', 'R27DLI_600', 'R27DLI_727', 'U1PLI_403',\r\n 'U1PLI_8969', 'U1PLI_9080', 'U1PLI_9526', 'W3Cecum_6642', 'W3Cecum_8992'],\r\n '1': ['U1PLI_7889'],\r\n '2': ['W3Cecum_4858'],\r\n '3': ['R27DLI_3243', 'R27DLI_4562', 'R27DLI_6828', 'R27DLI_9097', 'U1PLI_2780', 'U1PLI_67', 'U9PSI_10475', 'U9PSI_4341', 'W3Cecum_5191']}\r\n self.assertEqual(obs, exp)", "def test_emapa_simple(self):\n \n result = self.emapa \n self.assertEqual(set(result[\"EMAPA:2\"]), set([\"Y:006\"]), \n \"simple mapping\")\n self.assertEqual(set(result[\"EMAPA:3\"]), set([\"Y:007\"]), \n \"simple mapping\")", "def test_mapping(self):\n vark = VarKeyword()\n assert vark.name in vark\n assert '{}_'.format(vark.name) not in vark\n assert len(vark) == 1\n assert list(vark) == [vark.name]" ]
[ "0.59698254", "0.5688297", "0.56781894", "0.56143355", "0.5542839", "0.5520396", "0.5489875", "0.5482796", "0.5428855", "0.54169875", "0.5414742", "0.54147077", "0.5387781", "0.5383238", "0.53804624", "0.5378157", "0.53751314", "0.533766", "0.53338116", "0.5317185", "0.530112", "0.5300158", "0.5291576", "0.5287203", "0.5287203", "0.52816", "0.52609473", "0.52605194", "0.52579767", "0.52557224" ]
0.61399186
0
Returns a double, the average number of days between the rating objects specified in the list parameter.
def get_diffs_of_ratings(l):
    if len(l) <= 1:
        return str(None)
    total = 0.0
    for v in range(0, len(l)-1):
        total = total + float((l[v+1]["date"] - l[v]["date"]).days)
    return str(float(total / (len(l)-1)))
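For illustration, a short usage sketch (not part of the original record); it assumes each rating is a dict holding a datetime.date under "date", and the sample values are invented:

import datetime
ratings = [
    {"date": datetime.date(2020, 1, 1)},
    {"date": datetime.date(2020, 1, 4)},
    {"date": datetime.date(2020, 1, 10)},
]
get_diffs_of_ratings(ratings)      # -> "4.5" (gaps of 3 and 6 days; note the value comes back as a string)
get_diffs_of_ratings(ratings[:1])  # -> "None" (fewer than two ratings)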
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avg(list):\n return sum(list) / len(list)", "def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)", "def average_rating(business_list):\n\tratings = []\n\ttry:\n\t\tfor b in business_list:\n\t\t\tratings.append(b['rating'])\n\texcept KeyError:\n\t\tpass\n\n\tavg_float = float(sum(ratings)) / float(len(ratings))\n\treturn math.ceil(avg_float*100)/100", "def _avg(cls, l):\n\n return sum(l) / float(len(l))", "def donation_avg(donor_list, donor):\n return sum(donor_list[donor]) // len(donor_list[donor])", "def price_average(lst):\n\n return sum(lst) / len(lst)", "def avg():\n\n # call sum method to add up the values in the collection & div by the num of items\n # call len method to compute the # of vals in collection which is divided by sum total \n mean = sum(inlist) / len(inlist)\n return mean \n\n # alternate method would be calling the reduce method with lamda \n # return reduce(lambda a, b: a + b, inlist) / len(inlist)", "def average(lst):\n return sum(lst)/len(lst)", "def CalculateListAverage(values):\n if not values:\n return 0\n return sum(values) / float(len(values))", "def average(l):\n return float(sum(l)/len(l))", "def dishlist_avg(n:list)->float:\r\n all_prices = dishlist_prices(n)\r\n return sum(all_prices)/len(all_prices)", "def get_average_rating(self):\n count = 0\n total = 0\n ratings_length = len(self.ratings)\n if ratings_length > 0:\n for rating in self.ratings:\n count += 1\n total += rating\n average = total / count\n return average\n else:\n print(\"There does not seem to be any ratings for {book}\".format(book=self.title))", "def avg(l):\n return (sum(l)/float(len(l)))", "def avg(lst: list):\n return sum(lst) / len(lst)", "def average(self, num_list):\n try:\n total = 0\n accumulator = 0\n\n for number in num_list:\n try:\n total += number\n accumulator += 1\n except Exception as e:\n print (\"Error: \", e)\n\n average = total / accumulator\n except Exception as e:\n print(\"Error: \", e)\n\n return average", "def number_list_average(numbers):\n return sum(numbers)/len(numbers)", "def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3) / 3", "def average(old_rating, new_rating, count):\n return float(round(Decimal((old_rating * count + new_rating) / (count + 1)), 1))", "def average_rating(self):\n ratings = AttractionRating.objects.filter(attraction=self)\n total_rating = 0\n for rating in ratings:\n total_rating += rating.rating\n\n # If there are no rating, then we set the average to 0\n # otherwise we calculate the average\n try:\n avg = total_rating / len(ratings)\n except ZeroDivisionError:\n avg = total_rating\n\n return avg", "def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3 + self.rating_4 + self.rating_5 + self.rating_6 + self.rating_7) / 7", "def _avg_sd_from_list(lst):\n arr = flex.double(lst)\n avg = round(flex.mean(arr), 5)\n std = round(arr.standard_deviation_of_the_sample(), 5)\n return avg, std", "def average_ratings(self):\n return get_average_rate(\n model=Rating,\n article=self.pk\n )", "def weighted_average(value_weight_list): \n numerator = sum([v * w for v,w in value_weight_list])\n denominator = sum([w for v,w in value_weight_list])\n if(denominator != 0):\n return(float(numerator) / float(denominator))\n else:\n return None", "def average(some_list):\n # This function will take a list and return average of value of element in list.\n result = 0 # Define result to contain sum of element in list.\n for i in some_list:\n result += 
i \n return result/len(some_list)", "def func(lst):\n tot = 0\n for i in lst:\n tot = tot + i\n avg = tot / len(lst)\n return avg", "def average_rating(self):\n ratings = Rating.objects.filter(game=self)\n\n if len(ratings):\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += rating.value\n\n # Calculate the averge and return it.\n average = total_rating / len(ratings)\n return average\n\n # else: \n return 0", "def getMean(list):\n return sum(list) / len(list)", "def average(l: List[float]) -> float:\n n = len(l)\n if n == 0:\n return 0\n return sum(l) / n", "def average(self, returns):\r\n return returns.mean() * self.day", "def weighted_average(value_weight_list):\n numerator = sum([v * w for v, w in value_weight_list])\n denominator = sum([w for v, w in value_weight_list])\n if(denominator != 0):\n return(float(numerator) / float(denominator))\n else:\n return None" ]
[ "0.68933636", "0.66389376", "0.6581203", "0.65615976", "0.65188193", "0.6491516", "0.64800775", "0.64580154", "0.6454823", "0.6444565", "0.6435506", "0.64231765", "0.64099354", "0.63959163", "0.6393575", "0.63914645", "0.63865495", "0.63710004", "0.6341628", "0.6338856", "0.6284986", "0.6237024", "0.62044895", "0.6193193", "0.61837155", "0.6176371", "0.61706644", "0.6141152", "0.6135633", "0.6133256" ]
0.6772968
1
Load truncation parameters from config or container defaults.
def _get_params(self, container):
    if container in TRUNC_SPEC:
        self.log.info("Truncating from preset for container {}".format(container))
        for key in [
            "dataset",
            "weight_dataset",
            "fixed_precision",
            "variance_increase",
        ]:
            attr = getattr(self, key)
            if attr is None:
                setattr(self, key, TRUNC_SPEC[container][key])
            else:
                self.log.info("Overriding container default for '{}'.".format(key))
    else:
        if (
            self.dataset is None
            or self.fixed_precision is None
            or self.variance_increase is None
        ):
            raise pipeline.PipelineConfigError(
                "Container {} has no preset values. You must define all of 'dataset', "
                "'fixed_precision', and 'variance_increase' properties.".format(
                    container
                )
            )

    # Factor of 3 for variance over uniform distribution of truncation errors
    self.variance_increase *= 3
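For context, a sketch of the shape the preset table consulted above is assumed to take; the container name and numbers are placeholders, not values from the original project:

TRUNC_SPEC = {
    "ExampleContainer": {
        "dataset": "vis",                 # placeholder; could equally be a list of dataset names
        "weight_dataset": "vis_weight",   # placeholder weight dataset name
        "fixed_precision": 1e-4,          # placeholder precision target
        "variance_increase": 1e-3,        # placeholder allowed variance increase
    },
}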
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setupConfigAnnotation(self):\n annotations = IAnnotations(self)\n settings = annotations.get(\"PLOMINOFIELDCONFIG\", None)\n if not settings:\n annotations[\"PLOMINOFIELDCONFIG\"] = PersistentDict()", "def _load_hyperopt_config(self, config: Dict[str, Any]) -> Dict[str, Any]:\n\n if \"hyperopt\" in self.args:\n # Add the hyperopt file to use\n config.update({'hyperopt': self.args.hyperopt})\n\n # If --epochs is used we add it to the configuration\n if 'epochs' in self.args and self.args.epochs:\n config.update({'epochs': self.args.epochs})\n logger.info('Parameter --epochs detected ...')\n logger.info('Will run Hyperopt with for %s epochs ...', config.get('epochs'))\n\n # If --spaces is used we add it to the configuration\n if 'spaces' in self.args and self.args.spaces:\n config.update({'spaces': self.args.spaces})\n logger.info('Parameter -s/--spaces detected: %s', config.get('spaces'))\n\n return config", "def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf", "def set_derived_configs(self):\n if 'dim' in self.config and self.config['dim'] <= 0:\n self.config['dim'] = self.descriptors['input']['dim']", "def afterSetUp(self):\n self.load_config = {}\n self.load_config['monitor_interval'] = 1\n self.load_config['limit_number_request'] = 100\n self.load_config['limit_memory_used'] = 500", "def init_config(self):\n super().init_config()\n for param in self.parameters():\n if param.name == 'source':\n continue\n self.add_config_item(param.name,\n saver=lambda p=param: getattr(p, \"value\"),\n loader=lambda x, p=param: setattr(p, \"value\", x),\n default=param.default)", "def configure(self, config: ConfigParams):\n parameters = config.get_section(\"parameters\")\n if len(parameters) > 0:\n self.__parameters = parameters", "def apply_startup_params(self):\n config = self._protocol.get_startup_config()\n \n if not isinstance(config, dict):\n raise InstrumentParameterException(\"Incompatible initialization parameters\")\n \n log.trace(\"BARS driver applying config: %s\", config)\n self._protocol.set_readonly_values()\n self.set_resource(config)", "def _load_common_config(self, config: Dict[str, Any]) -> Dict[str, Any] :\n # Log level\n if 'loglevel' in self.args.loglevel:\n 
config.update({'verbosity': self.args.loglevel})\n else:\n config.update({'verbosity': 0})\n logging.basicConfig(\n level=logging.INFO if config['verbosity'] < 1 else logging.DEBUG,\n format= '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n set_loggers(config['verbosity'])\n logger.info('Verbosity set to %s', config['verbosity'])\n\n # Add dynamic whitelist if found\n if 'dynamic_whitelist' in self.args and self.args.dynamic_whitelist:\n config['pairlist'] = {\n 'method': 'VolumePairList',\n 'config': {'number_assets': self.args.dynamic_whitelist}\n }\n logger.warning(\n 'Parameter --dynamic-whitelist has been deprecated, '\n 'and will be completely replaced by the whitelist dict in the future. '\n 'For now: using dynamically generated whitelist based on VolumePairList. '\n '(not applicable with Backtesting and Hyperopt)'\n )\n if self.args.db_url and self.args.db_url != constant.DEFAULT_DB_PROD_URL:\n config.update({'db_url': self.args.db_url})\n logger.info('Parameter --db-url detected ...')\n\n if config.get('dry_run', False):\n logger.info('Dry run is enabled')\n if config.get('db_url') in [None, constant.DEFAULT_DB_PROD_URL]:\n # Default to in-memory db for dry_run if not specified\n config['db_url'] = constant.DEFAULT_DB_DRYRUN_URL\n else:\n if not config.get('db_url', None):\n config['db_url'] = constant.DEFAULT_DB_PROD_URL\n logger.info('Dry run is disabled')\n\n if config.get('forcebuy_enable', False):\n logger.warning('`forcebuy` RPC message enabled.')\n\n # Setting max_open_trades to infinite if -1\n if config.get('max_open_trades') == -1:\n config['max_open_trades'] = float('inf')\n\n logger.info(f'Using DB: \"{config[\"db_url\"]}\"')\n\n # Check if the exchange set by the user is supported\n self.check_exchange(config)\n\n return config", "def load(self, configs, container):\n pass;", "def load_standard_parameters(self):\n paradic = {'x':'0',\n 'y':'0',\n 'n_oct':'8',\n 'n_spo':'3',\n 'sigma_min':'0.8',\n 'delta_min':'0.5',\n 'sigma_in':'0.5',\n 'C_DoG':'0.015',\n 'C_edge':'10',\n 'n_bins':'36',\n 'lambda_ori':'1.5',\n 't':'0.8',\n 'n_hist':'4',\n 'n_ori':'8',\n 'lambda_descr':'6',\n 'flag_match':'1',\n 'C_match':'0.6'}\n self.cfg['param']['paradic'] = paradic\n self.cfg.save()", "def set_training_parameters(\n self,\n config: ConfigDict,\n len_train: int,\n len_test: int,\n ):\n self.configure_steps(config, len_train, len_test)\n self.configure_reporting(config)\n self.configure_training_functions(config)", "def configure(self, config):\n super(MemoryPersistence, self).configure(config.with_default_tuples(\"options.path\", \"\"))", "def manage_config() -> dict:\n required_args = {\"embedding_size\", \"hidden_size\", \"num_layers\", \"corpus_dir\"}\n arg_groups = {\n \"general\": {\"recoding_type\"},\n \"model\": {\"embedding_size\", \"hidden_size\", \"num_layers\", \"dropout\"},\n \"train\": {\"weight_decay\", \"learning_rate\", \"batch_size\", \"num_epochs\", \"clip\", \"print_every\", \"eval_every\",\n \"model_save_path\", \"device\", \"model_name\"},\n \"logging\": {\"log_dir\"},\n \"corpus\": {\"corpus_dir\", \"max_seq_len\"},\n \"recoding\": {\"step_type\", \"num_samples\", \"mc_dropout\", \"prior_scale\", \"hidden_size\", \"weight_decay\",\n \"data_noise\", \"share_anchor\", \"use_cross_entropy\"},\n \"step\": {\"predictor_layers\", \"window_size\", \"step_size\", \"hidden_size\"}\n }\n argparser = init_argparser()\n config_object = ConfigSetup(argparser, required_args, arg_groups)\n config_dict = config_object.config_dict\n\n return config_dict", "def 
truncation(self, truncate: int) -> None:\n self._truncate = truncate", "def _load_edge_config(self, config: Dict[str, Any]) -> Dict[str, Any]:\n\n # If --timerange is used we add it to the configuration\n if 'timerange' in self.args and self.args.timerange:\n config.update({'timerange': self.args.timerange})\n logger.info('Parameter --timerange detected: %s ...', self.args.timerange)\n\n # If --timerange is used we add it to the configuration\n if 'stoploss_range' in self.args and self.args.stoploss_range:\n txt_range = eval(self.args.stoploss_range)\n config['edge'].update({'stoploss_range_min': txt_range[0]})\n config['edge'].update({'stoploss_range_max': txt_range[1]})\n config['edge'].update({'stoploss_range_step': txt_range[2]})\n logger.info('Parameter --stoplosses detected: %s ...', self.args.stoploss_range)\n\n # If -r/--refresh-pairs-cached is used we add it to the configuration\n if 'refresh_pairs' in self.args and self.args.refresh_pairs:\n config.update({'refresh_pairs': True})\n logger.info('Parameter -r/--refresh-pairs-cached detected ...')\n\n return config", "def default_configs(cls):\n config: dict = super().default_configs()\n\n config.update({\n \"file_ext\": '.txt',\n \"num_sent_per_doc\": -1,\n \"doc_break_str\": None,\n \"column_format\": cls._DEFAULT_FORMAT,\n \"entity_mention_class\": None\n })\n return config", "def __init__(self, config):\n\n # controls for scope logging\n self.vars = None\n self.log = {}\n self.conf = config\n pe.set_default_val(self.conf, 'clip_by_norm', 0.3)", "def config_dict(self):\r\n\r\n config_dict = super().config_dict\r\n\r\n if config_dict[\"figsize\"] == \"auto\":\r\n config_dict[\"figsize\"] = None\r\n elif isinstance(config_dict[\"figsize\"], str):\r\n config_dict[\"figsize\"] = tuple(\r\n map(int, config_dict[\"figsize\"][1:-1].split(\",\"))\r\n )\r\n\r\n return config_dict", "def load_config(self):\n pass", "def load_parameters(self):\n with open(INTERNAL_DATA_DIR / self.name_default_params, 'r') as f:\n return yaml.load(f, Loader=yaml.FullLoader)", "def spread_default_parameters(config, dev_cfg):\n def_cfg = config.get('DEFAULT')\n if def_cfg is None:\n return\n\n for (key, value) in def_cfg.items():\n if key not in dev_cfg:\n dev_cfg[key] = value", "def set_default_params(self, opt):\n self.config.embed_dim = opt.embed_dim or 200\n self.config.rnn_size = opt.rnn_size or 512\n self.config.nrnn_layer = opt.nrnn_layer or 2\n self.config.rnn_dropout = opt.rnn_dropout or 0.5\n self.config.rnnout_dim = 2 * self.config.rnn_size * self.config.nrnn_layer\n ## MULTIMODAL (ATTENTION)\n self.config.cnnout_dim = opt.cnnout_dim or 512\n self.config.cnnout_w = opt.cnnout_w or 14\n self.config.cnnout_h = opt.cnnout_h or 14\n self.config.cnnout_spat = self.config.cnnout_w * self.config.cnnout_h\n self.config.multfeat_dim = opt.multfeat_dim or 512\n self.config.attfeat_dim = opt.attfeat_dim or 256\n self.config.netout_dim = opt.answer_size\n ## [attlstm] in: {2*multfeat_dim, att_rnn_s_dim} {att_rnn_size, att_rnn_s_dim}\n self.config.att_rnn_size = opt.att_rnn_size or 512\n self.config.att_rnn_nlayer = opt.att_rnn_nlayer or 1\n self.config.att_rnn_dropout = opt.att_rnn_dropout or 0.0\n # TODO: There could be a protential bugs if self.config.att_rnn_nlayer > 1\n assert(self.config.att_rnn_nlayer == 1)\n self.config.att_rnn_s_dim = self.config.att_rnn_size * self.config.att_rnn_nlayer\n\n # optimization\n self.config.max_grad_norm = opt.max_grad_norm or 0.1\n self.config.initializer_scale = 0.008", "def _preprocess_config(self, config: Dict[str, 
Any]) -> Dict[str, Any]:\n return cast_config_values(\n {k: v for k, v in config.items() if k in self._hyperparameter_keys},\n config_space=self.config_space,\n )", "def _load_backtesting_config(self, config: Dict[str, Any]) -> Dict[str, Any]:\n\n # If -i/--ticker-interval is used we override the configuration parameter\n # (that will override the strategy configuration)\n if 'ticker_interval' in self.args and self.args.ticker_interval:\n config.update({'ticker_interval': self.args.ticker_interval})\n logger.info('Parameter -i/--ticker-interval detected ...')\n logger.info('Using ticker_interval: %s ...', config.get('ticker_interval'))\n\n # If -l/--live is used we add it to the configuration\n if 'live' in self.args and self.args.live:\n config.update({'live': True})\n logger.info('Parameter -l/--live detected ...')\n\n # If --enable-position-stacking is used we add it to the configuration\n if 'position_stacking' in self.args and self.args.position_stacking:\n config.update({'position_stacking': True})\n logger.info('Parameter --enable-position-stacking detected ...')\n\n # If --disable-max-market-positions is used we add it to the configuration\n if 'use_max_market_positions' in self.args and not self.args.use_max_market_positions:\n config.update({'use_max_market_positions': False})\n logger.info('Parameter --disable-max-market-positions detected ...')\n logger.info('max_open_trades set to unlimited ...')\n else:\n logger.info('Using max_open_trades: %s ...', config.get('max_open_trades'))\n\n # If --timerange is used we add it to the configuration\n if 'timerange' in self.args and self.args.timerange:\n config.update({'timerange': self.args.timerange})\n logger.info('Parameter --timerange detected: %s ...', self.args.timerange)\n\n # If --datadir is used we add it to the configuration\n if 'datadir' in self.args and self.args.datadir:\n config.update({'datadir': self._create_datadir(config, self.args.datadir)})\n else:\n config.update({'datadir': self._create_datadir(config, None)})\n logger.info('Using data folder: %s ...', config.get('datadir'))\n\n # If -r/--refresh-pairs-cached is used we add it to the configuration\n if 'refresh_pairs' in self.args and self.args.refresh_pairs:\n config.update({'refresh_pairs': True})\n logger.info('Parameter -r/--refresh-pairs-cached detected ...')\n\n if 'strategy_list' in self.args and self.args.strategy_list:\n config.update({'strategy_list': self.args.strategy_list})\n logger.info('Using strategy list of %s Strategies', len(self.args.strategy_list))\n\n if 'ticker_interval' in self.args and self.args.ticker_interval:\n config.update({'ticker_interval': self.args.ticker_interval})\n logger.info('Overriding ticker interval with Command line argument')\n\n # If --export is used we add it to the configuration\n if 'export' in self.args and self.args.export:\n config.update({'export': self.args.export})\n logger.info('Parameter --export detected: %s ...', self.args.export)\n\n # If --export-filename is used we add it to the configuration\n if 'export' in config and 'exportfilename' in self.args and self.args.exportfilename:\n config.update({'exportfilename': self.args.exportfilename})\n logger.info('Storing backtest results to %s ...', self.args.exportfilename)\n\n return config", "def _check_config(self):\n self._config[\"dataset_name\"] = MetaDataset(self._config[\"dataset_name\"])\n self._config[\"embedding_crop\"] = EmbeddingCrop(\n self._config[\"embedding_crop\"])\n if self._config[\"dataset_name\"] == MetaDataset.TIERED:\n error_message = 
\"embedding_crop: {} not supported for {}\".format(\n self._config[\"embedding_crop\"], self._config[\"dataset_name\"])\n assert self._config[\n \"embedding_crop\"] == EmbeddingCrop.CENTER, error_message", "def load_user_config(self, presentation_file, media_root, config=None):\r\n\r\n self.config['PRESENTATION_FILE'] = presentation_file\r\n self.config['MEDIA_ROOT'] = media_root\r\n\r\n if config:\r\n self.config.from_pyfile(config)", "def add_fixed_parameters_from_config_file(self, config_file):\n pass", "def configure(self, config: ConfigParams):\n self.__mode = config.get_as_string_with_default('mode', self.__mode)\n self.__min_timeout = config.get_as_integer_with_default('min_timeout', self.__min_timeout)\n self.__max_timeout = config.get_as_integer_with_default('max_timeout', self.__max_timeout)", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)" ]
[ "0.5405735", "0.5393666", "0.5269585", "0.5220231", "0.5169489", "0.5156765", "0.5130496", "0.5035296", "0.50338167", "0.5030548", "0.49979058", "0.49922258", "0.498201", "0.4959689", "0.4950035", "0.49354002", "0.4933724", "0.49333996", "0.4876323", "0.485839", "0.48395112", "0.4829363", "0.48278263", "0.48194793", "0.48182085", "0.481257", "0.48118582", "0.48046315", "0.4765788", "0.47631797" ]
0.7070541
0
Return a telescope object out of the input (either `ProductManager`, `BeamTransfer` or `TransitTelescope`).
def get_telescope(obj):
    from drift.core import telescope

    try:
        return get_beamtransfer(obj).telescope
    except RuntimeError:
        if isinstance(obj, telescope.TransitTelescope):
            return obj

    raise RuntimeError("Could not get telescope instance out of %s" % repr(obj))
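A usage sketch (not part of the original record); 'prod' stands in for an already-loaded drift ProductManager, and get_beamtransfer is assumed to be in scope as in the source module:

# prod is assumed to be a drift.core.manager.ProductManager created elsewhere;
# a BeamTransfer or a TransitTelescope instance would be accepted just the same.
tel = get_telescope(prod)
# passing the telescope back in is a no-op and returns the same object
assert get_telescope(tel) is tel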
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_beamtransfer(obj):\n from drift.core import manager, beamtransfer\n\n if isinstance(obj, beamtransfer.BeamTransfer):\n return obj\n\n if isinstance(obj, manager.ProductManager):\n return obj.beamtransfer\n\n raise RuntimeError(\"Could not get BeamTransfer instance out of %s\" % repr(obj))", "def setup(self):\n\n import os\n\n from drift.core import beamtransfer\n\n if not os.path.exists(self.product_directory):\n raise RuntimeError(\"BeamTransfers do not exist.\")\n\n bt = beamtransfer.BeamTransfer(self.product_directory)\n\n tel = bt.telescope\n\n try:\n return tel, bt, tel.feeds\n except AttributeError:\n return tel, bt", "def __init__(\n self, telescope: Union[str, ITelescope], pointing: Union[Dict[str, Any], SkyFlatsBasePointing], **kwargs: Any\n ):\n Module.__init__(self, **kwargs)\n\n # store telescope and pointing\n self._telescope = telescope\n self._pointing = pointing", "def telescope(self):\n return _coordsys.coordsys_telescope(self)", "def transformer_factory(tree, options):\n channel_name = tree.xpath(\"/channel/name\")\n if channel_name[0].text == 'PHEME_http_receiver':\n return PHEME_http_receiverTransferAgent(tree, options)\n return CommonTransferAgent(tree, options)", "def getTelescope(self):\n return self.header['TELESCOP']", "def __init__(self, donorReference='', kind=\"other\", receiverReference='', serviceUnitsError=0.0, diverseReference='', serviceUnitsEnergy=0.0, reversedId='', PricingStructure=None, line=None, UserAttributes=None, AuxiliaryAccount=None, VendorShift=None, Receipt=None, Meter=None, CustomerAccount=None, CashierShift=None, *args, **kw_args):\n #: Reference to the entity that is the source of 'amount' (for example: customer for token purchase; or supplier for free issue token).\n self.donorReference = donorReference\n\n #: Kind of transaction. 
Values are: \"other\", \"serviceChargePayment\", \"accountPayment\", \"tokenSalePayment\", \"tokenCancellation\", \"taxChargePayment\", \"tokenExchange\", \"tokenGrant\", \"diversePayment\", \"auxiliaryChargePayment\", \"meterConfigurationToken\", \"tokenFreeIssue\", \"transactionReversal\"\n self.kind = kind\n\n #: Reference to the entity that is the recipient of 'amount' (for example, supplier for service charge payment; or tax receiver for VAT).\n self.receiverReference = receiverReference\n\n #: Number of service units not reflected in 'serviceUnitsEnergy' due to process rounding or truncating errors.\n self.serviceUnitsError = serviceUnitsError\n\n #: Formal reference for use with diverse payment (traffic fine for example).\n self.diverseReference = diverseReference\n\n #: Actual amount of service units that is being paid for.\n self.serviceUnitsEnergy = serviceUnitsEnergy\n\n #: (if 'kind' is transactionReversal) Reference to the original transaction that is being reversed by this transaction.\n self.reversedId = reversedId\n\n self._PricingStructure = None\n self.PricingStructure = PricingStructure\n\n self.line = line\n\n self._UserAttributes = []\n self.UserAttributes = [] if UserAttributes is None else UserAttributes\n\n self._AuxiliaryAccount = None\n self.AuxiliaryAccount = AuxiliaryAccount\n\n self._VendorShift = None\n self.VendorShift = VendorShift\n\n self._Receipt = None\n self.Receipt = Receipt\n\n self._Meter = None\n self.Meter = Meter\n\n self._CustomerAccount = None\n self.CustomerAccount = CustomerAccount\n\n self._CashierShift = None\n self.CashierShift = CashierShift\n\n super(Transaction, self).__init__(*args, **kw_args)", "def test_create_platfrom(self):\n # procedure object\n proc1 = Procedure(\"procedure 1\", \"proc1\")\n proc2 = Procedure(\"procedure 2\", \"proc2\")\n # list of procedures\n proList = [proc1, proc2]\n # observable property object\n obs1 = ObservableProperty(\"obs-property1\", \"obs-property\")\n obs2 = ObservableProperty(\"obs-property2\", \"obs-property2\")\n obs3 = ObservableProperty(\"obs-property3\", \"obs-property3\")\n # list of observable properties\n obsList = [obs1, obs2]\n obsList2 =[obs1,obs2]\n # sensor object\n s1 = Sensor(\"Sensor 1\", \"first sensor\", obsList, proList)\n s2 = Sensor(\"Sensor 2\", \"second sensor\", obsList2, proList)\n s3 = Sensor(\"Sensor 3\", \"second sensor\", obsList2, proList)\n act1 = Actuator(\"Actuator 1\", \"first actuator\",[],[])\n act2 = Actuator(\"Actuator 2\", \"second actuator\",[],[])\n act3 = Actuator(\"Actuator 3\", \"third actuator\",[],[])\n #list of actuators\n actList =[act1,act2,act3]\n #list of sensors\n senList = [s1,s2]\n # platform object\n p1 = Platform(\"platform 1\", \"p1\", senList, actList,[])\n p1.add_sensor(s3)\n\n this_graph = cfg.get_graph()\n #print(this_graph.serialize(format='turtle'))\n print(this_graph.serialize(format=\"ttl\").decode('utf-8'))", "def transport(self) -> Optional[pulumi.Input['TransportArgs']]:\n return pulumi.get(self, \"transport\")", "async def connection_factory(*args, **kwargs):\n if args[1] == \"5L\":\n protocol.telegram = {\n LUXEMBOURG_EQUIPMENT_IDENTIFIER: CosemObject(\n [{\"value\": \"12345678\", \"unit\": \"\"}]\n ),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject(\n [{\"value\": \"123456789\", \"unit\": \"\"}]\n ),\n }\n if args[1] == \"5S\":\n protocol.telegram = {\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n if args[1] == \"Q3D\":\n protocol.telegram = {\n Q3D_EQUIPMENT_IDENTIFIER: CosemObject(\n 
[{\"value\": \"12345678\", \"unit\": \"\"}]\n ),\n }\n\n return (transport, protocol)", "def get_main_object(tc):\n return Daal(tc)", "def nominal_to_telescope(norm_coord,tel_frame):\n alt_tel,az_tel = tel_frame.pointing_direction\n alt_norm,az_norm = norm_coord.array_direction\n\n alt_trans,az_trans = offset_to_altaz(norm_coord.x,norm_coord.y,az_norm,alt_norm)\n x,y = altaz_to_offset(az_trans,alt_trans,az_tel,alt_tel)\n x = x*u.rad\n y = y*u.rad\n\n representation = CartesianRepresentation(x.to(norm_coord.x.unit),y.to(norm_coord.x.unit),0*norm_coord.x.unit)\n\n return tel_frame.realize_frame(representation)", "def create_device(cls, dev):\n obj = super().__new__(cls)\n if isinstance(dev, Device):\n obj.sycl_queue_ = dev.sycl_queue\n elif isinstance(dev, dpctl.SyclQueue):\n obj.sycl_queue_ = dev\n elif isinstance(dev, dpctl.SyclDevice):\n par = dev.parent_device\n if par is None:\n obj.sycl_queue_ = dpctl.SyclQueue(dev)\n else:\n raise ValueError(\n \"Using non-root device {} to specify offloading \"\n \"target is ambiguous. Please use dpctl.SyclQueue \"\n \"targeting this device\".format(dev)\n )\n else:\n if dev is None:\n obj.sycl_queue_ = dpctl.SyclQueue()\n else:\n obj.sycl_queue_ = dpctl.SyclQueue(dev)\n return obj", "def cast(self):\n if self.validate():\n if 'blueprint' in self.data:\n # A single blueprint\n obj = Blueprint.Blueprint()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n elif 'blueprint-book' in self.data:\n # A book of blueprints\n obj = BlueprintBook.BlueprintBook()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n else:\n # Unknown datatype. Just return the object\n return self\n \n else:\n # Broken validation means just return the object\n return self", "def transport(cls):\n cls.slot_index = len(TransportTypes)\n TransportTypes.append(cls)\n return cls", "def subject(decoy: Decoy, hardware_api: HardwareControlAPI) -> EngineStore:\n return EngineStore(\n hardware_api=hardware_api,\n # Arbitrary choice of robot and deck type. 
Tests where these matter should\n # construct their own EngineStore.\n robot_type=\"OT-2 Standard\",\n deck_type=pe_types.DeckType.OT2_SHORT_TRASH,\n )", "def camera_to_telescope(camera_coord, telescope_frame):\n x_pos = camera_coord.cartesian.x\n y_pos = camera_coord.cartesian.y\n\n rot = telescope_frame.rotation\n if rot ==0:\n x=x_pos\n y=y_pos\n else:\n x = x_pos*cos(rot) - y_pos*sin(rot)\n y = y_pos*sin(rot) + y_pos*cos(rot)\n\n f = telescope_frame.focal_length\n\n x = (x/f) * u.deg\n y = (y/f) * u.deg\n representation = CartesianRepresentation(x,y,0*u.deg)\n\n return telescope_frame.realize_frame(representation)", "def create_transport(beaver_config, file_config, logger):\n if beaver_config.get('transport') == 'rabbitmq':\n import beaver.rabbitmq_transport\n transport = beaver.rabbitmq_transport.RabbitmqTransport(beaver_config, file_config, logger)\n elif beaver_config.get('transport') == 'redis':\n import beaver.redis_transport\n transport = beaver.redis_transport.RedisTransport(beaver_config, file_config, logger)\n elif beaver_config.get('transport') == 'stdout':\n import beaver.stdout_transport\n transport = beaver.stdout_transport.StdoutTransport(beaver_config, file_config, logger)\n elif beaver_config.get('transport') == 'udp':\n import beaver.udp_transport\n transport = beaver.udp_transport.UdpTransport(beaver_config, file_config, logger)\n elif beaver_config.get('transport') == 'zmq':\n import beaver.zmq_transport\n transport = beaver.zmq_transport.ZmqTransport(beaver_config, file_config, logger)\n else:\n raise Exception('Invalid transport {0}'.format(beaver_config.get('transport')))\n\n return transport", "def __init__(self, init=None, data=None, dq=None, err=None, dq_def=None,\n filt=None, temperature=None, **kwargs):\n super(MiriTelescopeEmissionModel, self).__init__(init=init, data=data,\n dq=dq, err=err,\n dq_def=dq_def,\n **kwargs)\n \n # Data type is telescope emission map.\n self.meta.reftype = 'TEL_EMISSION'\n model_type = get_my_model_type( self.__class__.__name__ )\n if model_type is not None:\n self.meta.model_type = model_type \n\n # This is a reference data model.\n self._reference_model()\n\n # Add filter and temperature to the metadata\n if filt is not None:\n self.meta.instrument.filter = filt\n if temperature is not None:\n self.meta.telescope_temperature = temperature", "def create_from(cls, vehicle: \"ControlledVehicle\") -> \"ControlledVehicle\":\n v = cls(\n vehicle.traffic_mgr,\n vehicle.position,\n heading=vehicle.heading,\n speed=vehicle.speed,\n target_lane_index=vehicle.target_lane_index,\n target_speed=vehicle.target_speed,\n route=vehicle.route\n )\n return v", "def buildProtocol(self, addr):\n protocol = PortalBot()\n protocol.factory = self\n protocol.nickname = self.nickname\n protocol.channel = self.channel\n protocol.network = self.network\n protocol.port = self.port\n protocol.ssl = self.ssl\n protocol.nicklist = []\n return protocol", "def get_transport(self, flags):\n flags &= ~(ChannelCaps.BROADCAST)\n for tr in self.transports:\n if tr is not None and (flags & tr.capabilities) == 0:\n return tr\n return None", "def telescope_to_nominal(tel_coord,norm_frame):\n alt_tel,az_tel = tel_coord.pointing_direction\n alt_norm,az_norm = norm_frame.array_direction\n\n alt_trans,az_trans = offset_to_altaz(tel_coord.x,tel_coord.y,az_tel,alt_tel)\n x,y = altaz_to_offset(az_trans,alt_trans,az_norm,alt_norm)\n x = x*u.rad\n y = y*u.rad\n\n representation = CartesianRepresentation(x.to(tel_coord.x.unit),y.to(tel_coord.x.unit),0*tel_coord.x.unit)\n\n 
return norm_frame.realize_frame(representation)", "def Instance(self) -> TypeManager:", "def init_device(devtype, armdev):\n\n if devtype == 'SM5':\n try:\n dev = LuigsNeumann_SM5('COM3')\n devmic = Leica()\n microscope = XYMicUnit(dev, devmic, [7, 8])\n except:\n raise SerialException(\"L&N SM-5 not found.\")\n elif devtype == 'SM10':\n try:\n dev = LuigsNeumann_SM10()\n microscope = XYZUnit(dev, [7, 8, 9])\n except SerialException:\n raise SerialException(\"L&N SM-10 not found.\")\n else:\n raise SerialException(\"No supported device detected\")\n\n if armdev == 'dev1':\n arm = XYZUnit(dev, [1, 2, 3])\n elif armdev == 'dev2':\n arm = XYZUnit(dev, [4, 5, 6])\n elif armdev == 'Arduino':\n try:\n # arduino = appel classe manipulateur arduino\n #arm = XYZUnit(arduino, [1, 2, 3])\n arm = 0\n except SerialException:\n raise SerialException(\"Arduino not found.\")\n else:\n raise NameError('Unknown device for arm control.')\n\n # Adjust ramp length for accuracy\n microscope.set_ramp_length([0, 1, 2], 3)\n arm.set_ramp_length([0, 1, 2], 3)\n\n return dev, microscope, arm", "def create_target_api( self, prx=None ):\n commer = rts2comm()\n try:\n targ = commer.get_target(self.name)\n except Exception as err:\n if self.name.endswith(\"target\"):\n raise NameError(\"Target can not end with `target'\")\n else:\n raise\n\n if targ is None: #target does not exist\n ra = Angle( self.ra, unit=u.hour )\n dec = Angle( self.dec, unit=u.deg )\n\n targid = commer.create_target( self.name, ra.deg, dec.deg )\n\n else:\n targid = targ[0][0]\n\n\n\n return targid", "def robot(self):\n return equipment_module.Equipment(\n self._get_attr('extraction_robot_id'))", "def __init__(self, source, product, dag, name, params=None):\n self._params = params or {}\n self._name = name\n self._source = self._init_source(source)\n\n if dag is None:\n raise TypeError('DAG cannot be None')\n\n self.dag = dag\n dag._add_task(self)\n\n if self._source is None:\n raise TypeError('_init_source must return a value, got None')\n\n if not isinstance(self._source, Source):\n raise TypeError('_init_source must return a subclass of Source')\n\n if isinstance(product, Product):\n self._product = product\n\n if self.PRODUCT_CLASSES_ALLOWED is not None:\n if not isinstance(self._product, self.PRODUCT_CLASSES_ALLOWED):\n raise TypeError('{} only supports the following product '\n 'classes: {}, got {}'\n .format(type(self).__name__,\n self.PRODUCT_CLASSES_ALLOWED,\n type(self._product).__name__))\n else:\n # if assigned a tuple/list of products, create a MetaProduct\n self._product = MetaProduct(product)\n\n if self.PRODUCT_CLASSES_ALLOWED is not None:\n if not all(isinstance(p, self.PRODUCT_CLASSES_ALLOWED)\n for p in self._product):\n raise TypeError('{} only supports the following product '\n 'classes: {}, got {}'\n .format(type(self).__name__,\n self.PRODUCT_CLASSES_ALLOWED,\n type(self._product).__name__))\n\n self._logger = logging.getLogger('{}.{}'.format(__name__,\n type(self).__name__))\n\n self.product.task = self\n self.client = None\n\n self._status = TaskStatus.WaitingRender\n self.build_report = None\n self._on_finish = None\n self._on_failure = None", "def __init__(self, antenna=None, board=None, context=None, delay_timing_info=None, frequency=None, fsk_modulation_info=None, gateway_id=None, gps_epoch_timing_info=None, immediately_timing_info=None, lora_modulation_info=None, modulation=None, power=None, timing=None): # noqa: E501 # noqa: E501\n\n self._antenna = None\n self._board = None\n self._context = None\n 
self._delay_timing_info = None\n self._frequency = None\n self._fsk_modulation_info = None\n self._gateway_id = None\n self._gps_epoch_timing_info = None\n self._immediately_timing_info = None\n self._lora_modulation_info = None\n self._modulation = None\n self._power = None\n self._timing = None\n self.discriminator = None\n\n if antenna is not None:\n self.antenna = antenna\n if board is not None:\n self.board = board\n if context is not None:\n self.context = context\n if delay_timing_info is not None:\n self.delay_timing_info = delay_timing_info\n if frequency is not None:\n self.frequency = frequency\n if fsk_modulation_info is not None:\n self.fsk_modulation_info = fsk_modulation_info\n if gateway_id is not None:\n self.gateway_id = gateway_id\n if gps_epoch_timing_info is not None:\n self.gps_epoch_timing_info = gps_epoch_timing_info\n if immediately_timing_info is not None:\n self.immediately_timing_info = immediately_timing_info\n if lora_modulation_info is not None:\n self.lora_modulation_info = lora_modulation_info\n if modulation is not None:\n self.modulation = modulation\n if power is not None:\n self.power = power\n if timing is not None:\n self.timing = timing", "def fromTrapezoid(cls, trap):\n if isinstance(trap, cls):\n new = type(trap)()\n new.vertices = cls.newVertices(trap.vertices)\n new.size = trap.size\n new.clockwise = trap.clockwise\n return new\n raise TypeError(\n \"TypeError:\\tExpected Trapezoid, \"+\n f\"received {type(trap).__name__}\"\n )" ]
[ "0.59094083", "0.55723965", "0.52075577", "0.49067524", "0.4869637", "0.47760263", "0.46497482", "0.46300894", "0.45779586", "0.45738086", "0.4571514", "0.4460605", "0.44365323", "0.4436234", "0.44346416", "0.44056034", "0.43759432", "0.43554714", "0.4353904", "0.43461516", "0.4338783", "0.4338763", "0.43346506", "0.4289909", "0.42841122", "0.4283856", "0.42828378", "0.4268637", "0.426838", "0.42369333" ]
0.7030609
0
Return a BeamTransfer object out of the input (either `ProductManager` or `BeamTransfer`).
def get_beamtransfer(obj): from drift.core import manager, beamtransfer if isinstance(obj, beamtransfer.BeamTransfer): return obj if isinstance(obj, manager.ProductManager): return obj.beamtransfer raise RuntimeError("Could not get BeamTransfer instance out of %s" % repr(obj))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self):\n\n import os\n\n from drift.core import beamtransfer\n\n if not os.path.exists(self.product_directory):\n raise RuntimeError(\"BeamTransfers do not exist.\")\n\n bt = beamtransfer.BeamTransfer(self.product_directory)\n\n tel = bt.telescope\n\n try:\n return tel, bt, tel.feeds\n except AttributeError:\n return tel, bt", "def cast(self):\n if self.validate():\n if 'blueprint' in self.data:\n # A single blueprint\n obj = Blueprint.Blueprint()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n elif 'blueprint-book' in self.data:\n # A book of blueprints\n obj = BlueprintBook.BlueprintBook()\n obj.versionCode = self.versionCode\n obj.data = self.data\n return obj\n else:\n # Unknown datatype. Just return the object\n return self\n \n else:\n # Broken validation means just return the object\n return self", "def transfer(self, *args, **kwargs):\n return self.dpay.transfer(*args, **kwargs)", "def transformer_factory(tree, options):\n channel_name = tree.xpath(\"/channel/name\")\n if channel_name[0].text == 'PHEME_http_receiver':\n return PHEME_http_receiverTransferAgent(tree, options)\n return CommonTransferAgent(tree, options)", "def transfer_from_result(cls, result: JSON, **kwargs: Any) -> Transfer:\n res = cls.normalize_transfer_result(result)\n res.update(kwargs)\n\n return Transfer(\n hash=to_bytes(hexstr=res['hash']),\n sender=to_checksum_address(res['sender']),\n success=res['success'],\n timestamp=int(res['timestamp']),\n to=to_checksum_address(res['to']),\n token=to_checksum_address(res['token']),\n value=int(res['value']),\n )", "def _construct(self, dataflow):\n dataflow = copy_dataflow(dataflow, self.container)\n return dataflow", "def create_transfer(self, src, dest, **kw):\n _submission_id = self._get_submission_id()\n if not _submission_id:\n logger.error(\"Globus: Unable to obtain Globus transfer submission ID\")\n return None\n _transfer = api_client.Transfer(_submission_id, src, dest,\n notify_on_succeeded=False, notify_on_failed=False, notify_on_inactive=False, **kw)\n self.transfer = _transfer\n return _transfer", "def get_transfer(self):\n return self._transfer", "def transfer_to(self, other):\n # type: (ImmutableJar) -> tuple\n if isinstance(other, ImmutableJar):\n other_potential = other.capacity - other.units\n transfer = min(other_potential, self.units)\n return (ImmutableJar(capacity=self.capacity,\n units=(self.units - transfer)),\n ImmutableJar(capacity=other.capacity,\n units=(other.units + transfer)),)\n return NotImplemented", "def _to_storage_model(self, store, result):\n file_dict = result.as_dict()\n file_dict.pop('object_type')\n file_dict['store'] = store\n return StorageModel(**file_dict)", "def transfer_from(self, other):\n # type: (ImmutableJar) -> tuple\n (jar2, jar1,) = self.transfer_to(other)\n return jar1, jar2,", "def transduce(this, xform):\n return Transduce(this, xform)", "def Transfer(\n vendor=\"aws\", bucket=None, use_encrpytion=True, ignore_prefix=False, profile=None, config=None\n):\n return setup_vendor(\n vendor,\n bucket=bucket,\n use_encryption=use_encrpytion,\n ignore_prefix=ignore_prefix,\n profile=profile,\n config=config\n )", "def retrieveTransferSubscription():\n if GlobalValues._transferSubscription == None:\n GlobalValues._transferSubscription = \\\n _getSubscription(Workflow(spec = \"FileTransfer\", \n owner = \"CMSTier0\",\n name = \"FileTransfer\"),\n Fileset(name = \"Transferable\")\n )\n \n return GlobalValues._transferSubscription", "def __init__(self, source, product, dag, name, 
params=None):\n self._params = params or {}\n self._name = name\n self._source = self._init_source(source)\n\n if dag is None:\n raise TypeError('DAG cannot be None')\n\n self.dag = dag\n dag._add_task(self)\n\n if self._source is None:\n raise TypeError('_init_source must return a value, got None')\n\n if not isinstance(self._source, Source):\n raise TypeError('_init_source must return a subclass of Source')\n\n if isinstance(product, Product):\n self._product = product\n\n if self.PRODUCT_CLASSES_ALLOWED is not None:\n if not isinstance(self._product, self.PRODUCT_CLASSES_ALLOWED):\n raise TypeError('{} only supports the following product '\n 'classes: {}, got {}'\n .format(type(self).__name__,\n self.PRODUCT_CLASSES_ALLOWED,\n type(self._product).__name__))\n else:\n # if assigned a tuple/list of products, create a MetaProduct\n self._product = MetaProduct(product)\n\n if self.PRODUCT_CLASSES_ALLOWED is not None:\n if not all(isinstance(p, self.PRODUCT_CLASSES_ALLOWED)\n for p in self._product):\n raise TypeError('{} only supports the following product '\n 'classes: {}, got {}'\n .format(type(self).__name__,\n self.PRODUCT_CLASSES_ALLOWED,\n type(self._product).__name__))\n\n self._logger = logging.getLogger('{}.{}'.format(__name__,\n type(self).__name__))\n\n self.product.task = self\n self.client = None\n\n self._status = TaskStatus.WaitingRender\n self.build_report = None\n self._on_finish = None\n self._on_failure = None", "def __await__(self) -> \"Generator[Get[_Product], None, _Product]\":\n result = yield self\n return cast(_Product, result)", "def get(cls) -> BombFactory:\n activity = ba.getactivity()\n factory = activity.customdata.get(cls._STORENAME)\n if factory is None:\n factory = BombFactory()\n activity.customdata[cls._STORENAME] = factory\n assert isinstance(factory, BombFactory)\n return factory", "def bmesh_copy_from_object(obj, transform=True, triangulate=True, apply_modifiers=False):\n\n assert obj.type == 'MESH'\n\n if apply_modifiers and obj.modifiers:\n import bpy\n depsgraph = bpy.context.evaluated_depsgraph_get()\n obj_eval = obj.evaluated_get(depsgraph)\n me = obj_eval.to_mesh()\n bm = bmesh.new()\n bm.from_mesh(me)\n obj_eval.to_mesh_clear()\n else:\n me = obj.data\n if obj.mode == 'EDIT':\n bm_orig = bmesh.from_edit_mesh(me)\n bm = bm_orig.copy()\n else:\n bm = bmesh.new()\n bm.from_mesh(me)\n\n # TODO. 
remove all customdata layers.\n # would save ram\n\n if transform:\n bm.transform(obj.matrix_world)\n\n if triangulate:\n bmesh.ops.triangulate(bm, faces=bm.faces)\n\n return bm", "def create_transport(beaver_config, file_config, logger):\n if beaver_config.get('transport') == 'rabbitmq':\n import beaver.rabbitmq_transport\n transport = beaver.rabbitmq_transport.RabbitmqTransport(beaver_config, file_config, logger)\n elif beaver_config.get('transport') == 'redis':\n import beaver.redis_transport\n transport = beaver.redis_transport.RedisTransport(beaver_config, file_config, logger)\n elif beaver_config.get('transport') == 'stdout':\n import beaver.stdout_transport\n transport = beaver.stdout_transport.StdoutTransport(beaver_config, file_config, logger)\n elif beaver_config.get('transport') == 'udp':\n import beaver.udp_transport\n transport = beaver.udp_transport.UdpTransport(beaver_config, file_config, logger)\n elif beaver_config.get('transport') == 'zmq':\n import beaver.zmq_transport\n transport = beaver.zmq_transport.ZmqTransport(beaver_config, file_config, logger)\n else:\n raise Exception('Invalid transport {0}'.format(beaver_config.get('transport')))\n\n return transport", "def from_flow(cls, flow: SequenceFlow, lane, backtrack_to, indent):\n instance = cls(\n spec_id=flow.id,\n name=flow.name,\n description=flow.name,\n lane=lane,\n backtrack_to=backtrack_to,\n indent=indent\n )\n instance.set_spec_type(flow)\n return instance", "def create_from_proposal(cls, proposal):\n obj = cls()\n obj.load_from_proposal(proposal)\n return obj", "def setup(self):\n\n import os\n\n from drift.core import manager\n\n if not os.path.exists(self.product_directory):\n raise RuntimeError(\"Products do not exist.\")\n\n # Load ProductManager and Timestream\n pm = manager.ProductManager.from_config(self.product_directory)\n\n return pm", "def get_instance(self, project, parameters):\n\t\t\n\t\tparameters = project.process_node_parameters(\n\t\t\tparameters,\n\t\t\t[\"destination\", \"from\", \"to\"],\n\t\t\t{\"replace\": False, \"retry\": 1},\n\t\t\t{\"destination\": \"variable_name\", \"from\": \"non_empty_string\", \"to\": \"non_empty_string\", \"replace\": \"boolean\", \"retry\": \"integer\"}\n\t\t\t)\n\n\t\treturn DownloadCommand(project, parameters[\"destination\"], parameters[\"from\"], parameters[\"to\"], parameters[\"replace\"], parameters[\"retry\"])", "def _create_transfer(self, m, comp, prod_name):\n name = comp.name\n # transfer functions\n # e.g. 
2A + 3B -> 1C + 2E\n # get linear coefficients\n # TODO this could also take a transfer function from an external Python function assuming\n # we're careful about how the expression-vs-float gets used\n # and figure out how to handle multiple ins, multiple outs\n ratios = self._get_transfer_coeffs(m, comp)\n ref_r, ref_name, _ = ratios.pop('__reference', (None, None, None))\n for resource, ratio in ratios.items():\n r = m.resource_index_map[comp][resource]\n rule_name = '{c}_{r}_{fr}_transfer'.format(c=name, r=resource, fr=ref_name)\n rule = partial(self._transfer_rule, ratio, r, ref_r, prod_name) # XXX\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, rule_name, constr)", "def createCasaTool(mytool):\n if (type(casac.Quantity) != type): # casa 4.x\n myt = mytool()\n else: # casa 3.x\n myt = mytool.create()\n return(myt)", "def _from_other(cls, obj):", "def create_mediatedtransfer(self, transfer_initiator, transfer_target, fee,\n amount, expiration, hashlock):\n\n locked_transfer = self.create_lockedtransfer(\n amount,\n expiration,\n hashlock,\n )\n\n mediated_transfer = locked_transfer.to_mediatedtransfer(\n transfer_target,\n transfer_initiator,\n fee,\n )\n return mediated_transfer", "def storage_factory():\n return storage(transaction.manager, **kwargs)", "def factory(name: str, properties: dict, node: FtpNode = None) -> FtpObjectBase:\n properties = defaultdict(int, properties)\n if properties[\"type\"].lower() == \"file\":\n return FtpFile(\n name,\n properties[\"type\"],\n properties[\"size\"],\n properties[\"modify\"],\n node=node,\n )\n if properties[\"type\"].lower() == \"dir\":\n return FtpDirectory(\n name,\n properties[\"type\"],\n properties[\"size\"],\n properties[\"modify\"],\n node=node,\n )", "def GetTransferParamTool(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_WireDivide_GetTransferParamTool(self, *args)" ]
[ "0.5573253", "0.50620794", "0.49997687", "0.4947898", "0.48023778", "0.48023617", "0.47191608", "0.47186506", "0.46855393", "0.46065444", "0.4585532", "0.4478559", "0.4471264", "0.445144", "0.44428593", "0.44386634", "0.4436773", "0.44343606", "0.44192073", "0.44131085", "0.44085488", "0.4379878", "0.43702668", "0.43646777", "0.43508914", "0.4340252", "0.43327257", "0.43291658", "0.4324737", "0.43212542" ]
0.76120263
0
The main function that finds all solutions for a single clue. Uses the parser to find all possible parse trees, then calls the solving function for each parse tree according to its clue type. Returns an ordered list of all possible solutions with score > MIN_SOLUTION_VALUE.
def solve(clue, solution_format): # Define parser grammar_str = GrammarDefinitions.define_grammar(clue) grammar = nltk.CFG.fromstring(grammar_str) parser = nltk.ChartParser(grammar) solutions = [] # Get all possible solutions for tree in parser.parse(clue): _handle_abbreviations(tree) type = tree[0].label() solutions += SOLVER_DICT[type](tree[0], solution_format) # Filter only relevant solutions solutions.sort(key=lambda x: x[1], reverse=True) solutions = [solution for solution in solutions if solution[1] > MIN_SOLUTION_VALUE and solution[0] not in clue] return solutions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def showSolution(bestTree)->list:\r\n bestSon = bestTree\r\n solved = bestTree.value\r\n minDepth = bestTree.depth\r\n solution = []\r\n while bestSon.sons:\r\n #print(bestSon.state)\r\n solution.append(bestSon.state)\r\n bestSon = getBestSon(bestSon, minDepth)\r\n #print(bestSon.state)\r\n solution.append(bestSon.state)\r\n if solved == 1:\r\n #print(\"Minimum necessary total trips:\", bestSon.depth)\r\n solution.append(minDepth)\r\n else:\r\n solution.append(-1)\r\n return solution", "def solve_complete(puzzle, verbose=False):\n sol = puzzle.extensions()\n s = []\n for i in sol:\n if verbose == True:\n print(i)\n if not i.is_solved():\n solu = solve_complete(i)\n for i in solu:\n s.append(i)\n else:\n s.append(i)\n return s", "def _try_heuristics(f):\n if f.is_ground:\n return []\n if f.is_monomial:\n return [S.Zero]*f.degree()\n\n if f.length() == 2:\n if f.degree() == 1:\n return list(map(cancel, roots_linear(f)))\n else:\n return roots_binomial(f)\n\n result = []\n\n for i in [-1, 1]:\n if not f.eval(i):\n f = f.quo(Poly(f.gen - i, f.gen))\n result.append(i)\n break\n\n n = f.degree()\n\n if n == 1:\n result += list(map(cancel, roots_linear(f)))\n elif n == 2:\n result += list(map(cancel, roots_quadratic(f)))\n elif f.is_cyclotomic:\n result += roots_cyclotomic(f)\n elif n == 3 and cubics:\n result += roots_cubic(f, trig=trig)\n elif n == 4 and quartics:\n result += roots_quartic(f)\n elif n == 5 and quintics:\n result += roots_quintic(f)\n\n return result", "def solve(puzzle, verbose=False):\n sol = puzzle.extensions()\n s = puzzle\n for i in sol:\n if verbose == True:\n print(i)\n if not i.is_solved():\n s = solve(i)\n if s != puzzle:\n break\n else:\n s = i\n break\n return s", "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "def solve(self, repository, verbose=0):\n self.max_depth = 0\n solutions = []\n for solution in self.solve_all(repository, verbose):\n solutions.append(solution)\n return solutions", "def solver(formula):\n # dictionary initializing output solution\n assignments={}\n\n # check and simplify unit clauses\n for clause in formula:\n # if clause is a unit clause\n if len(clause)==1:\n # extract random literal from clause\n var,val=get_from_set(clause)\n # make assignment such that unit clause is true\n assignments[var] = val\n # update rest of the formula with such assignment\n formula = expand(formula,var,val)\n\n # RECURSION BASE CASE 1: found one of possible solutions\n # NOTE: since I eliminate clauses once satisfied, list is \n # empty when all clauses are satisfied. 
\n if not formula:\n return assignments\n\n # RECURSION BASE CASE 2: impossible due to contradiction\n # NOTE: if any of the clauses is false, then no solution\n if not all(formula):\n return None\n\n # CORE OF RECURSION: recursive simplification of CNF formula\n var, val = get_from_set(formula[0])\n for attempt in (val, not val): # e.g try True, if no success try False \n assignments[var] = attempt\n new_assignments = solver(expand(formula,var,attempt))\n if new_assignments is not None:\n assignments.update(new_assignments)\n return assignments\n\n # if we get to this line, neither attempt yields a solution\n return None", "def solve_part1(input, verbose=False):\n equations = parse(input)\n\n result = []\n for eq in equations:\n result.append(solve_equation_same_precedence(eq, verbose))\n\n if verbose:\n print(f\"results: {result}\")\n\n return sum(result)", "def solve(num_wizards, num_constraints, wizards, constraints): \n global wiz_const\n wiz_const = mapConstraints(wizards, constraints)\n partial_soltns = []\n\n # counter for priority queue since it doesn't allow \n # identical priorities\n k = 0\n\n # list of wizards sorted by lowest to highest degree\n sorted_wiz = sortWizByConsts(wiz_const)\n wiz_rankings = {wiz: i for i, wiz in enumerate(sorted_wiz)}\n\n const_set = set(map(tuple, constraints))\n for i in range(4) : \n heapq.heappush(partial_soltns, (0, k, nx.DiGraph(), const_set.copy()))\n k += 1\n\n print(\"setup done, commencing solving\")\n\n while len(partial_soltns) : \n\n # for partial_soltn, const_set in partial_soltns : \n# partial_soltns.remove(partial_soltn)\n num_seen, _, partial_soltn, const_set = heapq.heappop(partial_soltns)\n const = findNextConst(partial_soltn, const_set, wiz_rankings)\n print(\"seen \" + str(len(partial_soltn)) + \"\\t num partial_solutions\\t\" + str(len(partial_soltns)))\n try : \n const_set.remove(const)\n except KeyError : \n print(\"BAD SHIT\")\n pass\n possible_arrangements = [(const[0], const[1], const[2]),\n (const[2], const[0], const[1]), \n (const[2], const[1], const[0]),\n (const[1], const[0], const[2])]\n for arr in possible_arrangements:\n soltn = partial_soltn.copy()\n a, b, c = arr\n if not (soltn.has_node(a) and soltn.has_node(b) and nx.has_path(soltn, a, b)) : \n soltn.add_edge(a, b)\n if not (soltn.has_node(b) and soltn.has_node(c) and nx.has_path(soltn, b, c)) : \n soltn.add_edge(b, c)\n # see if we violated any other constraints (seen or not seen)\n is_valid, num_wiz = validNumWiz(soltn, const_set)\n\n if is_valid and len(list(nx.simple_cycles(soltn))) == 0 :\n heapq.heappush(partial_soltns, (-len(soltn), k, soltn, const_set.copy()))\n k += 1\n # are we done?\n if num_wiz == num_wizards :\n print(\"FINAL SOLUTION (found without processing all constraints but validating against them)\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n if foundCompleteOrdering(heapq.heappop(partial_soltns)) : \n print(\"FINAL SOLUTION\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n print(\"NO SOLUTION FOUND\")\n return \"\"", "def solve(self) -> List[Board]:\n # greedy search\n for seq in permutations([i for i in range(self.n)]):\n b = Board(n=self.n)\n for j in range(self.n):\n b.set_queen(at=(j, seq[j]))\n if validate(board=b):\n self.results.append(b)\n # return early if requires taking a solution\n if self.take_one_solution:\n break\n return self.results", "def solution(self):\n return [(\"simple 1\", 1.),\n (\"simple 
2\", 1.),\n (\"simple 3\", 1.),\n (\"simple 4\", 1.),\n (\"simple 5\", 1.),\n (\"simple 10\", 1.),\n (\"simple 15\", 1.),\n (\"thai 1\", 1.),\n (\"thai 2\", 1.),\n (\"thai 3\", 1.),\n (\"thai 4\", 1.),\n (\"thai 5\", 1.),\n (\"thai 10\", 1.),\n (\"thai 15\", 1.),\n ]", "def solve(self):\n self.a_tree = self.make_tree(self.a_strings)\n self.b_tree = self.make_tree(self.b_strings)\n\n # apply prefix filter\n self.s_options_for_beginning = self.prefix_filter()\n if len(self.s_options_for_beginning) == 0:\n self.final_sequence = 'IMPOSSIBLE'\n return\n\n # apply postfix filter\n self.s_options_for_ending = self.postfix_filter()\n if len(self.s_options_for_ending) == 0:\n self.final_sequence = 'IMPOSSIBLE'\n return\n\n # apply length balance filter (on the result after prefix and postfix filter)\n self.s_combinations_by_length = self.length_balance_filter(self.s_options_for_beginning,\n self.s_options_for_ending)\n if len(self.s_combinations_by_length) == 0:\n self.final_sequence = 'IMPOSSIBLE'\n return\n\n # apple elements balance filter (on the result after length balance filter)\n self.s_combinations_by_elements = self.elements_balance_filter(self.s_combinations_by_length)\n if len(self.s_combinations_by_elements) == 0:\n self.final_sequence = 'IMPOSSIBLE'\n return\n\n # sort filtered combinations by its elements length in ascending order\n self.s_combinations_filtered = sorted(self.s_combinations_by_elements.items())\n\n # for each combinations length try to find the lexicographically shortest sequence using depth-first search\n for length, combinations in self.s_combinations_filtered:\n for combination in combinations:\n self.combination = combination\n self.dfs()\n\n # return sequence as soon as we got one\n if self.final_sequence:\n return\n\n # otherwise - return 'IMPOSSIBLE'\n if not self.final_sequence:\n self.final_sequence = 'IMPOSSIBLE'\n return", "def run(self):\n while self.proteins:\n \n protein = self.get_next_protein()\n \n # Get the next amino acid in the chain.\n amino_position = protein.get_unplaced_amino_position()\n if amino_position is not None:\n self.build_children(protein, amino_position)\n else:\n self.check_solution(protein)\n\n # Set's the output to be the protein with the highest score.\n protein = self.best_solution\n\n return self.best_solution", "def solver(equation, verbose, short, round_to):\n\tnoms = normalize(equation)\n\tequat_orig = convert_to_list(noms)\n\tif not equat_orig:\n\t\treturn\n\tif verbose:\n\t\tcount_terms(equat_orig)\n\tequat_reduced = equat_reduce(equat_orig)\n\tpolyn_deg = get_polyn_degree(equat_reduced)\n\tprint(f'\\033[93mReduced form is: \\033[0m{format_equation_output(equat_reduced, polyn_deg, short)}')\n\tcompute_roots(equat_reduced, polyn_deg, verbose, round_to)\n\treturn", "def main():\n dirname = os.path.dirname(__file__)\n input_source = os.path.join(dirname, '..', 'input1.txt')\n # Make list, since the generator has to be used multiple times\n d = data_parser(input_source)\n return (solver_1star(d), solver_2star(d))", "def run(self, max_depth):\n while len(self.stack) > 0:\n state = self.get_next_state()\n\n if state.is_solution():\n self.solutions.append(state.moves)\n\n if len(state.moves) < max_depth:\n self.create_children(state)\n\n self.archive[state.get_tuple()] = len(state.moves)\n\n # sort solutions best to worst\n self.solutions.sort(key=len)\n\n if self.solutions:\n return self.solutions[0]\n\n print(\"This depth is not sufficient.\")\n return []", "def solve_best(self, repository, cost_func, verbose=0):\n self.verbose 
= verbose\n self.max_depth = 0\n best_cost = None\n for solution in self._solve(repository):\n cost = cost_func(**solution)\n if best_cost is None or cost <= best_cost:\n best_cost = cost\n yield solution, cost", "def find_solution(self):\n print(\"\\nFinding ICTS Solution...\")\n ######### Fill in the ICTS Algorithm here #########\n result = self.stat_tracker.time(\"time\", lambda: self.bfs())\n if result == -1:\n self.stat_tracker.stats['time'] = -1\n return []\n self.stat_tracker.write_stats_to_file(self.stat_tracker.get_results_file_name())\n return result\n ###################################################", "def get_solution(self):\n start_time = time.clock()\n frontier = [Node(self, None, 0, None)]\n explored = []\n visited = 0\n\n while True:\n visited += 1\n # pop the lowest value from the frontier (sorted using bisect, so pop(0) is the lowest)\n node = frontier.pop(0)\n\n # if the current node is at the goal state, we're done! \n if node.board.h() == 0:\n # recursively compile a list of all the moves\n moves = []\n while node.parent:\n moves.append(node.action)\n node = node.parent\n moves.reverse()\n\n print(\"Time:\", time.clock() - start_time)\n return calcal(moves, self.original)\n # print(\"Solution found!\")\n # print(\"Moves:\", len(moves))\n # print(\"Nodes visited:\", visited)\n # print(\"All moves:\", \", \".join(str(move) for move in moves))\n # break\n else:\n # we're not done yet:\n # expand the node, and add the new nodes to the frontier, as long\n # as they're not in the frontier or explored list already\n for new_node in node.expand():\n if new_node not in frontier and new_node not in explored:\n # use bisect to insert the node at the proper place in the frontier\n bisect.insort(frontier, new_node)\n \n explored.append(node)", "def get_all_solutions(use_local_cache) -> List[SolutionInfo]:\n if use_local_cache:\n scores = get_from_file()\n else:\n scores = get_from_web()\n\n return [SolutionInfo(user=item['login'], hole=item['hole'], lang=item['lang'],\n strokes=int(item['strokes']), submitted=item['submitted'])\n for item in scores]", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # 
solved = __a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def run(self, op):\n rules = self.rules_dnf()\n self.formula.append(rules)\n try:\n clue_form = [self.eval_clue(clue) for clue in self.clueset]\n except Exception as e:\n print(\"Invalid input: check the format of puzzle clues\")\n print(e)\n return\n self.formula = self.formula + clue_form\n self.formula = And(*[f.to_dnf() for f in self.formula ])\n\n sol, count = self.solve()\n print(\"Number of possible solutions: \", count)\n print(\"Solution:\")\n print(sol)\n\n if (op == \"NONE\"):\n return\n\n if (op == \"MIN\" or op == \"ALL\"):\n print(\"\\n\\nMinimized formula:\")\n print(self.translate_f(self.eval_espresso()))\n\n if (op == \"RED\" or op == \"ALL\"):\n print(\"\\n\\nSets of clues that can be removed to still provide a single solution:\")\n print(self.extra_clues(rules))\n\n if (op == \"ALT\" or op == \"ALL\"):\n print(\"\\n\\nAlternative equivalent clueset example:\")\n pprint(self.alt_clueset())", "def main():\n input_source = \"../input1.txt\"\n # Make list, since the generator has to be used multiple times\n d = list(data_parser(input_source))\n return (solver_1star(d),solver_2star(d))", "def solve(self):", "def all_solutions(size: int) -> None:\n print(f\"Results for board of size {size}:\")\n board = Board(size)\n results = solve_all(col=0, board=board, results=[])\n if results == []:\n print(f\"No valid arrangement for board of size {size}\\n\")\n for result in results:\n for row in result:\n print(row)\n print()", "def print_solution():\n pass", "def get_all_solutions(self, algorithm='backtracking', **kwargs):\n return Solver(self).solve(algorithm=algorithm, take_first=False, **kwargs)", "def solution(data):\n lines = preprocess(data)\n solver = Code(lines)\n return solver.solve()" ]
[ "0.6172393", "0.6126036", "0.61067355", "0.59318", "0.5886695", "0.5886695", "0.5886695", "0.58592933", "0.5837581", "0.57652533", "0.570048", "0.5684845", "0.56494737", "0.56355613", "0.56352055", "0.5631747", "0.5605624", "0.5603883", "0.55791163", "0.5574826", "0.5563982", "0.55575204", "0.5547445", "0.5537939", "0.55355114", "0.5514787", "0.55033314", "0.54842246", "0.5470201", "0.5459639" ]
0.6631623
0
Solver for double synonym clues
def _solve_double_synonym(parse_tree, solution_format=None): def _get_value(solution_list, word): # Gets the value of a word in a solution list for solution in solution_list: if solution[0] == word: return solution[1] return 0 # Get the two synonym parts first_syn, second_syn = _get_parts_ignore_EQU(parse_tree) first_syn = _create_sentence(first_syn) second_syn = _create_sentence(second_syn) # Get top 1000 matching scores for each part of the clue if solution_format is not None: first_solutions = SimilaritySolver.solve(first_syn, length=solution_format.get_total_length(spaces=True)) second_solutions = SimilaritySolver.solve(second_syn, length=solution_format.get_total_length(spaces=True)) second_words = [word for word, _ in second_solutions if solution_format.check(word)] else: first_solutions = SimilaritySolver.solve(first_syn) second_solutions = SimilaritySolver.solve(second_syn) second_words = [word for word, _ in second_solutions] # Combine both lists, with the value being the product of the value for eac syn part solutions = [(solution[0], _get_value(second_solutions, solution[0]) * solution[1]) for solution in first_solutions if solution[0] in second_words] return solutions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve(self):", "def test_synonym(self): \n pass", "def test_syndome_LUT(self):\r\n syns = []\r\n errvecs = golay._make_3bit_errors()\r\n for errvec in errvecs:\r\n syn = tuple(numpy.mod(numpy.dot(errvec, golay.DEFAULT_H.T), 2))\r\n syns.append(syn)\r\n self.assertEqual(set(syns), set(golay.DEFAULT_SYNDROME_LUT.keys()))\r\n self.assertEqual(len(set(syns)), len(syns))\r\n self.assertEqual(len(syns), len(errvecs))\r\n self.assertEqual(len(errvecs), 2325)", "def find_synonymous(input_file, work_dir):\n # create the .ctl file\n ctl_file = op.join(work_dir, \"yn-input.ctl\")\n output_file = op.join(work_dir, \"nuc-subs.yn\")\n ctl_h = open(ctl_file, \"w\")\n ctl_h.write(\"seqfile = %s\\noutfile = %s\\nverbose = 0\\n\" %\n (input_file, output_file))\n ctl_h.write(\"icode = 0\\nweighting = 0\\ncommonf3x4 = 0\\n\")\n ctl_h.close()\n\n cl = YnCommandline(ctl_file)\n print >>sys.stderr, \"\\tyn00:\", cl\n r, e = cl.run()\n ds_value_yn = None\n ds_value_ng = None\n dn_value_yn = None\n dn_value_ng = None\n\n # Nei-Gojobori\n output_h = open(output_file)\n row = output_h.readline()\n while row:\n if row.find(\"Nei & Gojobori\") >=0:\n for x in xrange(5):\n row = output_h.next()\n dn_value_ng, ds_value_ng = row.split('(')[1].split(')')[0].split()\n break\n row = output_h.readline()\n output_h.close()\n\n # Yang\n output_h = open(output_file)\n for line in output_h:\n if line.find(\"+-\") >= 0 and line.find(\"dS\") == -1:\n parts = line.split(\" +-\")\n ds_value_yn = extract_subs_value(parts[1])\n dn_value_yn = extract_subs_value(parts[0])\n\n if ds_value_yn is None or ds_value_ng is None:\n h = open(output_file)\n print >>sys.stderr, \"yn00 didn't work: \\n%s\" % h.read()\n\n return ds_value_yn, dn_value_yn, ds_value_ng, dn_value_ng", "def synsets_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n # Compute similarity\n if len(synsets_sentence_1) != 0 and len(synsets_sentence_2) != 0:\n similarity = 1 - jaccard_distance(set(synsets_sentence_1), set(synsets_sentence_2))\n return similarity\n else:\n return 0", "def solution(s):", "def solve(self):\n ...", "def get_synonyms(word,w2v,pos,max=20):\n synonyms = []\n count = 0\n synsets = wordnet.synsets(word,pos=pos)\n for synset in synsets:\n candidate_names = []\n for lemma in synset.lemma_names():\n candidate_names.append(lemma)\n for hypo in synset.hyponyms():\n candidate_names.append(hypo)\n for 
hyper in synset.hypernyms():\n candidate_names.append(hyper)\n\n for lemma in candidate_names:\n if count >= max:\n break\n # print pos,word,lemma\n try:\n similarity = w2v.n_similarity([word.lower()],[lemma.lower() ])\n if similarity > .34 and lemma not in synonyms:\n synonyms.append(lemma)\n\n count += 1\n except:\n continue\n\n return synonyms", "def syndSolveLazy(wdsize): # PENDING to debug\r\n # NameMat = wd2N[wdsize]\r\n r = int(math.ceil(math.log(wdsize, 2)) ) + 2 \r\n \r\n P = k2pmap[wdsize] # get the full parity matrix <kxr> \r\n assert P.shape[0]==wdsize and P.shape[1]==r \r\n finals = '' \r\n s1='assign noerr = '\r\n for i in xrange(r):\r\n ss = '~synd[{0}] & '.format(i) if i !=r-1 else '~synd[{0}];'.format(i) \r\n s1 += ss \r\n\r\n s1 += '\\n' \r\n\r\n finals += s1 \r\n\r\n # neg = lambda x: '~' if x==0 else '' \r\n\r\n s2 = ''\r\n\r\n for idx, name in enumerate(P):\r\n flip_s = 'assign flip[{0}] = '.format(idx)\r\n setbit_ind_array = np.nonzero(name)[0].astype('int') \r\n for i in setbit_ind_array: # set bit index\r\n subs = 'synd[{index}] & '.format(index = i) if i != setbit_ind_array[-1] else \\\r\n 'synd[{index}];'.format( index=i) \r\n flip_s += subs \r\n s2 = s2 + flip_s + '\\n' \r\n\r\n\r\n finals += s2 \r\n\r\n return finals", "def weed_out_synonyms(word, potential_synonyms):\n real_synonyms = set()\n for synonym in potential_synonyms:\n max_distance = abs(len(word) - len(synonym))\n abbr_len = min(len(word), len(synonym))\n forgiveness = round(1/7 * abbr_len)\n if lev.distance(word, synonym) <= max_distance + forgiveness:\n # Then it's a synonym!\n real_synonyms.add(synonym)\n return real_synonyms", "def synonyms_wiktionary(name, lang=\"fr\"):\n import wptools\n page = wptools.page(name, wiki='{0}.wiktionary.org'.format(\n lang), lang=lang, silent=True)\n page.get_parse()\n text = page.data['wikitext']\n syn = \"==== {{S|synonymes}} ====\"\n if syn not in text:\n return None\n text = text.split(syn)[1].split(\"====\")[0]\n reg = re.compile(\"[[]{2}(.*?)[]]{2}\")\n res = reg.findall(text)\n return res", "def solve(self):\n pass", "def solve(self):\n pass", "def get_synonym(name: str) -> str:\n return s2n.get_name(n2s.get_smiles(name))", "def _solve(self, mu=None):\n pass", "def __synonym_replacement(self, tokenized_sentence: list) -> str:\n sentence_length = len(tokenized_sentence)\n # Initialize the return string\n new_sentence = \"\"\n # Some variables to keep track of changes and attempted changes\n has_changed = False\n attempts = 0\n # Keep trying to make a change until either:\n # 1) You've made a change, OR\n # 2) You've tried to make a change for half the words in the sentence with no success\n while has_changed is not True and attempts <= sentence_length/2:\n # Grab a random word from the tokenized sentence\n index_to_change = random.randint(0, sentence_length-1)\n pair_to_change = tokenized_sentence[index_to_change]\n # Get the list of synonyms based off of that (word, POS) pair from the tokenized sentence\n list_of_syns = nltk_methods.list_of_syns_from_pos_pair(pair_to_change)\n # ...but what if it's a word that doesn't have any synonyms matching the POS tag? 
\n if len(list_of_syns) < 1: \n # Failed synonym swap, so bump up the attempts tracker by one\n attempts += 1\n continue\n # Else, the word does have synonyms we can swap the word for\n else:\n # Randomly pick a word from the synonym list\n random_pick = random.randint(0, len(list_of_syns)-1)\n new_word = list_of_syns[random_pick]\n new_word_pair = (new_word, \"NA\") # \"NA\" is a dummy POS tag\n # Now update the tokenized sentence with the new word\n tokenized_sentence[index_to_change] = new_word_pair\n # Pull the sentence back together\n new_sentence = nltk_methods.put_string_together_from_pos_tagged_list(tokenized_sentence)\n # Now let's clean up our brand new sentence really quickly\n new_sentence = nltk_methods.clean_sentence(new_sentence)\n # BUT WAIT, what if this is a duplicate? We don't want that!\n if new_sentence in self.return_augmented_sentences():\n # Bump up the attempts and skip this sentence\n attempts += 1\n continue\n # Update the flags\n has_changed = True\n return new_sentence", "def get_synonyms(word):\n syns_sets = wordnet.synsets(word)\n if syns_sets:\n # if there's synonyms, take the first set\n desired = syns_sets[0].lemma_names()\n desired = [the_name.replace(\"_\", \" \") for the_name in desired]\n return desired\n\n else:\n return False", "def dpll_satisfiable(s):\n clauses = conjuncts(to_cnf(s))\n symbols = prop_symbols(s)\n\n print ' >>> Got clauses (',len(clauses),') and symbols (', len(symbols), ')'\n print ' >>> starting dpll proper'\n \n return dpll(clauses, symbols, {})", "def information_content_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets_sentence_1:\n L = []\n for ss in synsets_sentence_2:\n try:\n L.append(synset.lin_similarity(ss, brown_ic))\n except:\n continue\n if L: \n best_score = max(L)\n score += best_score\n count += 1\n # Average the values\n if count > 0: score /= count\n return score", "def get_sol(self):", "def testSynonym(self):\n\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\tassert isinstance(syn, spinner.Synonym), syn\n\t\t\t\t\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()", "def test_tuple_synapses(self):\n # reproducible arbitrariness\n np.random.seed(5003)\n\n self.conductor.out_step = 
np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n self.rule.alpha = 1.0\n self.rule.beta = 1.5\n\n tmax = 10*self.dt\n\n W0 = np.copy(self.syns.W)\n\n sim1 = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim1.run(tmax)\n\n final1 = np.copy(self.syns.W)\n\n self.syns.W = np.copy(W0)\n\n rule2 = TwoExponentialsPlasticity(\n (self.syns.source, self.syns.target, self.syns.W),\n self.tutor, constrain_positive=False, rate=1-6)\n rule2.alpha = 1.0\n rule2.beta = 1.5\n\n sim2 = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, rule2, dt=self.dt)\n sim2.run(tmax)\n\n final2 = np.copy(self.syns.W)\n\n self.assertTrue(np.allclose(final1, final2))", "def test_tuple_synapses(self):\n # reproducible arbitrariness\n np.random.seed(5003)\n\n self.conductor.out_step = np.random.randn(self.Nc)\n self.tutor.out_step = np.random.randn(self.Ns)\n\n self.rule.alpha = 1.0\n self.rule.beta = 1.5\n\n tmax = 10*self.dt\n\n W0 = np.copy(self.syns.W)\n\n sim1 = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim1.run(tmax)\n\n final1 = np.copy(self.syns.W)\n\n self.syns.W = np.copy(W0)\n\n rule2 = SuperExponentialPlasticity(\n (self.syns.source, self.syns.target, self.syns.W),\n self.tutor, constrain_positive=False, rate=1-6)\n rule2.alpha = 1.0\n rule2.beta = 1.5\n\n sim2 = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, rule2, dt=self.dt)\n sim2.run(tmax)\n\n final2 = np.copy(self.syns.W)\n\n self.assertTrue(np.allclose(final1, final2))", "def _simplification_technique_1(rels):\n rels = list(set(rels))\n rels.sort()\n l_rels = len(rels)\n\n # all syllables with single syllable\n one_syllable_rels = set()\n # since \"nw\" has a max size = l_rels, only identity element\n # removal can possibly happen\n nw = [None]*l_rels\n for i in range(l_rels):\n w = rels[i].identity_cyclic_reduction()\n if w.number_syllables() == 1:\n\n # replace one syllable relator with the corresponding inverse\n # element, for ex. 
x**-4 -> x**4 in relator list\n if w.array_form[0][1] < 0:\n rels[i] = w**-1\n one_syllable_rels.add(rels[i])\n\n # since modifies the array rep., so should be\n # added a list\n nw[i] = list(rels[i].array_form)\n\n # bound the exponent of relators, making use of the single\n # syllable relators\n for i in range(l_rels):\n k = nw[i]\n rels_i = rels[i]\n for gen in one_syllable_rels:\n n = gen.array_form[0][1]\n gen_arr0 = gen.array_form[0][0]\n j = len(k) - 1\n while j >= 0:\n if gen_arr0 == k[j][0] and gen is not rels_i:\n t = Mod(k[j][1], n)\n\n # multiple of one syllable relator\n if t == 0:\n del k[j]\n zero_mul_simp(k, j - 1)\n j = len(k)\n\n # power should be bounded by (-n/2, n/2]\n elif t <= n/2:\n k[j] = k[j][0], Mod(k[j][1], n)\n elif t > n/2:\n k[j] = k[j][0], Mod(k[j][1], n) - n\n j -= 1\n\n return nw", "def _get_all_insertions(synonym, enc_word, ins_word, solution_format=None):\n words = [enc_word[0:i] + ins_word + enc_word[i:] for i in range(1, len(enc_word))]\n if solution_format is not None:\n words = [solution_format.add_spaces(word) for word in words if solution_format.check(word)]\n\n solutions = [(word, SimilaritySolver.solve(synonym, 0, word.replace(\" \", \"_\"))) for word in words]\n return solutions", "def calc_syndrome(codeword, n):\r\n sym = 0\r\n for i in range(1, n):\r\n if codeword[i]:\r\n sym ^= i\r\n extra_parity = calc_parity_vector(codeword)\r\n if extra_parity == codeword[0]:\r\n if sym == 0:\r\n return 0, sym\r\n else:\r\n return 2, sym\r\n else:\r\n if sym >= n:\r\n pass\r\n else:\r\n codeword[sym] ^= 1\r\n return 1, sym", "def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de soundexes van de achternaam worden meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials 
only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, ratio_geslachtsnaam2 *weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return s\n return final_ratio", "def test_solve_quadratic_fixed(self):\n iden1 = Identity()\n iden2 = Identity()\n iden3 = Identity()\n iden1.x.val = 4\n iden2.x.val = 5\n iden3.x.val = 6\n iden1.x.name = 'x1'\n iden2.x.name = 'x2'\n iden3.x.name = 'x3'\n iden2.x.fixed = False\n term1 = LeastSquaresTerm(iden1.target, 1, 1)\n term2 = LeastSquaresTerm(iden2.target, 2, 2)\n term3 = LeastSquaresTerm(iden3.target, 3, 3)\n prob = LeastSquaresProblem([term1, term2, term3])\n prob.solve()\n 
self.assertAlmostEqual(prob.objective, 10)\n self.assertAlmostEqual(iden1.x.val, 4)\n self.assertAlmostEqual(iden2.x.val, 2)\n self.assertAlmostEqual(iden3.x.val, 6)", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def solve_nonlinear(self, params, unknowns, resids):\n pass" ]
[ "0.60946655", "0.60578865", "0.60223013", "0.5942013", "0.58613205", "0.58297867", "0.58198977", "0.5809236", "0.571314", "0.57117873", "0.5688526", "0.56218284", "0.56218284", "0.55990916", "0.5592245", "0.557801", "0.55571336", "0.55550945", "0.55278677", "0.5526694", "0.55076754", "0.5505073", "0.55006814", "0.5447038", "0.54408485", "0.5438443", "0.5428019", "0.54076904", "0.53815925", "0.53815925" ]
0.7369051
0
Solver for anagram clues
def _solve_anagram(parse_tree, solution_format=None): anag, syn = _get_parts_ignore_EQU(parse_tree) # Get the anagramed word anag_word = anag[0] if not anag_word.label() == 'ANAG_WORD': anag_word = anag[1] anag_word = _create_sentence(anag_word, space=False) syn_sent = _create_sentence(syn) if len(anag_word) > ANAG_MAX: # If anagram is too long, ignore it return [] # Get all possible permutations and filter with solution_format (if given) words = ["".join(perm) for perm in itertools.permutations(anag_word)] if solution_format is not None: words = [solution_format.add_spaces(word) for word in words if solution_format.check(word)] # Calculate match score for all possible solutions solutions = [(word, SimilaritySolver.solve(syn_sent, 0, word.replace(" ", "_"))) for word in words] return solutions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solution(s):", "def anagram(s):\n minchanges = 0\n\n if len(s) % 2 == 1:\n return -1\n else:\n word1 = s[0:len(s) // 2]\n word2 = s[len(s) // 2:]\n print(word1, word2)\n c1 = Counter(word1)\n for letter in word2:\n if c1[letter] > 0:\n c1[letter] -= 1\n elif c1[letter] == 0:\n minchanges += 1\n else:\n minchanges += 1\n\n return minchanges", "def solve(an):\n # First, split the expresion into left and right parts by ==\n # split each part into words by +\n # strip spaces fro, each word, reverse each work to\n # enumerate the digit rank from lower to higer\n fullexp = [list(map(lambda x: list(reversed(x.strip())), s.split(\"+\")))\n for s in an.strip().upper().split(\"==\")]\n # Find the maximal lenght of the work, maximal possive digit rank or\n # the power of 10, should the < maxp\n maxp = max([len(w) for s in fullexp for w in s])\n # Extract the leading letters for each (reversed) word\n # those cannot be zeros as the number cannot start with 0\n nzchars = set([w[-1] for s in fullexp for w in s])\n # initialize the lists for digit ranks\n unzchars = [] # non-zero letters unique at level\n uokzchars = [] # zero-allowed letters unique at level\n uchars = [] # all letters unique at level\n tchars = [] # all letter with multipliers per level\n for i in range(maxp):\n tchars.append(dict())\n unzchars.append(set())\n uokzchars.append(set())\n # Now lets scan the expression and accumulate the letter counts\n for si, s in enumerate(fullexp):\n sgn = 1 - (si << 1) # left side (0) is +1, right right (1) is -1\n for w in s: # for each word in the side (already reversed)\n for p, c in enumerate(w): # enumerate with ranks\n if c not in tchars[p]: # check if the letter was alread there\n tchars[p][c] = 0\n tchars[p][c] += sgn # append to the rank dictionary\n\n totchars = set() # Keep track of letters already seen at lower ranks\n # go through the accumulated rank dictionaries\n for p, chardict in enumerate(tchars):\n for c, cnt in tuple(chardict.items()):\n if cnt == 0: # if the cumulative is 0\n del chardict[c] # remove the letter from check dictionry\n # it does not impact the sum with 0-multiplier\n # if the letter contributes to the sum\n # and was not yet seen at lower ranks\n elif c not in totchars:\n # add the letter to either non-zero set\n # or allowed-zero set\n if c in nzchars:\n unzchars[p].add(c)\n else:\n uokzchars[p].add(c)\n # add to the list as seen letter to ignore at the next\n # ranks\n totchars.add(c)\n # pre-build the combo list of letters for the rank\n # non-zero first, followed by zero-allowed\n uchars.append(tuple(unzchars[p]) + tuple(uokzchars[p]))\n # pre-convert check dictionaries to tuples\n tchars[p] = tuple(chardict.items())\n # go for the recursion\n return check_rec([maxp, tchars, unzchars, uokzchars, uchars])", "def anagram(s1, s2):\n pass", "def question1a(s,t):\n\n anagrams = permutations(t, len(t))\n for anagram in anagrams:\n if anagram:\n if ''.join(anagram) in s:\n return True\n return False", "def solve(self):", "def solve(self, cipher):\r\n a, b = cipher\r\n m = len(a)\r\n n = len(b)\r\n dp = [[0 for _ in xrange(n+1)] for _ in xrange(m+1)]\r\n for i in xrange(1, m+1):\r\n for j in xrange(1, n+1):\r\n if a[i-1]==b[j-1]:\r\n dp[i][j] = dp[i-1][j-1]+1\r\n else:\r\n dp[i][j] = max(dp[i][j-1], dp[i-1][j]) # superset\r\n return dp[-1][-1]", "def test_anagram(self):\n self.assertTrue(anagram(\"cinema\", \"iceman\"))\n self.assertTrue(anagram(\"dormitory\", \"dirtyroom\"))\n self.assertFalse(anagram(\"hello\", \"lohae\"))\n self.assertFalse(anagram(\"ill\", 
\"like\"))\n self.assertFalse(anagram(\"illness\", \"nes\"))", "def cherche_anagramme(mot):\n global DICO\n res = []\n lettres = dict((l, mot.count(l)) for l in set(mot))\n if len(mot) == 1:\n res.append(mot)\n else:\n for ana in DICO:\n if len(ana) == len(mot):\n anag = True\n for lettre in mot:\n if lettre not in ana:\n anag = False\n if anag:\n lettres2 = dict((l, ana.count(l)) for l in set(ana))\n if lettres == lettres2:\n res.append(ana)\n return res", "def gen_linear_anagram_candidates(word):\n anagram_candidates = []\n for pos in range(1, len(word)):\n anagram_candidates += [word[pos:] + word[0:pos]]\n return anagram_candidates", "def anagrams(word):\n\t# Question 4b: Generates all permutations of word and filters it to contain only valid words\n\treturn word_perms(word) & word_sets[len(word)]", "def test_anagram_dd(self):\n self.assertTrue(anagram_dd(\"cinema\", \"iceman\"))\n self.assertTrue(anagram_dd(\"dormitory\", \"dirtyroom\"))\n self.assertFalse(anagram_dd(\"hello\", \"lohae\"))\n self.assertFalse(anagram_dd(\"ill\", \"like\"))\n self.assertFalse(anagram(\"illness\", \"nes\"))", "def test_solver(allowed_symbols, len_sequence=3):\n secret_sequence = \"\"\n for _ in range(len_sequence):\n secret_sequence += allowed_symbols[random.randint(0, len_sequence - 1)]\n print('secret:', secret_sequence)\n\n solution = brute_force_solver(allowed_symbols, secret_sequence)\n return solution == tuple(secret_sequence)", "def dict_judge1(_str1):\n\tglobal final_output\n\tif _str1==\"\":\n\t\treturn 'Finished.'\n\t_list0=dict_check34(_str1)\n\t#Judge1: Longest\n\t_list=[]\n\t_list1=[]\n\tfor i in range(len(_list0)):\n\t\tn=0\n\t\tfor j in range(3):\n\t\t\tn+=len(_list0[i][j])\n\t\t_list.append(n)\n\n\t_max=max(_list)\n\tfor i in range(len(_list0)):\n\t\tif _list[i]==_max:\n\t\t\twhile '' in _list0[i]:\n\t\t\t\t_list0[i].remove('')\n\t\t\tif not _list0[i] in _list1:\n\t\t\t\t_list1.append(_list0[i])\n\n\t#Judge2: Max Average Length\n\tif len(_list1)==1:\n\t\t_list2=_list1\n\telse:\n\t\t_list=[]\n\t\t_list2=[]\n\t\tfor i in range(len(_list1)):\n\t\t\tn=0\n\t\t\tfor j in range(len(_list1[i])):\n\t\t\t\tn+=len(_list1[i][j])\n\t\t\t_list.append(n/len(_list1[i]))\n\n\t\t_max=max(_list)\n\t\tfor i in range(len(_list1)):\n\t\t\tif _list[i]==_max:\n\t\t\t\t_list2.append(_list1[i])\n\n\t#Judge3: Take Variance for guarantee they're same patern\n\tif len(_list2)==1:\n\t\t_list3=_list2\n\telse:\n\t\t_list=[]\n\t\t_list3=[]\n\t\tfor i in range(len(_list2)):\n\t\t\tn=0\n\t\t\tfor j in range(len(_list2[i])):\n\t\t\t\tn+=len(_list2[i][j])**2\n\t\t\t_list.append(n/len(_list2[i]))\n\n\t\t_max=max(_list)\n\t\tfor i in range(len(_list2)):\n\t\t\tif _list[i]==_max:\n\t\t\t\t_list3.append(_list2[i])\n\n\t#Judge4: Single Word Frequency\n\tif len(_list3)==1:\n\t\t_list4=_list3\n\telse:\n\t\t_min=4\n\t\tfor i in range(len(_list3)):\n\t\t\tfor j in range(len(_list3[i])):\n\t\t\t\tif len(_list3[i][j])<_min:\n\t\t\t\t\t_min=len(_list3[i][j])\n\t\t_list=[]\n\t\t_list4=[]\n\t\tfor i in range(len(_list3)):\n\t\t\tn=0\n\t\t\tfor j in range(len(_list3[i])):\n\t\t\t\tif len(_list3[i][j])==_min:\n\t\t\t\t\tn+=_dict_ori[_list3[i][j]]\n\t\t\t_list.append(n)\n\n\t\t_max=max(_list)\n\t\tfor i in range(len(_list3)):\n\t\t\tif _list[i]==_max:\n\t\t\t\t_list4.append(_list3[i])\n\n\t#Output\n\tif len(_list4)!=1:\n\t\t_list4=_list4[0]\n\tif len(''.join(_list4[0]))==len(_str1):\n\t\tfinal_output=final_output+(' '.join(_list4[0]))\n\telse:\n\t\tfinal_output=final_output+_list4[0][0]+' '\n\t\tdict_judge1(_str1[len(_list4[0][0]):])", "def 
cost(solution):\n cost = 0\n alm_count = 0 # alignment operand/operator count\n fst_len = len(solution[0])\n snd_len = len(solution[1])\n min_len = min(fst_len, snd_len)\n\n for i in range(min_len):\n if solution[0][i] == solution[1][i]:\n alm_count += 1\n\n max_spaces = max(solution[0].count(\" \"), solution[1].count(\" \"))\n cost = ((alm_count * 2.0) + max_spaces) / 3.0\n\n return cost", "def isAnagram(self, s, t):\n \n s_count = {}\n t_count = {}\n for char in s:\n s_count[char] = s_count.get(char, 0) +1\n \n for char in t:\n t_count[char] = t_count.get(char, 0) +1\n \n return t_count == s_count", "def solver2(input_val):\n sum_div = [1] * (input_val + 1)\n for i in range(2, int(input_val ** 0.5) + 1):\n sum_div[i * i] += i\n for k in range(i + 1, input_val // i + 1):\n sum_div[k * i] += k + i\n\n abundants, result = set(), 0\n for n in range(1, input_val + 1):\n if sum_div[n] > n:\n abundants.add(n)\n if not any((n - a in abundants) for a in abundants):\n result += n\n return result", "def solve():\n n = int(input())\n m = int(input())\n Ks = list(map(int, input().split()))\n\n # 4重ループにより全探索\n for a in range(n):\n for b in range(n):\n for c in range(n):\n for d in range(n):\n if Ks[a]+Ks[b]+Ks[c]+Ks[d] == m:\n print(\"Yes\")\n return\n print(\"No\")", "def solution(s, p, q):\n a_prefix = [0 for _ in range(len(s) + 1)]\n c_prefix = a_prefix.copy()\n g_prefix = a_prefix.copy()\n for i, nucleotide in enumerate(s):\n a_prefix[i + 1] = a_prefix[i] + (1 if nucleotide == 'A' else 0)\n c_prefix[i + 1] = c_prefix[i] + (1 if nucleotide == 'C' else 0)\n g_prefix[i + 1] = g_prefix[i] + (1 if nucleotide == 'G' else 0)\n result = []\n for left, right in zip(p, q):\n if a_prefix[right + 1] - a_prefix[left] > 0:\n result.append(1)\n elif c_prefix[right + 1] - c_prefix[left] > 0:\n result.append(2)\n elif g_prefix[right + 1] - g_prefix[left] > 0:\n result.append(3)\n else:\n result.append(4)\n return result", "def score_solution(g, s):\n pass", "def test_on_anagram(self):\n\n test_string = \"anagram\"\n test_anagram = \"gramana\"\n actual = is_anagram(test_string, test_anagram)\n assert actual == True", "def anagrama(palabra1, palabra2):\r\n return ''.join(sorted(palabra1)) == ''.join(sorted(palabra2))", "def valid_anagram(phrase):\n words = []\n series_of_words = phrase.split(' ')\n words.append(''.join(sorted(series_of_words.pop())))\n for word in series_of_words:\n word = ''.join(sorted(word))\n if word in words:\n return False\n words.append(word)\n return True", "def solve_part1(start):\n inputs = load_inputs(False)\n two_matches = []\n tiles = inputs.keys()\n for elem in tiles:\n matches = defaultdict(list)\n for elem2 in tiles:\n if elem != elem2 and compare_tile(inputs[elem], inputs[elem2]):\n l = matches[elem]\n l.append(elem2)\n\n if len(matches[elem]) == 2:\n print matches\n two_matches.append(elem)\n\n return reduce((lambda x, y: int(x) * int(y)), two_matches)", "def solve(self):\n pass", "def solve(self):\n pass", "def test_basic(self):\n self.assertEqual(solution(\"\"\"11111\n19991\n19191\n19991\n11111\"\"\"), 6)\n self.assertEqual(solution(\"\"\"5483143223\n2745854711\n5264556173\n6141336146\n6357385478\n4167524645\n2176841721\n6882881134\n4846848554\n5283751526\"\"\"), 195)", "def is_anagram_of_palindrome(word):\n\n counts = {}\n num_of_odd_occurences = 0\n\n for char in word:\n counts[char] = counts.get(char, 0) + 1\n for val in counts.values():\n if val % 2 != 0:\n num_of_odd_occurences += 1\n\n return num_of_odd_occurences <= 1", "def anagram_dd(str1, str2):\n dic1 = 
defaultdict(int)\n for char in str1:\n dic1[char] += 1\n for char in str2:\n if char in dic1:\n if dic1[char] == 0:\n return False\n else:\n dic1[char] -= 1\n if dic1[char] == 0:\n del dic1[char]\n else:\n return False\n if dic1 == {}:\n return True\n return False", "def test_match(self):\n self.assertEqual(solution( \"\"\"p=<-6,0,0>, v=< 3,0,0>, a=< 0,0,0>\np=<-4,0,0>, v=< 2,0,0>, a=< 0,0,0>\np=<-2,0,0>, v=< 1,0,0>, a=< 0,0,0>\np=< 3,0,0>, v=<-1,0,0>, a=< 0,0,0>\"\"\"), 1)" ]
[ "0.66833496", "0.65617925", "0.6440984", "0.63482744", "0.6340745", "0.62304044", "0.6203706", "0.6193661", "0.6119746", "0.6078559", "0.60522294", "0.60006964", "0.5970734", "0.5944802", "0.59298414", "0.592687", "0.58776724", "0.5774756", "0.57737607", "0.57636297", "0.57621825", "0.57521105", "0.5738744", "0.5729819", "0.57208514", "0.57208514", "0.56974596", "0.56960577", "0.56724983", "0.56668794" ]
0.6842031
0
Solver for hidden word clues
def _solve_hidden_word(parse_tree, solution_format=None):
    hidden, syn = _get_parts_ignore_EQU(parse_tree)

    # Get the hiding word
    hiding_word = hidden[0]
    if not hiding_word.label() == 'HID_WORD':
        hiding_word = hidden[1]
    hiding_word = _create_sentence(hiding_word, space=False)

    syn_sent = _create_sentence(syn)

    if solution_format is not None:
        # Get all substrings of the hiding word in the right length and check format
        total_length = solution_format.get_total_length()
        words = [hiding_word[i:i + total_length] for i in range(len(hiding_word) - total_length + 1)]
        words = [word for word in words if solution_format.check(word)]
    else:
        # If format is unknown, try all lengths for substrings
        words = []
        for length in range(1, len(hiding_word)):
            words += [hiding_word[i:i + length] for i in range(len(hiding_word) - length + 1)]

    # Get match score for all substrings
    solutions = [(word, SimilaritySolver.solve(syn_sent, 0, word.replace(" ", "_"))) for word in words]

    return solutions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_clues(self):\r\n print(\"\\n************Searching Clues************\\n\")\r\n for word_id in self.words.keys():\r\n if not self.words[word_id].see and not self.words[word_id].wth:\r\n clue = pop_backslash(self.words[word_id].clue)\r\n temp = word_domain(\"allintext:\" + clue +' -crossword',self.words[word_id].length)\r\n temp2 = temp + word_domain(clue +' -crossword',self.words[word_id].length)\r\n domain = temp2 + data_muse(clue, self.words[word_id].length)\r\n unique_list = []\r\n for x in domain: \r\n y = x.upper()\r\n # check if exists in unique_list or not \r\n if y not in unique_list: \r\n unique_list.append(y) \r\n \r\n self.words[word_id].assign_word_domain(unique_list)\r\n print(\"\\nSearch is done...\")", "def solve(self):", "def solve(self):\n \n self.action_button.config(state = DISABLED)\n \n self.get_grid()\n resultdict = self.logic.solve()\n for word in resultdict:\n if resultdict[word]:\n self.word_search_grid[resultdict[word][0]][resultdict[word][1]].entry[\"fg\"] = \"red\"", "def solve(self):\n\n words = re.sub(r'[^\\w ]+', '', self.ciphertext).split()\n words.sort(key=lambda word: -len(word))\n\n for max_unknown_word_count in range(0, max(3, len(words) // 10)):\n solution = self._recursive_solve(words, {}, 0,\n max_unknown_word_count)\n if solution:\n self._translation = solution\n break", "def neutralize(word, g, word_to_vec_map):\n\n ### START CODE HERE ###\n # Select word vector representation of \"word\". Use word_to_vec_map. (≈ 1 line)\n e = word_to_vec_map[word]\n\n # Compute e_biascomponent using the formula given above. (≈ 1 line)\n e_biascomponent = np.dot(e, g) / np.sum(np.dot(g, g)) * g\n # e_biascomponent = np.sqrt(np.sum(np.dot(e,e))) * cosine_similarity(e, g) * g/np.sqrt(np.sum(np.dot(g,g)))\n # Neutralize e by subtracting e_biascomponent from it\n # e_debiased should be equal to its orthogonal projection. 
(≈ 1 line)\n e_debiased = e - e_biascomponent\n ### END CODE HERE ###\n\n return e_debiased", "def solution(s):", "def solve_puzzle(self):\n\n # for each word in the words list\n # ...for each row in the game board\n # ......for each column in each row\n for word in self.words:\n for y, row in enumerate(self.board):\n for x, col in enumerate(row):\n \n # for each direction\n # try to find a word in said direction\n for dir in self.directions:\n self.scan_word(word, y, x, dir)", "def npa_constraints(\n assemblage: dict[tuple[int, int], cvxpy.Variable], k: int | str = 1, referee_dim: int = 1\n) -> list[cvxpy.constraints.constraint.Constraint]:\n a_out, a_in, b_out, b_in = _get_nonlocal_game_params(assemblage, referee_dim)\n\n words = _gen_words(k, a_out, a_in, b_out, b_in)\n dim = len(words)\n\n r_var = cvxpy.Variable((referee_dim * dim, referee_dim * dim), PSD=True, name=\"R\")\n # Normalization.\n norm = sum(r_var[i * dim, i * dim] for i in range(referee_dim))\n constraints = [norm == 1]\n\n seen = {}\n for i in range(dim):\n for j in range(i, dim):\n w_i, w_j = words[i], words[j]\n w_i = tuple(reversed(w_i))\n word = _reduce(w_i + w_j)\n\n sub_mat = r_var[i::dim, j::dim]\n # if i = 0 we would consider (ε, ε) as an empty word.\n if i != 0 and _is_zero(word):\n constraints.append(sub_mat == 0)\n\n elif _is_meas(word):\n s_a, s_b = word\n constraints.append(\n sub_mat\n == assemblage[s_a.question, s_b.question][\n s_a.answer * referee_dim : (s_a.answer + 1) * referee_dim,\n s_b.answer * referee_dim : (s_b.answer + 1) * referee_dim,\n ]\n )\n\n elif _is_meas_on_one_player(word):\n symbol = word[0]\n if symbol.player == \"Alice\":\n sum_all_bob_meas = sum(\n assemblage[symbol.question, 0][\n symbol.answer * referee_dim : (symbol.answer + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n for b_ans in range(b_out)\n )\n\n constraints.append(sub_mat == sum_all_bob_meas)\n\n if symbol.player == \"Bob\":\n sum_all_alice_meas = sum(\n assemblage[0, symbol.question][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n symbol.answer * referee_dim : (symbol.answer + 1) * referee_dim,\n ]\n for a_ans in range(a_out)\n )\n\n constraints.append(sub_mat == sum_all_alice_meas)\n\n elif word in seen:\n old_i, old_j = seen[word]\n old_sub_mat = r_var[old_i::dim, old_j::dim]\n constraints.append(sub_mat == old_sub_mat)\n\n else:\n seen[word] = (i, j)\n\n # now we impose constraints to the assemblage operator\n for x_alice_in in range(a_in):\n for y_bob_in in range(b_in):\n sum_all_meas_and_trace = 0\n for a_ans in range(a_out):\n for b_ans in range(b_out):\n sum_all_meas_and_trace += sum(\n assemblage[x_alice_in, y_bob_in][\n i + a_ans * referee_dim, i + b_ans * referee_dim\n ]\n for i in range(referee_dim)\n )\n\n # r x r sub - block is PSD since it's an unnormalized quantum state.\n constraints.append(\n assemblage[x_alice_in, y_bob_in][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n >> 0\n )\n\n constraints.append(sum_all_meas_and_trace == 1)\n\n # Bob marginal consistency\n for y_bob_in in range(b_in):\n for b_ans in range(b_out):\n sum_first_question = sum(\n assemblage[0, y_bob_in][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n for a_ans in range(a_out)\n )\n\n for x_alice_in in range(1, a_in):\n sum_cur_question = sum(\n assemblage[x_alice_in, y_bob_in][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n 
]\n for a_ans in range(a_out)\n )\n\n constraints.append(sum_first_question == sum_cur_question)\n\n # Alice marginal consistency\n for x_alice_in in range(a_in):\n for a_ans in range(a_out):\n sum_first_question = sum(\n assemblage[x_alice_in, 0][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n for b_ans in range(b_out)\n )\n\n for y_bob_in in range(1, b_in):\n sum_cur_question = sum(\n assemblage[x_alice_in, y_bob_in][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n for b_ans in range(b_out)\n )\n\n constraints.append(sum_first_question == sum_cur_question)\n\n return constraints", "def solve(self):\n ...", "def solve(self):\n pass", "def solve(self):\n pass", "def generate_wordnet_candidates(self, word):\n candidates = set()\n if self.check_if_replacable(word):\n for synset in wordnet.synsets(word):\n for lemma in synset.lemmas():\n converted = convert(lemma.name().lower(), word)\n if converted != word and converted != None:\n try:\n w1 = wordnet.synsets(word)[0]\n w2 = wordnet.synsets(converted)[0]\n similarity = w1.wup_similarity(w2)\n if isinstance(similarity,float) and w1.wup_similarity(w2) >0.6 :\n candidates.add(converted)\n except:\n pass\n # print(\"candidate\",word,candidates)\n return candidates", "def syndSolveLazy(wdsize): # PENDING to debug\r\n # NameMat = wd2N[wdsize]\r\n r = int(math.ceil(math.log(wdsize, 2)) ) + 2 \r\n \r\n P = k2pmap[wdsize] # get the full parity matrix <kxr> \r\n assert P.shape[0]==wdsize and P.shape[1]==r \r\n finals = '' \r\n s1='assign noerr = '\r\n for i in xrange(r):\r\n ss = '~synd[{0}] & '.format(i) if i !=r-1 else '~synd[{0}];'.format(i) \r\n s1 += ss \r\n\r\n s1 += '\\n' \r\n\r\n finals += s1 \r\n\r\n # neg = lambda x: '~' if x==0 else '' \r\n\r\n s2 = ''\r\n\r\n for idx, name in enumerate(P):\r\n flip_s = 'assign flip[{0}] = '.format(idx)\r\n setbit_ind_array = np.nonzero(name)[0].astype('int') \r\n for i in setbit_ind_array: # set bit index\r\n subs = 'synd[{index}] & '.format(index = i) if i != setbit_ind_array[-1] else \\\r\n 'synd[{index}];'.format( index=i) \r\n flip_s += subs \r\n s2 = s2 + flip_s + '\\n' \r\n\r\n\r\n finals += s2 \r\n\r\n return finals", "def get_sol(self):", "def findSecretWord(self, w, master):\n\n h = [None] * len(w) # keeps the set\n n = [None] * len(w) # keeps the near matrix\n for i in range(len(h)):\n h[i] = defaultdict(set)\n n[i] = [0] * len(w)\n\n for i in range(0, len(w) - 1):\n for j in range(i + 1, len(w)):\n nr = self.near(i, j, w)\n n[i][j], n[j][i] = nr, nr\n h[i][nr].add(j)\n h[j][nr].add(i)\n\n # print(n)\n # print(h)\n\n def remaining_choices(select, nr, choices):\n return len(h[select][nr] & choices)\n\n choices = set(range(len(w)))\n while True:\n max_cost = {}\n if len(choices) > 1:\n for select in choices:\n cost = {}\n visited = set()\n for secret in choices:\n if select != secret:\n nr = n[select][secret]\n if nr not in visited:\n cost[secret] = remaining_choices(\n select, nr, choices)\n visited.add(nr)\n # print(\"select {}\".format(select), cost)\n # find the max cost among all the secrets\n max_cost[select] = max(cost.items(), key=lambda x: x[1])\n # print(\"per select max cost\", max_cost)\n mcost = {k: v[1] for k, v in max_cost.items()}\n # print(mcost)\n minmax = min(mcost.items(), key=lambda x: x[1])\n # print(minmax)\n # master.guess(w[minmax])\n selection = minmax[0]\n else:\n selection = list(choices)[0]\n\n offline = False\n if offline:\n my_secret = w[1]\n 
my_secret = \"hbaczn\"\n matches = self.guess(w[selection], my_secret)\n my_secret_index = w.index(my_secret)\n print(\n (\"Secret: {}, Index: {}, \" +\n \"Matches: {}, N: {}, |N|: {}\").format(\n my_secret, my_secret_index,\n n[selection][my_secret_index],\n h[selection][matches],\n len(h[selection][matches])))\n else:\n matches = master.guess(w[selection])\n\n if matches == 6:\n print(\"found\")\n break\n choices = h[selection][matches] & choices\n # print(\"Choices\", choices)\n # print(\"sel, worst cost: {}, matches, remain: {}\".format(\n # minmax, (matches, len(choices))))\n\n return w[selection]", "def potentialSolver5(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n 
-self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def intent_of_text_LnDOR(ChapterTextS, TargetQuestionsD, TestS, StopWords):\n \n # Chapter Text - stokenize\n StokensCT = stokenize(ChapterTextS, StopWords) \n\n # Test question - stokenize\n StokensTest = stokenize(TestS, StopWords)\n\n # Knowledge Base Dict - stokenize\n KBD_structure = stokenizeKBD(TargetQuestionsD, StopWords)\n\n # List (because list is mutable, set is not) of all stokens in document\n StokensDoc = StokensCT[:] # from chapter text\n StokensDoc.extend(StokensTest[:]) # += Test string\n\n # extend list of stokens in Doc\n for i in TargetQuestionsD:\n StokensDoc.extend(TargetQuestionsD[i][\"mq stokens\"][:]) # += KB target [matched Q]s\n StokensDoc.extend(TargetQuestionsD[i][\"ans stokens\"][:]) # += KB answers\n \n StokensTestV = set(StokensTest)\n StokensDocV = set(StokensDoc)\n StokensAntiTgtV = StokensDocV\n \n # Complement of all targets\n for i in TargetQuestionsD:\n StokensAntiTgtV = StokensAntiTgtV.difference(set(TargetQuestionsD[i][\"mq stokens\"]))\n \n # calculate confusion matrix and DOR etc.\n LnDORD = {}\n # Anti Target\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensAntiTgtV, StokensTestV) \n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN) \n \n LnDORD[\"AntiTgt\"] = {'lndor': LnDOR, 'theta': someAngle}\n\n # total occurences\n total_occ = 0\n for i in TargetQuestionsD:\n total_occ += TargetQuestionsD[i]['count']\n\n for i in TargetQuestionsD:\n StokensTgtV = set(TargetQuestionsD[i][\"mq stokens\"][:])\n\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensTgtV, StokensTestV) \n priorOR = TargetQuestionsD[i]['count'] / total_occ\n\n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN, priorOR) \n \n LnDORD[i] = {'lndor': LnDOR, 'theta': someAngle}\n # LnDORD = {i: {'lndor': , 'theta': }}, KB indices + \"AntiTgt\"\n\n return LnDORD", "def potentialSolver4(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal 
open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in 
np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for 
it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def word_analogy(self):\n data = open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv 
= result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)", "def min_dist_solution(self, words, keyword_dict = None):\r\n\r\n\t\t# default settings\r\n\t\tif keyword_dict == None:\r\n\t\t\tkeyword_dict = self.keyword_dict\r\n\r\n\t\tindexed_text = list(enumerate(words))\r\n\t\t# all found keyword positions\r\n\t\tkeyword_pos = []\r\n\t\tkw_counts = [(len(kw.split()),kw) for kw in keyword_dict]\r\n\t\tkw_length_set = set((l[0] for l in kw_counts))\r\n\t\t\r\n\t\t# seperate keywords by their length\r\n\t\tfor length in kw_length_set:\r\n\t\t\tkw_lgram = ngrams(indexed_text, length)\r\n\t\t\t# start, end, ngram token\r\n\t\t\tkw_lgram_text = [(g[0][0],g[-1][0],' '.join([token[1] for token in g])) \r\n\t\t\t\t\t\t\t for g in kw_lgram]\r\n\t\t\tfixed_length_kw = [kw[1] for kw in kw_counts if kw[0] == length]\r\n\t\t\t\r\n\t\t\tfixed_keyword_pos = [(kw_s,kw_e,token) for kw_s,kw_e,token in kw_lgram_text\r\n\t\t\t \t\t\t\t\t if token in fixed_length_kw]\r\n\t\t\tkeyword_pos += fixed_keyword_pos\r\n\t\t# all found distances\r\n\t\tdistances = []\r\n\t\tfor kw_s,kw_e,kw in keyword_pos:\r\n\t\t\tdistance = keyword_dict[kw]['distance']\r\n\t\t\t# TODO handle case when value we search for is consisted of multiple words\r\n\t\t\tregex_pattern = keyword_dict[kw]['regex']\r\n\t\t\tsearch_direction = keyword_dict[kw]['search_direction']\r\n\t\t\t# start of the block\r\n\t\t\tstart = kw_s - distance if kw_s-distance > 0 else 0\r\n\t\t\t# end of the block\r\n\t\t\tend = kw_e + distance\r\n\t\t\tif search_direction == 'right':\r\n\t\t\t\tsearchable_block = indexed_text[kw_e:end]\r\n\t\t\telif search_direction == 'left':\r\n\t\t\t\tsearchable_block = indexed_text[start:kw_s]\r\n\t\t\telif search_direction == 'both':\r\n\t\t\t\tsearchable_block = indexed_text[start:end]\r\n\t\t\telse:\r\n\t\t\t\t# mb hanlde search_direction value\r\n\t\t\t\tsearchable_block = []\r\n\t\t\t\r\n\t\t\tvalue_pos = [index for index,value in searchable_block\r\n\t\t\t\t\t\t if re.search(regex_pattern,value)]\r\n\t\t\tdistance = [(self.dist(vp,kw_s,kw_e),vp,kw) for vp in value_pos]\r\n\r\n\t\t\tdistances += distance\r\n\t\tif len(distances) == 0:\r\n\t\t\treturn ('not found', None,'no kw')\r\n\t\telse:\r\n\t\t\tmin_distance,found_target_pos,kw = min(distances)\r\n\t\t\treturn words[found_target_pos],found_target_pos,kw", "def getClue(self, invalid_words):\r\n pass", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def solve_nonlinear(self, params, unknowns, resids):\n pass", "def nn_words(table, wordvecs, query, k=10):\n\tkeys = table.keys()\n\tqf = table[query]\n\tscores = numpy.dot(qf, wordvecs.T).flatten()\n\tsorted_args = numpy.argsort(scores)[::-1]\n\twords = [keys[a] for a in sorted_args[:k]]\n\tprint ('QUERY: ' + query)\n\tprint ('NEAREST: ')\n\tfor i, w in enumerate(words):\n\t\tprint (w)", "def solve(self):\r\n words = list(self.words.keys())\r\n words.sort(key= self.get_length,reverse = True)\r\n self.satisfiers = {}\r\n print(\"\\nTrying to populate the grid...\")\r\n for word_id in words:\r\n self.satisfiers[word_id] = {}\r\n for possible_word in self.words[word_id].domain:\r\n result = self.satisfied_constraints(word_id,possible_word)\r\n self.satisfiers[word_id][possible_word] = result\r\n # print(\"\\nword_id: {}, possible_word: {}, result: {}\".format(word_id,possible_word, result))\r\n \r\n final_answers = {}\r\n highest_conf = 0\r\n for word_id in words:\r\n found_words,score = self.evaluate_score(word_id)\r\n # print(\"\\nword_id: {}, found: {}, score: 
{}\".format(word_id,found_words,score))\r\n for el in found_words.keys():\r\n if el in final_answers.keys():\r\n if found_words[el][1] > final_answers[el][0]:\r\n final_answers[el] = [found_words[el][1],found_words[el][0]]\r\n elif found_words[el][1] == final_answers[el][0] and found_words[el][0] not in final_answers[el]:\r\n final_answers[el].append(found_words[el][0])\r\n else:\r\n final_answers[el] = [found_words[el][1],found_words[el][0]]\r\n if final_answers[el][0] > highest_conf:\r\n highest_conf = final_answers[el][0] \r\n print()\r\n print(final_answers) \r\n \r\n #sort the elements of dictionary so that highest confidence comes first in for loop\r\n final_answers = {k: v for k, v in sorted(final_answers.items(), key=lambda item: item[1][0],reverse=True)}\r\n secondary = dict(final_answers)\r\n #first run that we restrict the confidence to be minimum 50%\r\n for key in final_answers.keys():\r\n if final_answers[key][0] >= self.words[key].length/2:\r\n high_conf = final_answers[key][0] == highest_conf\r\n check, word = self.check_grid(key,final_answers[key][1:],high_conf)\r\n if check:\r\n if word != None:\r\n self.words[key].assign_word(word,self.cells)\r\n print(\"Assigned word for {}: {}\".format(key,word))\r\n secondary.pop(key)\r\n \r\n #secondary run that any confidence value can be assigned \r\n for key in secondary.keys():\r\n if secondary[key][0] > 0:\r\n check, word = self.check_grid(key,secondary[key][1:],False)\r\n if check:\r\n if word != None:\r\n self.words[key].assign_word(word,self.cells)\r\n print(\"Assigned word for {}: {}\".format(key,word))", "def needleman_wunsch1(x,y,lodict=None,gop=-2.5, gep=-1.75, local=False):\n n,m = len(x),len(y)\n dp = np.zeros((n+1,m+1))\n pointers = np.zeros((n+1,m+1),np.int32)\n for i in range(1,n+1):\n dp[i,0] = dp[i-1,0]+(gep if i>1 else gop)\n pointers[i,0]=1\n for j in range(1,m+1):\n dp[0,j] = dp[0,j-1]+(gep if j>1 else gop)\n pointers[0,j]=2\n for i in range(1,n+1):\n for j in range(1,m+1):\n if not lodict:\n if x[i-1] == y[j-1]:\n match = dp[i-1,j-1]+1\n else:\n match = dp[i-1,j-1]-1\n else:\n match = dp[i-1,j-1]+lodict[x[i-1],y[j-1]]\n insert = dp[i-1,j]+(gep if pointers[i-1,j]==1 else gop)\n delet = dp[i,j-1]+(gep if pointers[i,j-1]==2 else gop)\n max_score = max([match,insert,delet])\n dp[i,j] = max_score\n pointers[i,j] = [match,insert,delet].index(max_score)\n alg = []\n i,j = n,m\n while(i>0 or j>0):\n pt = pointers[i,j]\n if pt==0:\n i-=1\n j-=1\n alg = [[x[i],y[j]]]+alg\n if pt==1:\n i-=1\n alg = [[x[i],'-']]+alg\n if pt==2:\n j-=1\n alg = [['-',y[j]]]+alg\n return dp[-1,-1], alg", "def needleman_wunsch(x, y, lodict={}, gop=-2.5, gep=-1.75, local=False, indel=''):\n n, m = len(x), len(y)\n dp = np.zeros((n + 1, m + 1))\n pointers = np.zeros((n + 1, m + 1), np.int32)\n if not local:\n for i1, c1 in enumerate(x):\n if gop is None:\n dp[i1 + 1, 0] = lodict.get((c1, indel), gep)\n else:\n dp[i1 + 1, 0] = dp[i1, 0]+(gep if i1 + 1 > 1 else gop)\n pointers[i1 + 1, 0] = 1\n for i2, c2 in enumerate(y):\n if gop is None:\n dp[0, i2 + 1] = lodict.get((indel, c2), gep)\n else:\n dp[0, i2 + 1] = dp[0, i2]+(gep if i2 + 1 > 1 else gop)\n pointers[0, i2 + 1] = 2\n for i1, c1 in enumerate(x):\n for i2, c2 in enumerate(y):\n match = dp[i1, i2] + lodict.get(\n (c1, c2),\n 1 if c1 == c2 else -1)\n insert = dp[i1, i2 + 1] + (\n lodict.get((c1, indel), gep) if gop is None else\n gep if pointers[i1, i2 + 1] == 1 else gop)\n delet = dp[i1 + 1, i2] + (\n lodict.get((indel, c2), gep) if gop is None else\n gep if pointers[i1 + 1, i2] == 2 else 
gop)\n pointers[i1 + 1, i2 + 1] = p = np.argmax([match, insert, delet])\n max_score = [match, insert, delet][p]\n if local and max_score < 0:\n max_score = 0\n dp[i1 + 1, i2 + 1] = max_score\n alg = []\n if local:\n i, j = np.unravel_index(dp.argmax(), dp.shape)\n else:\n i, j = n, m\n score = dp[i, j]\n while (i > 0 or j > 0):\n pt = pointers[i, j]\n if pt == 0:\n i -= 1\n j -= 1\n alg = [(x[i], y[j])] + alg\n if pt == 1:\n i -= 1\n alg = [(x[i], indel)] + alg\n if pt == 2:\n j -= 1\n alg = [(indel, y[j])] + alg\n if local and dp[i, j] == 0:\n break\n return score, alg", "def nn(model, text, vectors, query, k=5):\n\tqf = encode(model, [query])\n\tqf /= norm(qf)\n\tscores = numpy.dot(qf, vectors.T).flatten()\n\tsorted_args = numpy.argsort(scores)[::-1]\n\tsentences = [text[a] for a in sorted_args[:k]]\n\tprint ('QUERY: ' + query)\n\tprint ('NEAREST: ')\n\tfor i, s in enumerate(sentences):\n\t\tprint (s, sorted_args[i])" ]
[ "0.62825", "0.621615", "0.59519327", "0.59129626", "0.59106004", "0.5901776", "0.58596104", "0.5811822", "0.5787408", "0.5749001", "0.5749001", "0.5701069", "0.5651413", "0.56111234", "0.56057537", "0.5567448", "0.5547416", "0.55322987", "0.552673", "0.552673", "0.55166656", "0.55155975", "0.55121386", "0.54930264", "0.54930264", "0.54819703", "0.54586816", "0.5450155", "0.5415526", "0.5412067" ]
0.6624858
0
Given a parse tree with 2 parts, with a possible EQU part in the middle, finds the synonym part and the other part
def _get_parts_ignore_EQU(parse_tree):
    first_part = parse_tree[0]
    second_part = parse_tree[1]
    if second_part.label() == 'EQU':
        second_part = parse_tree[2]

    if first_part.label() == 'SYN':
        syn = first_part
        other = second_part
    else:
        syn = second_part
        other = first_part

    return other, syn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _solve_double_synonym(parse_tree, solution_format=None):\n def _get_value(solution_list, word):\n # Gets the value of a word in a solution list\n for solution in solution_list:\n if solution[0] == word:\n return solution[1]\n\n return 0\n\n # Get the two synonym parts\n first_syn, second_syn = _get_parts_ignore_EQU(parse_tree)\n first_syn = _create_sentence(first_syn)\n second_syn = _create_sentence(second_syn)\n\n # Get top 1000 matching scores for each part of the clue\n if solution_format is not None:\n first_solutions = SimilaritySolver.solve(first_syn, length=solution_format.get_total_length(spaces=True))\n second_solutions = SimilaritySolver.solve(second_syn, length=solution_format.get_total_length(spaces=True))\n second_words = [word for word, _ in second_solutions if solution_format.check(word)]\n else:\n first_solutions = SimilaritySolver.solve(first_syn)\n second_solutions = SimilaritySolver.solve(second_syn)\n second_words = [word for word, _ in second_solutions]\n\n # Combine both lists, with the value being the product of the value for eac syn part\n solutions = [(solution[0], _get_value(second_solutions, solution[0]) * solution[1]) for\n solution in first_solutions if solution[0] in second_words]\n\n return solutions", "def test_refersto_multi_word_no_quotes_no_index(self):\n inv_search = 'refersto:\"s parke\"'\n spi_search = 'find refersto s parke'\n self._compare_searches(inv_search, spi_search)", "def get_synonyms(word):\n syns_sets = wordnet.synsets(word)\n if syns_sets:\n # if there's synonyms, take the first set\n desired = syns_sets[0].lemma_names()\n desired = [the_name.replace(\"_\", \" \") for the_name in desired]\n return desired\n\n else:\n return False", "def extract_abbreviation_synonyms(self, corpus, use_gold, use_pred):\n assert not (use_gold and use_pred), \"No support for both\"\n entities = corpus.entities() if use_gold else corpus.predicted_entities()\n\n for entity in entities:\n prev2 = entity.prev_tokens(entity.sentence, 2)\n next1 = entity.next_tokens(entity.sentence, 1)\n in_parenthesis = len(prev2) == 2 and prev2[-1].word == \"(\" and len(next1) == 1 and next1[0].word == \")\"\n\n if (in_parenthesis):\n prev_entity = prev2[0].get_entity(entity.part, use_gold, use_pred)\n\n if prev_entity is not None and prev_entity.class_id == entity.class_id:\n # We could combine features already -- Yet, give more freedom to final clients to use the synonym's features or not\n # merged_binary_features = {key: (b1 or b2) for ((key, b1), (_, b2)) in zip(prev_entity.features.items(), entity.features.items())}\n\n prev_entity.features['synonym'] = entity\n entity.features['synonym'] = prev_entity", "def get_synonym(name: str) -> str:\n return s2n.get_name(n2s.get_smiles(name))", "def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)", "def synonyms_wiktionary(name, lang=\"fr\"):\n import wptools\n page = wptools.page(name, wiki='{0}.wiktionary.org'.format(\n lang), lang=lang, silent=True)\n page.get_parse()\n text = page.data['wikitext']\n syn = \"==== {{S|synonymes}} ====\"\n if syn not in text:\n return None\n text = text.split(syn)[1].split(\"====\")[0]\n reg = re.compile(\"[[]{2}(.*?)[]]{2}\")\n res = reg.findall(text)\n return res", "def parse_special_word(s):\n index1 = s.find(special_word_marker)\n if 
index1 != -1:\n index2 = s.find(special_word_marker, index1 + 1)\n if index2 != -1:\n sw = normalize(s[index1+len(special_word_marker) : index2])\n rest = normalize(s[index2+len(special_word_marker) :])\n return sw, rest\n return None, s", "def get_synonyms(word,w2v,pos,max=20):\n synonyms = []\n count = 0\n synsets = wordnet.synsets(word,pos=pos)\n for synset in synsets:\n candidate_names = []\n for lemma in synset.lemma_names():\n candidate_names.append(lemma)\n for hypo in synset.hyponyms():\n candidate_names.append(hypo)\n for hyper in synset.hypernyms():\n candidate_names.append(hyper)\n\n for lemma in candidate_names:\n if count >= max:\n break\n # print pos,word,lemma\n try:\n similarity = w2v.n_similarity([word.lower()],[lemma.lower() ])\n if similarity > .34 and lemma not in synonyms:\n synonyms.append(lemma)\n\n count += 1\n except:\n continue\n\n return synonyms", "def get_synonyms(word):\n try:\n query = {'word': word}\n cursor = database['Words'].find(query)\n synonym_set = set()\n if cursor is None:\n return None\n for document in cursor:\n if len(document['synsets']) > 0:\n for key, synset in document['synsets'].items():\n synonyms = synset['synonyms'].split(\",\")\n for synonym in synonyms:\n synonym_set.add(synonym.strip())\n if len(synonym_set) == 0:\n return None\n return synonym_set\n except Exception as e:\n print(e)\n return None", "def match_taxon(name):\n taxid = None\n scientific_name = None\n automatic_replacement = False\n iname = name.strip().lower().replace(' ', ' ')\n\n if name:\n # 1. 'name' matches the scientific name of a virus:\n if name in scientific_names:\n taxid = scientific_names[name]\n scientific_name = name\n # 2. 'name' is a close case-insensitive match for a virus:\n elif iname in lowercase_names:\n taxid = lowercase_names[iname]\n scientific_name = taxid_names[taxid]\n automatic_replacement = True\n # 3. 'name' is the exact synonym of some taxon\n elif name in synonyms:\n taxid = synonyms[name]\n scientific_name = taxid_names[taxid]\n # 4. 'name' is a substring of exactly one scientific name:\n else:\n matches = []\n for scientific_name in scientific_names.keys():\n if name in scientific_name:\n matches.append(scientific_name)\n if len(matches) > 1:\n break\n if len(matches) == 1:\n scientific_name = matches[0]\n taxid = scientific_names[scientific_name]\n\n return (name, taxid, scientific_name, automatic_replacement)", "def test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. 
j.\"'\n self._compare_searches(inv_search, spi_search)", "def _match_short_names(self, token_set_one, token_set_two):\n copy_set_one = token_set_one.copy()\n copy_set_two = token_set_two.copy()\n matching_dict = {}\n\n\n for token in token_set_one:\n res = self.dotted_name_re.search(token)\n if res:\n initials = res.group('name')\n for other_token in token_set_two:\n if other_token.startswith(initials):\n copy_set_one.remove(token)\n try:\n copy_set_two.remove(other_token)\n except KeyError:\n continue\n matching_dict[token] = other_token\n break\n else:\n return False, None, None, None\n\n return True, copy_set_one, copy_set_two, matching_dict", "def get_synonyms(word):\n synsets = [];\n syns = wn.synsets(word)\n for ss in syns:\n lemmas = []\n for l in ss.lemmas():\n lemma = { \"name\": l.name(), \"related_forms\": [] }\n for x in l.derivationally_related_forms():\n lemma['related_forms'].append(x.name())\n lemmas.append(lemma)\n synsets.append({\n \"lemmas\": lemmas,\n \"d\": ss.definition(),\n \"pos\": ss.pos(),\n \"id\": ss.name()\n })\n return synsets", "def _flow_terms(flow):\n return flow.synonyms", "def test_sqpp_nested_U1_or_SL2(self):\n self.assertEqual(self.parser.parse_query('(U(1) or SL(2,Z))'),\n ['+', 'u(1) | sl(2,z)'])", "def get_ligand_resname_from_topology( topfile ):\n\n file = open( topfile, 'r')\n text = file.readlines()\n file.close()\n indices = extract_section(text, 'atoms')\n for linenr in indices:\n line, comments = stripcomments(text[linenr])\n elements = line.split()\n if len(elements) > 5:\n resname = elements[3]\n return resname", "def weed_out_synonyms(word, potential_synonyms):\n real_synonyms = set()\n for synonym in potential_synonyms:\n max_distance = abs(len(word) - len(synonym))\n abbr_len = min(len(word), len(synonym))\n forgiveness = round(1/7 * abbr_len)\n if lev.distance(word, synonym) <= max_distance + forgiveness:\n # Then it's a synonym!\n real_synonyms.add(synonym)\n return real_synonyms", "def _get_entity1(span):\n for word in span:\n if word.head is word: # main verb\n for child in word.children:\n if child.dep_.endswith(\"nsubj\"):\n return child\n break\n return None", "def test_syntax_converter_expand_search_patterns_conjoined(self):\n spi_search = \"find t bob and sam\"\n inv_search = \"title:bob and title:sam\"\n self._compare_searches(inv_search, spi_search)", "def test_synonym(self): \n pass", "def _find_alias(line, aliases):\n for alias in aliases:\n if _soft_in(line, alias):\n return _soft_idx(line, alias)", "def find_synonymous(input_file, work_dir):\n # create the .ctl file\n ctl_file = op.join(work_dir, \"yn-input.ctl\")\n output_file = op.join(work_dir, \"nuc-subs.yn\")\n ctl_h = open(ctl_file, \"w\")\n ctl_h.write(\"seqfile = %s\\noutfile = %s\\nverbose = 0\\n\" %\n (input_file, output_file))\n ctl_h.write(\"icode = 0\\nweighting = 0\\ncommonf3x4 = 0\\n\")\n ctl_h.close()\n\n cl = YnCommandline(ctl_file)\n print >>sys.stderr, \"\\tyn00:\", cl\n r, e = cl.run()\n ds_value_yn = None\n ds_value_ng = None\n dn_value_yn = None\n dn_value_ng = None\n\n # Nei-Gojobori\n output_h = open(output_file)\n row = output_h.readline()\n while row:\n if row.find(\"Nei & Gojobori\") >=0:\n for x in xrange(5):\n row = output_h.next()\n dn_value_ng, ds_value_ng = row.split('(')[1].split(')')[0].split()\n break\n row = output_h.readline()\n output_h.close()\n\n # Yang\n output_h = open(output_file)\n for line in output_h:\n if line.find(\"+-\") >= 0 and line.find(\"dS\") == -1:\n parts = line.split(\" +-\")\n ds_value_yn = 
extract_subs_value(parts[1])\n dn_value_yn = extract_subs_value(parts[0])\n\n if ds_value_yn is None or ds_value_ng is None:\n h = open(output_file)\n print >>sys.stderr, \"yn00 didn't work: \\n%s\" % h.read()\n\n return ds_value_yn, dn_value_yn, ds_value_ng, dn_value_ng", "def find_hypernyms(self, syns):\n names = set()\n # Find hypernyms of each syn\n for syn in syns:\n hypernyms = syn.hypernyms()\n # find hypernyms one more level up\n for hypernym in hypernyms:\n names.add(hypernym.name())\n hypernyms_second = hypernym.hypernyms()\n for h in hypernyms_second:\n names.add(h.name())\n return names", "def parse_prefix(s):\n\n Term.str = replace_string(s) # replace operators with more than one letter to be one letter\n second = None\n\n # if there is a left parentheses it means that we are having an operator that is enclosed by parenthesis\n if is_left_parenthese(Term.str[0]):\n Term.eat() # eat left parentheses\n first, Term.str = Formula.parse_prefix(Term.str) # take first formula of the operator\n root = switch_root_to_str(Term.str[0]) # take the root\n Term.eat() # eat the root\n second, Term.str = Formula.parse_prefix(Term.str) # take second formula of the operator\n Term.eat() # eat right parentheses\n\n # if first letter is a quantifier ('A' or 'E')\n elif is_quantifier(Term.str[0]):\n root = Term.str[0] # take the quantifier as root\n Term.eat() # eat the root ( quantifier)\n first = Term.get_whole_name() # take the name of the variable\n Term.eat() # eat the left bracket\n second, Term.str = Formula.parse_prefix(Term.str) # take the formula\n Term.eat() # eat the right bracket\n\n # if first letter is a relation (starts with capital letter)\n elif is_relation(Term.str[0]):\n root = Term.get_whole_name() # take the name of the relation\n first = []\n Term.eat() # eat left parentheses\n\n # if we didn't find closing parenthesis - than there must be at least one Term inside the parenthesis.\n # take it.\n if not is_right_parenthese(Term.str[0]):\n term_obj, Term.str = Term.parse_prefix(Term.str)\n first.append(term_obj)\n\n # while there is a comma, take the next term\n while is_comma(Term.str[0]):\n Term.eat() # eat left parentheses\n term_obj, Term.str = Term.parse_prefix(Term.str)\n first.append(term_obj)\n Term.eat() # eat right parentheses\n\n # else , it is an operator\n else:\n\n # if it's an unary operator\n if is_unary(Term.str[0]):\n root = Term.str[0]\n Term.eat()\n first, Term.str = Formula.parse_prefix(Term.str)\n\n # else , the operator is binary or equaluty\n else:\n first, Term.str = Term.parse_prefix(Term.str)\n # if it's a binary operator\n if is_binary(Term.str[0]):\n root = Term.str[0:2]\n Term.eat()\n\n # if it's an equal operator\n else:\n root = Term.str[0]\n Term.eat()\n second, Term.str = Term.parse_prefix(Term.str)\n returned_formula = Formula(root, first, second)\n return returned_formula, Term.str", "def select_syninfo(self, cellname, srctype, syntype):\n idx = np.char.startswith(self.syntab['dest'], cellname+'/') & \\\n np.char.startswith(self.syntab['source'], srctype) & \\\n np.char.startswith(self.syntab['type'], syntype)\n return self.syntab[idx]", "def test_spires_syntax_trailing_colon(self):\n spi_search = \"find a watanabe:\"\n invenio_search = \"author:watanabe:\"\n self._compare_searches(invenio_search, spi_search)", "def GetSynonymTerm(con, cur, synonym):\n err, termid = GetSynonymTermId(con, cur, synonym)\n if err:\n debug(2, 'ontology term %s is not a synonym' % synonym)\n return err, str(termid)\n err, term = dbidval.GetDescriptionFromId(con, 
cur, 'ontologyTable', termid)\n if err:\n debug(3, 'ontology term not found for termid %d (synonym %s)' % (termid, synonym))\n return err, term\n return '', term", "def nsrSynonyms():\r\n # Input file\r\n synonymsFile = pd.read_csv(args.indir+\"/\"+args.infile2, header=2,\r\n sep=\"\\t\", encoding=\"utf8\")\r\n\r\n # Parse taxonomic names into their elementary components\r\n synonyms = synonymsFile.loc[synonymsFile['language'] == 'Scientific']\r\n synonymDict = {}\r\n for synonym, taxon in zip(synonyms['synonym'], synonyms['taxon']):\r\n synonym = taxonParser(synonym)\r\n taxon = taxonParser(taxon)\r\n if not taxon or synonym is False or taxon is False:\r\n pass\r\n else:\r\n synonymDict[synonym] = taxon\r\n\r\n # Write dictionary to file\r\n with io.open(par_path+\"/results/nsr_synonyms.csv\", \"w\", encoding=\"utf-8\") as outfile:\r\n outfile.write(\"synonym_name,identification_reference,taxon_name,taxon_author\\n\")\r\n for key, value in synonymDict.items():\r\n outfile.write('\"%s\",\"%s\",\"%s\",\"%s\"' % (' '.join(str(key).split()[:2]), ' '.join(str(key).split()[2:]),\r\n ' '.join(str(value).split()[:2]), ' '.join(str(value).split()[2:])))\r\n outfile.write(\"\\n\")\r\n return [*synonymDict], synonymDict", "def meronym(self, sense=0):\n s = self._synset(self.text, sense=sense)\n\n if not s:\n return []\n\n return s.member_meronyms()" ]
[ "0.6501454", "0.59847134", "0.57638747", "0.56675124", "0.56613374", "0.54569155", "0.54319113", "0.5426494", "0.539753", "0.5339401", "0.53225523", "0.5321102", "0.5310823", "0.5307761", "0.52566755", "0.5253671", "0.5250814", "0.5199034", "0.51778674", "0.5174493", "0.5171042", "0.5166936", "0.51542944", "0.5154032", "0.5118358", "0.51022786", "0.50980407", "0.509162", "0.5046906", "0.5034817" ]
0.6401357
1
Parses the abbreviation file to find all possible abbreviations, then calls a helper function to update the tree
def _handle_abbreviations(parse_tree): path = os.path.join(GrammarDefinitions.FOLDER, GrammarDefinitions.ABBREVIATION_FILE) with open(path, "r") as f: lines = f.read().splitlines() abbr_dict = {line.split(GrammarDefinitions.ABBR_SEP)[0]: line.split(GrammarDefinitions.ABBR_SEP)[1] for line in lines} _replace_abbreviation(parse_tree, abbr_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _replace_abbreviation(parse_tree, abbr_dict):\n if not isinstance(parse_tree, nltk.Tree):\n # Reached a leaf\n return\n\n if parse_tree.label() == 'ABBR':\n # Replace word with its abbreviation\n word = parse_tree[0]\n parse_tree.set_label('WORD')\n parse_tree[0] = abbr_dict[word]\n return\n\n for son in parse_tree:\n # Recursive call\n _replace_abbreviation(son, abbr_dict)", "def read_abbrevs(): \n abbrevs = {}\n with open('abbrev.txt','r') as fhx:\n for line in fhx:\n line = line.decode(\"utf-8\").upper()\n abbrevs[line.split('=')[0]] = line.split('=')[1].rstrip() + \" \"\n \n return abbrevs", "def test_abbreviate_all():\n statement = \"ENDPROC\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"ENDP.\"\n statement = \"POSITION\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"POS.\"", "def parse_abbreviation(text):\n rv = {}\n match = ABBREVIATION.findall(text)\n for m in match:\n line = m[0]\n key = m[1]\n value = m[2]\n # print key + \", \" + value\n text = re.sub(re.escape(line), '', text)\n text = re.sub('(?<=\\s|:|\\()' + re.escape(key) + '(?=\\s|.|\\))',\n '<abbr title=\"' + value + '\" >' + key + '</abbr>', text)\n return text", "def test_abbreviate_miss():\n statement = \"PEEK(1234)\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"PEEK(1234)\"\n statement = \"QUIT\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"QUIT\"\n statement = \"ENDPRO\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"ENDPRO\"\n statement = \"POSITIOM\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"POSITIOM\"", "def test_load_abbreviations() -> None:\n config.utils.journal_abbreviations = [(\"Test Journal\", \"Test J.\")]\n JournalAbbreviations.load_abbreviations()\n assert JournalAbbreviations._abbreviations == {\"Test Journal\": \"Test J.\"}\n assert JournalAbbreviations._fullwords == {\"Test J.\": \"Test Journal\", \"Test J\": \"Test Journal\"}", "def test_abbreviate_partial():\n statement = \"ENDPROC A\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"ENDP. 
A\"\n statement = \"POSITION10,5\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"POS.10,5\"", "def _parse_abbreviation(self, game_data):\n name = game_data('td[data-stat=\"opp\"]:first')\n name = re.sub(r'.*/teams/', '', str(name))\n name = re.sub('/.*', '', name).upper()\n setattr(self, '_opponent_abbr', name)", "def expand_abbreviation(abbr, doc_type = 'html', profile_name = 'plain'):\n\ttree = parse_into_tree(abbr, doc_type)\n\tif tree:\n\t\treturn replace_variables(re.sub('\\|', insertion_point, tree.to_string(profile_name) or ''))\n\t\t\n\treturn ''", "def parse_into_tree(abbr, doc_type='html'):\n\troot = Tag('', 1, doc_type)\n\ttoken = re.compile(r'([\\+>])?([a-z@\\!][a-z0-9:\\-]*)(#[\\w\\-\\$]+)?((?:\\.[\\w\\-\\$]+)*)(\\*(\\d*))?(\\+$)?', re.IGNORECASE)\n\t\n\tif not abbr:\n\t\treturn None\n\t\n\tdef expando_replace(m):\n\t\tex = m.group(0)\n\t\ta = get_abbreviation(doc_type, ex)\n\t\treturn a and a.value or ex\n\t\t\n\tdef token_expander(operator, tag_name, id_attr, class_name, has_multiplier, multiplier, has_expando):\n\t\t\n\t\tmultiply_by_lines = (has_multiplier and not multiplier)\n\t\tmultiplier = multiplier and int(multiplier) or 1\n\t\t\n\t\tif has_expando:\n\t\t\ttag_name += '+'\n\t\t\n\t\tcurrent = is_snippet(tag_name, doc_type) and Snippet(tag_name, multiplier, doc_type) or Tag(tag_name, multiplier, doc_type)\n\t\t\n\t\tif id_attr:\n\t\t\tcurrent.add_attribute('id', id_attr[1:])\n\t\tif class_name:\n\t\t\tcurrent.add_attribute('class', class_name[1:].replace('.', ' '))\n\t\t\t\n\t\t# dive into tree\n\t\tif operator == '>' and token_expander.last:\n\t\t\ttoken_expander.parent = token_expander.last;\n\t\t\t\n\t\ttoken_expander.parent.add_child(current)\n\t\ttoken_expander.last = current\n\t\t\n\t\tif multiply_by_lines:\n\t\t\troot.multiply_elem = current\n\t\t\n\t\treturn ''\n\t\t\n\t# replace expandos\n\tabbr = re.sub(r'([a-z][a-z0-9]*)\\+$', expando_replace, abbr)\n\t\n\ttoken_expander.parent = root\n\ttoken_expander.last = None\n\t\n\t\n#\tabbr = re.sub(token, lambda m: token_expander(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5), m.group(6), m.group(7)), abbr)\n\t# Issue from Einar Egilsson\n\tabbr = token.sub(lambda m: token_expander(m.group(1), m.group(2), m.group(3), m.group(4), m.group(5), m.group(6), m.group(7)), abbr)\n\t\n\troot.last = token_expander.last\n\t\n\t# empty 'abbr' variable means that abbreviation was expanded successfully, \n\t# non-empty variable means there was a syntax error\n\treturn not abbr and root or None;", "def build_match_tree(abbreviation_list):\n match_tree = {}\n for word, abbreviation in abbreviation_list:\n tree_node = match_tree\n for letter in word[:-1]:\n if letter not in tree_node:\n tree_node[letter] = {}\n tree_node = tree_node[letter]\n tree_node[word[-1]] = abbreviation\n return match_tree", "def abbreviate(match_tree, statement):\n\n result = statement\n current_node = match_tree\n for position, letter in enumerate(statement.upper()):\n current_node = current_node.get(letter)\n if not isinstance(current_node, dict):\n if isinstance(current_node, str):\n result = current_node + statement[(position + 1):]\n break\n return result", "def expand_abbrevs(name):\n key = name.upper()\n for abbrev, word in ABBREVS.iteritems():\n key = re.sub(abbrev, word, key)\n \n #Remove (.*) from the street name\n key = re.sub(r'\\(.*?(:?\\)|$)', '', key)\n \n #Unify names\n key = NUMBER_IN_NAMES_REGEX.sub(lambda i: i.group(1) + \" \", key)\n key = re.sub(u\"Ё\", u\"Е\", key)\n key = re.sub(u\"[\\\"'«»№]\", u\" 
\", key)\n\n # remove \"им\" prefix\n key = re.sub(ur'[^\\s]ИМ[\\.\\s]+', u' ', key)\n\n #Change name parts order\n words = key.split(r\" \")\n words.sort()\n key = \" \".join(words)\n\n key = re.sub(u\"\\s+\", u\" \", key).strip()\n\n logging.debug(\"Street name %s was converted to %s\" % (name, key))\n \n return key", "def deabbreviate(self, st):\n\t\tabbrs = {'gws': 'greater western sydney giants',\n\t\t\t\t 'gwsg': 'greater western sydney giants',\n\t\t\t\t 'afl': 'australian football league',\n\t\t\t\t 'nrc': 'national rugby championship',\n\t\t\t\t 'nrl': 'national rugby league',\n\t\t\t\t 'syd': 'sydney',\n\t\t\t\t 'mel': 'melbourne',\n\t\t\t\t 'melb': 'melbourne',\n\t\t\t\t 'bris': 'brisbane',\n\t\t\t\t 'brisb': 'brisbane',\n\t\t\t\t 'gc': 'gold coast',\n\t\t\t\t 'adel': 'adelaide',\n\t\t\t\t 'canb': 'canberra',\n\t\t\t\t 'mt': 'mount',\n\t\t\t\t 'utd': 'united',\n\t\t\t\t 'cty': 'city',\n\t\t\t\t 'football club': 'fc',\n\t\t\t\t 'snr': 'senior',\n\t\t\t\t 'jr': 'junion',\n\t\t\t\t 'nsw': 'new south wales' ,\n\t\t\t\t 'vic': 'victoria',\n\t\t\t\t 'tas' : 'tasmania',\n\t\t\t\t 'sa': 'south australia',\n\t\t\t\t 'wa': 'western australia',\n\t\t\t\t 'act': 'australian capital territory',\n\t\t\t\t 'nt': 'northern territory',\n\t\t\t\t 'qld': 'queensland',\n\t\t\t\t 'champs': 'championships', \n\t\t\t\t 'champ': 'championship', \n\t\t\t\t 'soc': 'society',\n\t\t\t\t 'ent': 'entertainment',\n\t\t\t\t 'intl': 'international', \n\t\t\t\t 'int': 'international', \n\t\t\t\t 'aust': 'australian'}\n\n\t\t# first replace full state names by abbreviations;\n\t\tfor ab in abbrs:\n\t\t\tst = re.sub(r'\\b' + ab + r'\\b', abbrs[ab], st)\n\n\t\treturn st", "def map2mw_Aug(d,k1,entry):\n L = entry.metad['L']\n if L in ['7201','7202']: # 7203 relates to 'hay'\n return 'hA'\n if k1 in map2mw_special_Aug:\n return map2mw_special_Aug[k1]\n regexes = [\n u'<ab>aug.</ab> de {%(.*?)%}',\n u'<ab>aug.</ab> {%(.*?)%}',\n u'<ab>aug.</ab> du <ab>c.</ab> de {%(.*?)%}',\n\n ]\n line = entry.datalines[0] # first line of entry in bur.txt\n for regex in regexes:\n m = re.search(regex,line)\n if m:\n root = m.group(1) # root in \n root_slp1=roman_slp1_mw(root,'verb',d)\n if root_slp1 != None:\n return root_slp1\n\n return '?'", "def parse_file():\r\n # Open the text file as read only\r\n file = open(\"formulas.txt\", \"r\")\r\n\r\n # Iterate through each line in the file\r\n for formula in file:\r\n # Create a new tree based on the formula\r\n tree = parse_formula(formula.rstrip())\r\n # Formatting\r\n print(\"Formula: {}\".format(formula.rstrip()))\r\n print(\"Tree:\")\r\n tree.display()\r\n print(\"-----------------------------\")", "def test_abbreviate_nothing():\n statement = \"\"\n assert abbreviate(build_match_tree(ABBREVIATIONS), statement) == \"\"", "def main():\n # call open_file() to get file pointer \n fd = open_file()\n # call fill completion to get dict, then close the openned file\n full_set = create_dict(fd)\n wrds = find_words(full_set)\n print(wrds)\n fd.close()\n # ask for a prefix in while loop", "def _parse_abbreviation(self, cell_content):\n span = cell_content.find(\"span\")\n full = span.attrs[\"title\"].strip()\n abbrv = span.text.strip()\n return abbrv, full", "def parseBA(fd):\n aut = dict()\n first_line = fd.readline().strip()\n aut[\"initial\"] = [first_line]\n aut[\"transitions\"] = []\n aut[\"final\"] = []\n\n while True:\n line = fd.readline()\n if not line:\n return aut\n\n line = line.strip()\n if line == \"\":\n continue\n\n match = re.match(r'^(?P<state>[^-,>]+)$', line)\n if 
match:\n aut[\"final\"].append(match.group(\"state\"))\n continue\n\n match = re.match(r'^(?P<symb>[^-,>]+),(?P<src>[^-,>]+)->(?P<tgt>[^-,>]+)$',\n line)\n if match:\n symb = match.group(\"symb\")\n src = match.group(\"src\")\n tgt = match.group(\"tgt\")\n aut[\"transitions\"].append((src, symb, tgt))\n continue\n\n raise Exception(\"Invalid format: \" + line)", "def __init__(self, dictionary):\n self.abbrev_dict = {}\n for s in dictionary:\n if len(s) < 3:\n abbrev = s\n else:\n abbrev = s[0] + str(len(s) - 2) + s[-1]\n \n if abbrev not in self.abbrev_dict:\n self.abbrev_dict[abbrev] = set()\n self.abbrev_dict[abbrev].add(s)", "def parse(self, file):\n # The root tree\n tree = Tree()\n # Dictionary of subtrees that are created\n # The key is the name and the value is the corresponding TreeElement\n subtrees = dict()\n\n current_subtree = tree\n current_tree_element = None\n next_is_start = False\n next_is_comment = False\n comment = False\n last_indent = 0\n lnr = 0\n with open(file, 'r') as bfile:\n for line in bfile:\n lnr += 1\n comment = next_is_comment\n\n line = re.sub(r'//\\*\\*.*?\\*\\*//', '', line) # Block comments starting and ending in the same line\n\n if '**//' in line:\n # Block comments ending in this line\n # This line as well as the following will contain valid code\n next_is_comment = False\n comment = False\n line = re.sub(r'.*\\*\\*//', '', line)\n if '//**' in line:\n # Block comments starting in this line\n # This line may contain valid code, the next ones won't\n next_is_comment = True\n line = re.sub(r'//\\*\\*.*', '', line)\n\n line = re.sub(r'//.*', '', line) # Line comments\n\n line = line.rstrip()\n if not line:\n continue\n\n if not comment:\n indent = len(line) - len(line.lstrip())\n if indent % 4 != 0:\n raise ParseError('Error parsing line {}: Indent is not a multiple of 4'.format(lnr))\n\n line_content = line.lstrip()\n\n if indent == 0 and line_content.startswith('-->'):\n # This is the declaration of the start. 
Next line contains root element\n next_is_start = True\n current_subtree = tree\n last_indent = indent\n continue\n\n if next_is_start:\n # This line contains the root element of the main tree\n next_is_start = False\n element = self.create_tree_element(line_content, current_tree_element)\n tree.set_root_element(element)\n current_tree_element = element\n\n if indent == 0 and line_content.startswith('#'):\n # This is the declaration of a new subtree\n current_subtree = Tree()\n subtrees[line_content[1:]] = current_subtree\n current_tree_element = None\n last_indent = indent\n continue\n\n if indent < last_indent:\n # Go layers up, depending on indent difference\n for _ in range(indent, last_indent, 4):\n current_tree_element = current_tree_element.parent\n\n if re.search(r'\\s*-?->\\s*', line_content):\n # Arrow in line, split in decision result and call\n result, call = re.split(r'\\s*-?->\\s*', line_content, 1)\n\n if call.startswith('#'):\n # A subtree is called here.\n subtree_name = call.strip('#')\n if subtree_name not in subtrees:\n raise AssertionError('Error parsing line {}: {} not defined'.format(lnr, call))\n # The root element of the subtree should be placed in this tree position\n if current_tree_element is None:\n # The current subtree is empty, set the subtree as its root element\n current_subtree.set_root_element(subtrees[subtree_name].root_element)\n else:\n # Append this subtree in the current position\n current_tree_element.add_child_element(copy.copy(subtrees[subtree_name].root_element), result)\n\n elif re.search(r'\\s*,\\s*', call):\n # A sequence element\n actions = re.split(r'\\s*,\\s*', call)\n element = self.create_sequence_element(actions, current_tree_element)\n current_tree_element.add_child_element(element, result)\n\n elif call.startswith('@'):\n # An action is called\n element = self.create_tree_element(call, current_tree_element)\n current_tree_element.add_child_element(element, result)\n\n elif call.startswith('$'):\n # A decision is called\n element = self.create_tree_element(call, current_tree_element)\n current_tree_element.add_child_element(element, result)\n current_tree_element = element\n\n else:\n raise ParseError('Error parsing line {}: Element {} is neither an action nor a decision'.format(lnr, call))\n\n else:\n # No arrow, must be the beginning of a new subtree\n element = self.create_tree_element(line_content, current_tree_element)\n current_subtree.set_root_element(element)\n current_tree_element = element\n\n last_indent = indent\n return tree", "def combine_state_names_and_abbreviations():\n lst=[]\n for k,v in us_state_abbrev.items():\n lst.append(v)\n lst = sorted(lst[:10])\n state = sorted(states)\n print(lst+state[-10:])\n return", "def map2mw_Des(d,k1,entry):\n if k1 in map2mw_special_Des:\n return map2mw_special_Des[k1]\n regexes = [\n u'<ab>dés.</ab> de {%(.*?)%}',\n u'<ab>dés.</ab> {%(.*?)%}',\n u'<ab>dés.</ab> du <ab>c.</ab> de {%(.*?)%}',\n\n ]\n line = entry.datalines[0] # first line of entry in bur.txt\n for regex in regexes:\n m = re.search(regex,line)\n if m:\n root = m.group(1) # root in \n root_slp1=roman_slp1_mw(root,'verb',d)\n if root_slp1 != None:\n return root_slp1\n\n return '?'", "def test_abbreviation(self):\n self.assertEqual(self.compound.abbreviation, \"Cool\")", "def map2mw_F(d,k1,entry):\n if k1 in map2mw_special_F:\n return map2mw_special_F[k1]\n regexes = [\n u'<ab>f2.</ab> de {%(.*?)%}',\n u'<ab>f2.</ab> {%(.*?)%}',\n #u'<ab>f2.</ab> du <ab>c.</ab> de {%(.*?)%}',\n\n ]\n line = entry.datalines[0] # first line 
of entry in bur.txt\n for regex in regexes:\n m = re.search(regex,line)\n if m:\n root = m.group(1) # root in \n root_slp1=roman_slp1_mw(root,'verb',d)\n if root_slp1 != None:\n return root_slp1\n\n return '?'", "def convert_abbrev_in_text(text):\r\n tokens = word_tokenize(text)\r\n tokens = [convert_abbrev(word) for word in tokens]\r\n text = ' '.join(tokens)\r\n return text", "def audit_abbr(over_abbreviated, street_name):\n m = over_abbr_re.search(street_name)\n if m:\n abbr = m.group()\n over_abbreviated[abbr].add(street_name)", "def load_data() -> list:\n # trans_dict is used for changing the given names into standardized names.\n trans_dict = {\"chr1\": \"1\", \"chr2\": \"2\", \"chr3\": \"3\", \"chr4\": \"4\", \"chr5\": \"5\", \"chr6\": \"6\", \"chr7\": \"7\",\n \"chr8\": \"8\", \"chr9\": \"9\", \"chr10\": \"10\", \"chr11\": \"11\", \"chr12\": \"12\", \"chr13\": \"13\", \"chr14\": \"14\",\n \"chr15\": \"15\", \"chr16\": \"16\", \"chr17\": \"17\", \"chr18\": \"18\", \"chr19\": \"19\", \"chrx\": \"x\", \"chry\": \"y\"}\n # This try statement catches user error.\n try:\n with open(sys.argv[1]) as bed_file, open(sys.argv[2]) as fasta_file:\n fasta_records = []\n # Opens the bed file and splits into lists\n bed_file = list(csv.reader(bed_file, delimiter='\\t'))\n # Changes the names of the chromosomes in bed file, does some light rearranging and formatting.\n bed_file = [[trans_dict[record[0].lower()], record[1], record[3][0:record[3].index(\n '\\'')]] for record in bed_file]\n # Sorts the desired indices by chromosome, then by index in the chromosome.\n bed_file = sorted(bed_file, key=itemgetter(1))\n bed_file = sorted(bed_file, key=itemgetter(0))\n # This stores the desired indexes for each chromosome.\n indexable_bed_records = {'1': [], '2': [], '3': [], '4': [], '5': [], '6': [], '7': [], '8': [], '9': [],\n '10': [], '11': [], '12': [], '13': [], '14': [], '15': [], '16': [], '17': [],\n '18': [], '19': [], 'x': [], 'y': []}\n # Put each desired index into it's appropriate chromosome list.\n for record in bed_file:\n indexable_bed_records[record[0]].append([record[2], record[1]])\n # Loops over fasta records in the supplied fasta file\n for fasta_record in fasta_iter(fasta_file):\n # grabs the chromosome id\n chrom_id = fasta_record[\"header\"][:fasta_record[\"header\"].index(' ')].lower()\n # Some chromosomes are not desired, skip them.\n if chrom_id not in indexable_bed_records.keys():\n continue\n # Grabs the indexes we want to extract from the chromosome.\n indexes = indexable_bed_records[chrom_id]\n # Grabs each index+/-10 from the sequence\n for index in indexes:\n fasta_records.append([index[0], fasta_record[\"seq\"][int(index[1]) - 10:int(index[1]) + 10]])\n # Returns a list of lists of format [5'/3',splice site sequence]\n return fasta_records\n # Catches user error.\n except (FileNotFoundError, IndexError) as e:\n if type(e) is IndexError:\n sys.stderr.write(\"Usage: {} bed_file fasta_file\\n\\tbed_file: The appropriate bed file. 
\\n\\t\"\n \"fasta_file: The appropriate fasta file.\\n\".format(os.path.basename(__file__)))\n elif type(e) is FileNotFoundError:\n sys.stderr.write(\"One of the specified files was not found.\\n\")\n sys.exit(1)", "def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r" ]
[ "0.6594188", "0.64209026", "0.6191263", "0.6113771", "0.59502375", "0.59138536", "0.58576477", "0.5774968", "0.56978416", "0.5646811", "0.5631029", "0.5490518", "0.54843515", "0.54673076", "0.54151493", "0.5384615", "0.53294075", "0.51091653", "0.5101626", "0.50498986", "0.503508", "0.5031566", "0.4996437", "0.4976494", "0.4968995", "0.49684757", "0.49628216", "0.49322784", "0.4926831", "0.49171528" ]
0.7767338
0
Updates the given tree by replacing the word with its abbreviated form
def _replace_abbreviation(parse_tree, abbr_dict): if not isinstance(parse_tree, nltk.Tree): # Reached a leaf return if parse_tree.label() == 'ABBR': # Replace word with its abbreviation word = parse_tree[0] parse_tree.set_label('WORD') parse_tree[0] = abbr_dict[word] return for son in parse_tree: # Recursive call _replace_abbreviation(son, abbr_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def abbreviate(match_tree, statement):\n\n result = statement\n current_node = match_tree\n for position, letter in enumerate(statement.upper()):\n current_node = current_node.get(letter)\n if not isinstance(current_node, dict):\n if isinstance(current_node, str):\n result = current_node + statement[(position + 1):]\n break\n return result", "def insert(self, word: str) -> None:\n curr = self.root\n for ch in word:\n curr = curr.children[ch]\n curr.is_word = True", "def insert(self, word: str) -> None:\n current = self.root\n for i, letter in enumerate(word): \n if current.children.get(letter):\n current = current.children.get(letter)\n else:\n current.children[letter] = Node(letter)\n current = current.children[letter]\n if i == len(word) - 1:\n current.is_word = True", "def insert(self, word):\n current = self.root\n for letter in word:\n current = current.children[letter]\n current.is_word = True", "def update_short_name(name):\n # First verify that the common errors have been fixed\n name = update_street_name(name)\n\n # Find the abbreviation to replace\n m = over_abbr_re.search(name)\n if m:\n if m.group() in abbreviations:\n name = over_abbr_re.sub(abbreviations[m.group()], name)\n\n return name", "def insert(self, word: str) -> None:\r\n nroot=self.root\r\n for i in word:\r\n \r\n # index=ord(i)-ord('a')\r\n if i not in nroot.children:\r\n nroot.children[i]=self.root\r\n nroot=nroot.children[i] \r\n \r\n nroot.endofword=True", "def expand_abbreviation(abbr, doc_type = 'html', profile_name = 'plain'):\n\ttree = parse_into_tree(abbr, doc_type)\n\tif tree:\n\t\treturn replace_variables(re.sub('\\|', insertion_point, tree.to_string(profile_name) or ''))\n\t\t\n\treturn ''", "def convert_abbrev(word):\r\n return abbreviations[word.lower()] if word.lower() in abbreviations.keys() else word", "def update_word(self, word):\n self.word = word", "def _handle_abbreviations(parse_tree):\n path = os.path.join(GrammarDefinitions.FOLDER, GrammarDefinitions.ABBREVIATION_FILE)\n with open(path, \"r\") as f:\n lines = f.read().splitlines()\n\n abbr_dict = {line.split(GrammarDefinitions.ABBR_SEP)[0]: line.split(GrammarDefinitions.ABBR_SEP)[1] for line in\n lines}\n _replace_abbreviation(parse_tree, abbr_dict)", "def insert(self, word: str) -> None:\n currnode=self.root\n for ch in word:\n #dic.get(parameter, default value)\n node=currnode.children.get(ch,TrieNode())\n currnode.children[ch]=node\n currnode=node\n \n currnode.iswordend=True", "def insert(self, word: str) -> None:\n parent = self.root\n for i, char in enumerate(word):\n if char not in parent.children:\n parent.children[char] = TreeNode(char)\n parent = parent.children[char]\n if i == len(word) - 1:\n parent.endhere = True", "def addWord(self, word: str) -> None:\n node = self.root\n \n for c in word:\n node = node.children[c]\n \n node.word = True", "def deabbreviate(self, st):\n\t\tabbrs = {'gws': 'greater western sydney giants',\n\t\t\t\t 'gwsg': 'greater western sydney giants',\n\t\t\t\t 'afl': 'australian football league',\n\t\t\t\t 'nrc': 'national rugby championship',\n\t\t\t\t 'nrl': 'national rugby league',\n\t\t\t\t 'syd': 'sydney',\n\t\t\t\t 'mel': 'melbourne',\n\t\t\t\t 'melb': 'melbourne',\n\t\t\t\t 'bris': 'brisbane',\n\t\t\t\t 'brisb': 'brisbane',\n\t\t\t\t 'gc': 'gold coast',\n\t\t\t\t 'adel': 'adelaide',\n\t\t\t\t 'canb': 'canberra',\n\t\t\t\t 'mt': 'mount',\n\t\t\t\t 'utd': 'united',\n\t\t\t\t 'cty': 'city',\n\t\t\t\t 'football club': 'fc',\n\t\t\t\t 'snr': 'senior',\n\t\t\t\t 'jr': 'junion',\n\t\t\t\t 'nsw': 'new 
south wales' ,\n\t\t\t\t 'vic': 'victoria',\n\t\t\t\t 'tas' : 'tasmania',\n\t\t\t\t 'sa': 'south australia',\n\t\t\t\t 'wa': 'western australia',\n\t\t\t\t 'act': 'australian capital territory',\n\t\t\t\t 'nt': 'northern territory',\n\t\t\t\t 'qld': 'queensland',\n\t\t\t\t 'champs': 'championships', \n\t\t\t\t 'champ': 'championship', \n\t\t\t\t 'soc': 'society',\n\t\t\t\t 'ent': 'entertainment',\n\t\t\t\t 'intl': 'international', \n\t\t\t\t 'int': 'international', \n\t\t\t\t 'aust': 'australian'}\n\n\t\t# first replace full state names by abbreviations;\n\t\tfor ab in abbrs:\n\t\t\tst = re.sub(r'\\b' + ab + r'\\b', abbrs[ab], st)\n\n\t\treturn st", "def insert(self, word: str) -> None:\n node = self.root\n for w in word:\n child = node.children.get(w)\n if not child:\n node.children[w] = TreeNode(w)\n node = node.children[w]\n node.end = True", "def update_name(name, mapping):\n words_name = name.split(\" \")\n if words_name not in expected:\n for word in words_name:\n if word in mapping:\n name = name.replace(word, mapping[word])\n \n if word == word.lower():\n if word not in allowed_lowercase:\n name = name.replace(word, word.capitalize())\n \n if words_name[0] not in expected:\n if words_name[0] not in mapping:\n if words_name[0] == \"Fernando\":\n name = \"Avenida \" + name\n elif words_name[0] == \"rua\":\n pass\n else:\n name = \"Rua \" + name\n\n return name", "def insert(self, word: str) -> None:\n node = self.root\n for c in word:\n if c not in node.children:\n node.children[c] = TrieNode(c)\n node = node.children[c]\n node.isword = True\n node.word = word", "def convert_abbrev_in_text(text):\r\n tokens = word_tokenize(text)\r\n tokens = [convert_abbrev(word) for word in tokens]\r\n text = ' '.join(tokens)\r\n return text", "def insert(self, word: str) -> None:\r\n node=self.root\r\n for c in word:\r\n if c not in node:\r\n node = node.setdefault(c,{})\r\n else:\r\n node = node[c]\r\n node[self.end_of_words]=self.end_of_words", "def update_name(name, mapping): \n words = name.split()\n for w in range(len(words)):\n if words[w] in mapping:\n #print words[w]\n words[w] = mapping[words[w]]\n name = \" \".join(words)\n return name", "def insert(self, word: str) -> None:\n curr = self.root\n for c in word:\n if not c in curr.adj:\n curr.adj[c] = Node(c)\n curr = curr.adj[c]\n curr.isWord = True", "def insert(self, word: str) -> None:\n node = self.root\n for char in word:\n if char not in node.child:\n #append the children\n node.child[char] = Node(char)\n #descend node to node.child that has the previous char, simlar to node = node.left\n node = node.child.get(char)\n node.isWord = True", "def adjust_tree(tree, args_dict = {}):\n if ((tree.category() == 'VBar') and (len(tree.children) == 2) and (tree.children[1].label.has_key('SUBCAT')) and (tree.children[1].label['SUBCAT'] == 'copula')):\n if (tree.children[0].label[feature_type] == 'DP'):\n DP = tree.children[0].label\n tree.children[0].label = FeatStructNonterminal(dict([item for item in DP.items() if (item[0] != 'PARTICLE')] + [('PARTICLE', 'pred')])) # give the DP a dummy particle\n if ((tree.category() == 'TP') and (len(tree.children) == 1)): # insert vacuous subject node\n tree.children = [SynTree(Trace(tree.children[0].ID, False), [], tree.QR_level, tree.language), tree.children[0]]\n if ((tree.category() == 'DBar') and (len(tree.children) == 1) and (tree.children[0].category() == 'NP')): # insert ambiguous determiner\n tree.children = [SynTree(FeatStructNonterminal([('PropN', False), (feature_type, 'D'), ('TRACE', False)]), 
[SynTree('*det*', [], tree.QR_level, tree.language)], tree.QR_level, tree.language), tree.children[0]]\n return args_dict", "def insert_word(self, word):\n if word:\n current = self.root\n for letter in word:\n \n if not current.get_child(letter): # if letter is not a child of current node\n current.set_child(letter) # add letter as a child\n\n current = current.get_child(letter)\n \n if not current.get_end():\n current.set_end() # set last letter of word to end if word not already in trie\n self.size += 1", "def insert(self, word):\n #edge case\n if word == \"\": \n self._dict.children[26] = TrieNode(\"\") \n self._dict.children[26].isleaf = True\n \n \n cur = self._dict\n for c in word:\n ind = ord(c) - 97\n if cur.children[ind] != None :\n cur = cur.children[ind]\n else:\n cur.children[ind] = TrieNode(c)\n cur = cur.children[ind]\n cur.isleaf = True", "def insert(self, word: str) -> None:\n current = self.root\n for letter in word:\n if letter not in current.children:\n current.children[letter] = TrieNode(letter)\n current = current.children[letter]\n current.has_end = True", "def addWord(self, word: str) -> None:\n current = self.root\n for letter in word:\n if letter not in current.children:\n current.children[letter] = TrieNode(letter)\n\n current = current.children[letter]\n\n current.is_word = True", "def insert(self, word: str) -> None:\n node = self.root\n for char in word:\n node = node.setdefault(char, {})\n node[self.end_of_word] = self.end_of_word", "def insert(self, word):\n node = self.root\n for letter in word:\n if letter not in node.children:\n node.children[letter] = TrieNode()\n node = node.children[letter]\n node.word = True", "def replace_word_candidate(self, word):\n capital_flag = word[0].isupper()\n word = word.lower()\n if capital_flag and word in self.teencode_dict:\n return self.replace_teencode(word).capitalize()\n elif word in self.teencode_dict:\n return self.replace_teencode(word)\n\n for couple in self.word_couples:\n for i in range(2):\n if couple[i] == word:\n if i == 0:\n if capital_flag:\n return couple[1].capitalize()\n else:\n return couple[1]\n else:\n if capital_flag:\n return couple[0].capitalize()\n else:\n return couple[0]" ]
[ "0.64932525", "0.60075444", "0.6002033", "0.5950056", "0.5884344", "0.5873636", "0.58596784", "0.5755214", "0.5720153", "0.57188857", "0.5702307", "0.57001936", "0.5692823", "0.56882954", "0.5668327", "0.5638983", "0.5637053", "0.5602583", "0.55859566", "0.55837816", "0.5562106", "0.5541984", "0.5533052", "0.55125153", "0.5510256", "0.55085665", "0.54975575", "0.5489985", "0.54836243", "0.54597026" ]
0.75555784
0
Creates a common Python list of objects, no matter what information is supported by the parsed XML file for junit() test results.
def parse(self): def parse_testcase(xml_object): testcase = xml_object tc_dict = { "classname": testcase.attrib.get("classname", "unknown"), "file": testcase.attrib.get("file", "unknown"), "line": int(testcase.attrib.get("line", -1)), "name": testcase.attrib.get("name", "unknown"), "time": float(testcase.attrib.get("time", -1)), } # The following data is normally a subnode (e.g. skipped/failure). # We integrate it right into the testcase for better handling if hasattr(testcase, "skipped"): result = testcase.skipped tc_dict["result"] = "skipped" tc_dict["type"] = result.attrib.get("type", "unknown") # tc_dict["text"] = re.sub(r"[\n\t]*", "", result.text) # Removes newlines and tabs # result.text can be None for pytest xfail test cases tc_dict["text"] = result.text or "" tc_dict["message"] = result.attrib.get("message", "unknown") elif hasattr(testcase, "failure"): result = testcase.failure tc_dict["result"] = "failure" tc_dict["type"] = result.attrib.get("type", "unknown") # tc_dict["text"] = re.sub(r"[\n\t]*", "", result.text) # Removes newlines and tabs tc_dict["text"] = result.text tc_dict["message"] = "" else: tc_dict["result"] = "passed" tc_dict["type"] = "" tc_dict["text"] = "" tc_dict["message"] = "" if hasattr(testcase, "system-out"): tc_dict["system-out"] = testcase["system-out"].text else: tc_dict["system-out"] = "" return tc_dict def parse_testsuite(xml_object): testsuite = xml_object tests = int(testsuite.attrib.get("tests", -1)) errors = int(testsuite.attrib.get("errors", -1)) failures = int(testsuite.attrib.get("failures", -1)) # fmt: off skips = int( testsuite.attrib.get("skips") or testsuite.attrib.get("skip") or testsuite.attrib.get("skipped") or -1 ) # fmt: on passed = int(tests - sum(x for x in [errors, failures, skips] if x > 0)) ts_dict = { "name": testsuite.attrib.get("name", "unknown"), "tests": tests, "errors": errors, "failures": failures, "skips": skips, "passed": passed, "time": float(testsuite.attrib.get("time", -1)), "testcases": [], "testsuite_nested": [], } # add nested testsuite objects to if hasattr(testsuite, "testsuite"): for ts in testsuite.testsuite: # dict from inner parse inner_testsuite = parse_testsuite(ts) ts_dict["testsuite_nested"].append(inner_testsuite) elif hasattr(testsuite, "testcase"): for tc in testsuite.testcase: new_testcase = parse_testcase(tc) ts_dict["testcases"].append(new_testcase) return ts_dict # main flow starts here junit_dict = [] if self.junit_xml_object.tag == "testsuites": for testsuite_xml_object in self.junit_xml_object.testsuite: complete_testsuite = parse_testsuite(testsuite_xml_object) junit_dict.append(complete_testsuite) else: complete_testsuite = parse_testsuite(self.junit_xml_object) junit_dict.append(complete_testsuite) return junit_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self):\n try:\n data = etree.parse(self.resultfilename).getroot()\n except OSError:\n data = []\n\n testresults = []\n for testcase in data:\n category = Category.OK\n status = 'ok'\n module = testcase.get('classname')\n name = testcase.get('name')\n message = ''\n time = float(testcase.get('time'))\n extras = []\n\n for child in testcase:\n if child.tag in ('error', 'failure', 'skipped'):\n if child.tag == 'skipped':\n category = Category.SKIP\n else:\n category = Category.FAIL\n status = child.tag\n type_ = child.get('type')\n message = child.get('message', default='')\n if type_ and message:\n message = '{0}: {1}'.format(type_, message)\n elif type_:\n message = type_\n if child.text:\n extras.append(child.text)\n elif child.tag in ('system-out', 'system-err'):\n if child.tag == 'system-out':\n heading = _('Captured stdout')\n else:\n heading = _('Captured stderr')\n contents = child.text.rstrip('\\n')\n extras.append('----- {} -----\\n{}'.format(heading,\n contents))\n\n extra_text = '\\n\\n'.join(extras)\n testresults.append(\n TestResult(category, status, name, module, message, time,\n extra_text))\n\n return testresults", "def parse(self):\n return []", "def getTestData(self):\n raise NotImplementedError", "def get_results(xml_files):\n results = []\n info = {'files': [], 'duration': 0, 'end_time': arrow.utcnow(), 'passed': True}\n\n for xml in xml_files:\n info['files'].append({'name': xml, 'content': read_file(xml)})\n suite, result = parse(xml)\n\n results.extend(getattr(result, 'tests'))\n\n if len(result.tests) != len(result.passed) + len(result.skipped):\n info['passed'] = False\n\n # sum the time from testcase\n\n for test in results:\n info['duration'] += test.time.total_seconds()\n\n info['start_time'] = info['end_time'].shift(seconds=-info['duration'])\n info['start_time'] = info['start_time'].format()\n info['end_time'] = info['end_time'].format()\n\n return results, info", "def unique_classes(srcfile, listfile):\n cls_list = []\n with open(listfile, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n xml_file = srcfile.format(line.strip())\n\n tree = ET.parse(xml_file)\n objs = tree.findall('object')\n\n for ix, obj in enumerate(objs):\n cls = obj.find('name').text\n if cls in cls_list:\n pass\n else:\n cls_list.append(cls)\n print(cls)", "def getTestResults():", "def test_01_parser_factory(self):\n for gpx_file in TEST_GPX_FILES_2:\n parser = parser_factory(gpx_file, CONFIG_STRAVAGPX)\n self.assertIsInstance(parser, BaseGPXParser)\n for fit_file in TEST_FIT_FILES:\n parser = parser_factory(os.path.join(TEST_FIT_FILES_DIR, fit_file), CONFIG_FIT)\n self.assertIsInstance(parser, FITParser)\n for tcx_file in TEST_TCX_FILES:\n parser = parser_factory(os.path.join(TEST_TCX_FILES_DIR, tcx_file), CONFIG_GARMINTCX)\n self.assertIsInstance(parser, TCXParser)", "def load_tests_list(cls, to):\n tests = [unittest.TestLoader().loadTestsFromModule(to)]\n\n return tests", "def getList(self):", "def getList(self):", "def __init__(self,resultList):\n self.writeList = list()\n self.readList = list()\n self.xList = list()\n for row in resultList:\n self.xList.append(row[0])\n self.writeList.append(row[1])\n self.readList.append(row[2])", "def make_tests(test_descr=TRANSFORMS_TESTINFO):\n tests = []\n for _transform, tr_input, tr_output, _normalize, _subobjects in test_descr:\n # load transform if necessary\n if type(_transform) is type(''):\n try:\n _transform = load(_transform).register()\n except MissingBinary:\n # we are not interessted in tests with missing 
binaries\n continue\n except:\n import traceback\n traceback.print_exc()\n continue\n\n if TR_NAMES is not None and not _transform.name() in TR_NAMES:\n print 'skip test for', _transform.name()\n continue\n\n class TransformTestSubclass(TransformTest):\n input = input_file_path(tr_input)\n output = output_file_path(tr_output)\n transform = _transform\n normalize = lambda x, y: _normalize(y)\n subobjects = _subobjects\n\n tests.append(TransformTestSubclass)\n\n return tests", "def test_list(self):\n pass", "def test_list(self):\n pass", "def X(self)->list:", "def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]", "def test_get_list(self):\n pass", "def _get_elements(cls):\n raise NotImplementedError()", "def xml(self):\n testcase = ET.Element('testcase')\n testcase.set('classname', self._class)\n testcase.set('name', self._method)\n testcase.set('time', '%.4f' % self._time)\n if self._failure is not None:\n self._print_error(testcase, 'failure', self._failure)\n if self._error is not None:\n self._print_error(testcase, 'error', self._error)\n return testcase", "def getNodeTests():\n\n nodeTestsQuery = NodeTest.query.all()\n \n if nodeTestsQuery: \n nodeTestList = []\n for nodeTestQuery in nodeTestsQuery:\n nodeTestList.append(nodeTestQueryToObject(nodeTestQuery))\n return nodeTestList\n else:\n return None", "def gather_tests(self):\n rosie_tests_dir = os.path.join(cp_tests_dir(),\n \"circuitpython\",\n \"rosie_tests\")\n test_files = []\n for test in os.scandir(rosie_tests_dir):\n # TODO: implement exclusions by board\n if test.path.endswith(\".py\"):\n test_files.append(TestObject(test.path))\n\n return test_files", "def get_element_list(self):\n pass", "def get_tests(self, obj=None):\n # create class to unit test notebooks\n if obj is None:\n obj = \"{}\".format(self._name)\n obj = type(obj, (unittest.TestCase,), self.test_dict)\n else:\n for key, val in self.test_dict:\n setattr(obj, key, val)\n obj.ignore = self.ignore\n obj.py2_ignore = self.py2_ignore\n return obj", "def generate(self):\n return []", "def process_assertion_list(self, cls, functions):", "def init_test_cases():\n test_cases = []\n\n # add info to list in memory, one by one, following signature values\n test_case_ID = 1\n test_case_name = \"auto-resiliency-pif-001\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-9\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 2\n test_case_name = \"auto-resiliency-pif-002\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-10\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 3\n test_case_name = \"auto-resiliency-pif-003\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-11\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 4\n test_case_name = \"auto-resiliency-pif-004\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-12\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 5\n test_case_name = \"auto-resiliency-vif-001\"\n test_case_JIRA_URL = 
\"https://jira.opnfv.org/browse/AUTO-13\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 6\n test_case_name = \"auto-resiliency-vif-002\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-14\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 7\n test_case_name = \"auto-resiliency-vif-003\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-15\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 8\n test_case_name = \"auto-resiliency-sec-001\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-16\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 9\n test_case_name = \"auto-resiliency-sec-002\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-17\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 10\n test_case_name = \"auto-resiliency-sec-003\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-18\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n # write list to binary file\n write_list_bin(test_cases, FILE_TEST_CASES)\n\n return test_cases", "def getTestInstance(self):\r\n return [self.currentTestState, self.currentTestPhenotype]", "def tests(self):\n return [self]", "def _extract_elements(self, tree, element_type):\n # creates a new attribute, e.g. 'self.nodes' and assigns it an\n # empty list\n setattr(self, element_type, [])\n etree_elements = get_elements(tree, element_type)\n for i, etree_element in enumerate(etree_elements):\n # create an instance of an element class (e.g. TokenNode)\n salt_element = create_class_instance(etree_element, i, self.doc_id)\n # and add it to the corresponding element type list,\n # e.g. 'self.nodes'\n getattr(self, element_type).append(salt_element)\n # In case of a 'nodes' element this is equivalent to:\n # self.nodes.append(TokenNode(etree_element, document_id))", "def init_test_definitions():\n test_definitions = []\n\n # add info to list in memory, one by one, following signature values\n test_def_ID = 5\n test_def_name = \"VM failure impact on virtual firewall (vFW VNF)\"\n test_def_challengeDefID = 5\n test_def_testCaseID = 5\n test_def_VNFIDs = [1]\n test_def_associatedMetricsIDs = [2]\n test_def_recipientIDs = [2]\n test_def_testCLICommandSent = [\"pwd\",\"kubectl describe pods --include-uninitialized=false\"]\n test_def_testAPICommandSent = [\"data1\",\"data2\"]\n test_def_testCodeID = 5\n test_definitions.append(TestDefinition(test_def_ID, test_def_name,\n test_def_challengeDefID,\n test_def_testCaseID,\n test_def_VNFIDs,\n test_def_associatedMetricsIDs,\n test_def_recipientIDs,\n test_def_testCLICommandSent,\n test_def_testAPICommandSent,\n test_def_testCodeID))\n\n # write list to binary file\n write_list_bin(test_definitions, FILE_TEST_DEFINITIONS)\n\n return test_definitions" ]
[ "0.6335188", "0.5985103", "0.59026104", "0.5846985", "0.57126236", "0.56514776", "0.5649591", "0.56286633", "0.5615803", "0.5615803", "0.5605953", "0.55911666", "0.5566683", "0.5566683", "0.5559948", "0.55295116", "0.54831856", "0.54688597", "0.54634106", "0.54622257", "0.5442048", "0.5439346", "0.54330957", "0.5380458", "0.53798866", "0.53666884", "0.5364151", "0.5363397", "0.5351924", "0.5344334" ]
0.6112433
1
Manual assignment of a (stopped) Times object as a subdivision of the running timer. Use cases are expected to be very limited (mainly provided as a one-Times variant of attach_par_subdivision).
def attach_subdivision(times): t = timer() if not isinstance(times, Times): raise TypeError("Expected Times object for param 'times'.") assert times.total > 0., "Attached subdivision has total time 0, appears empty." name = times.name f.r.self_agg += times.self_agg if name not in f.t.subdvsn_awaiting: times_copy = copy.deepcopy(times) times_copy.parent = f.r f.t.subdvsn_awaiting[name] = times_copy else: merge.merge_times(f.t.subdvsn_awaiting[name], times) f.t.self_cut += timer() - t
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_interval_sub(self, time_step, nsteps):\n world.subtime = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting subtime\")", "def __set_time_elements(*args):\n args[0].TimeState.delay_elements = args[1]\n args[0].TimeState.set_delay_elements()", "def time_interval_prop(self, time_step, nsteps):\n world.time = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting time\")", "def setNumTimeSubSteps(*argv):", "def setTimeDiscretisation(self,timeStepIntervals = None, timeStepSizes = None):\n if timeStepIntervals != None:\n self.timeStepIntervals = timeStepIntervals\n pass\n elif timeStepSizes != None:\n self.timeStepSizes = timeStepSizes\n pass\n else:\n raise Warning(\"You should give at least an argument to the setTimeDiscretisation function\")\n if self.timeStepIntervals != None:\n print(\"dbg hm \",self.timeStepIntervals)\n print(\"dbg hm \",self.problem.calculationTimes[-1])\n self.timeStepSizes = self.problem.calculationTimes[-1]/self.timeStepIntervals\n pass\n else:\n self.timeStepIntervals = self.problem.calculationTimes[-1]/self.timeStepSizes\n pass", "def timestep(self, simsystem, osc, obs):\n pass", "def control_timestep(self):\n if self._overridden_n_sub_steps is not None:\n return self.physics.timestep() * self._overridden_n_sub_steps\n else:\n return self.task.control_timestep", "def attach_par_subdivision(par_name, par_times):\n t = timer()\n if not isinstance(par_times, (list, tuple)):\n raise TypeError(\"Expected list or tuple for param 'par_times'.\")\n for times in par_times:\n if not isinstance(times, Times):\n raise TypeError(\"Expected each element of param 'par_times' to be Times object.\")\n assert times.total > 0., \"An attached par subdivision has total time 0, appears empty.\"\n par_name = str(par_name)\n sub_with_max_tot = max(par_times, key=lambda x: x.total)\n f.r.self_agg += sub_with_max_tot.self_agg\n if par_name not in f.t.par_subdvsn_awaiting:\n f.t.par_subdvsn_awaiting[par_name] = []\n for times in par_times:\n times_copy = copy.deepcopy(times)\n times_copy.parent = f.r\n times_copy.par_in_parent = par_name\n f.t.par_subdvsn_awaiting[par_name].append(times_copy)\n else:\n for new_sub in par_times:\n is_prev_sub = False\n for old_sub in f.t.par_subdvsn_awaiting[par_name]:\n if old_sub.name == new_sub.name:\n is_prev_sub = True\n break\n if is_prev_sub:\n merge.merge_times(old_sub, new_sub)\n else:\n new_sub_copy = copy.deepcopy(new_sub)\n new_sub_copy.parent = f.r\n new_sub_copy.par_in_parent = par_name\n f.t.par_subdvsn_awaiting[par_name].append(new_sub_copy)\n f.t.self_cut += timer() - t", "def setTimepoint(self, tp):\n\t\tpass", "def time_slot(self):\n pass", "def zero_timings(self):\r\n self.step = 0\r\n self.current_T = 0.0", "def _set_runtimes(self):\n self._run_times =np.zeros(self.n_runs, dtype = np.float)", "def setStopTime(self, t1):\n self._simulator_.update(t1=t1)\n return", "def run(self):\r\n\r\n # t=0 is singular point\r\n\r\n print 'Time of laboratory clock Tw =', self.tick\r\n tt = self.tmp\r\n ll = self.lst\r\n car = self.interaction(self.carr)\r\n ll.item_run(tt, self.tick, car)\r\n tt = tt.next\r\n\r\n # run of local time\r\n\r\n while not tt is None:\r\n\r\n if tt.dedicated_node:\r\n self.tick = self.tick + 1\r\n print 'Time of laboratory clock Tw =', self.tick\r\n\r\n # self.move() # It is classical motion of particle (example).\r\n\r\n self.move_reset()\r\n car = self.interaction(self.carr)\r\n\r\n ll = self.lst\r\n while not ll is None:\r\n ll.item_run(tt, self.tick, car)\r\n ll = ll.right\r\n\r\n tt = 
tt.next", "def test_custom_time(self):\n interval = 0.5\n M = simulation.StateMonitor(self.G, 'v', interval=interval)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, interval)))", "def __init__ (self, times, lower, upper):\n self.times = times\n self.lower = lower\n self.upper = upper\n\n self.counter = 0", "def test_timer_create_and_run():\n scope_label = \"theMethod\"\n timer = Timer(\"aTimer\", \"Some text with details\")\n timer.start(scope_label)\n time.sleep(0.1)\n timer.stop(scope_label)\n\n # the timing +/- result is platform dependant\n assert timer.report_totals() >= 0.09", "def time_processor(self):\n while True:\n rexp = (yield)\n self.time = float(rexp.group(1))\n self.time_str = rexp.group(1)\n # Reset subIteration counters\n for k in self.subiter_map:\n self.subiter_map[k] = 0\n self._tick = False", "def _reset(self) -> ts.TimeStep:", "def testMeasureNested(self):\n timer = timing_util.IntervalTimer()\n self.assertEqual(timer.intervals, [])\n with timer.Measure('Outer Interval'):\n with timer.Measure('Inner Interval'):\n pass\n self.assertEqual(len(timer.intervals), 2)\n inner_interval = timer.intervals[0]\n self.assertEqual(len(inner_interval), 3)\n inner_name = inner_interval[0]\n inner_start = inner_interval[1]\n inner_stop = inner_interval[2]\n self.assertEqual(inner_name, 'Inner Interval')\n outer_interval = timer.intervals[1]\n self.assertEqual(len(outer_interval), 3)\n outer_name = outer_interval[0]\n outer_start = outer_interval[1]\n outer_stop = outer_interval[2]\n self.assertEqual(outer_name, 'Outer Interval')\n self.assertLessEqual(outer_start, inner_start)\n self.assertLessEqual(inner_start, inner_stop)\n self.assertLessEqual(inner_stop, outer_stop)", "def testMeasureSequential(self):\n timer = timing_util.IntervalTimer()\n self.assertEqual(timer.intervals, [])\n with timer.Measure('First Interval'):\n pass\n with timer.Measure('Second Interval'):\n pass\n self.assertEqual(len(timer.intervals), 2)\n first_interval = timer.intervals[0]\n self.assertEqual(len(first_interval), 3)\n first_name = first_interval[0]\n first_start = first_interval[1]\n first_stop = first_interval[2]\n self.assertEqual(first_name, 'First Interval')\n second_interval = timer.intervals[1]\n self.assertEqual(len(second_interval), 3)\n second_name = second_interval[0]\n second_start = second_interval[1]\n second_stop = second_interval[2]\n self.assertEqual(second_name, 'Second Interval')\n self.assertLessEqual(first_start, first_stop)\n self.assertLessEqual(first_stop, second_start)\n self.assertLessEqual(second_start, second_stop)", "def timer_setup(self):\n pass", "def update_signal(self,current_time):\r\n time = (current_time+self.offset)%self.cycle\r\n \r\n for ph_id,group in self.lane_groups.items():\r\n \r\n ph = self.phases[ph_id]\r\n \r\n if not (ph.start<=time<ph.end):\r\n # when the light is red, the section cannot generate demand\r\n for sec in group:\r\n sec.demand=0", "def setTimeRegime(*args):\n\n args[0].TimeState.TimeRegime.time_regime = args[1]", "def test_timestep(self):\n class Mock(object):\n def __init__(self):\n self.t = 0.0\n self.dt = None\n\n def evolve(self1, t, dt):\n if self1.dt is not None:\n self.assertAlmostEqual(self1.dt, dt)\n else:\n self1.dt = dt\n\n self.assertAlmostEqual(self1.t, t)\n\n self1.t += self1.dt\n\n t_max = 10.0\n dt = 0.2\n\n G = Mock()\n simulation.Simulation(G, dt=dt).run(t_max)\n self.assertAlmostEqual(G.dt, dt)", "def tick(self):\n 
self.times.append(timeit.default_timer())", "def __init__(self, min=0, sec=0):\n self.min = min\n self.sec = sec", "def setup(self):\n cls = type(\"Timings\", (Structure,),\n {\"_fields_\": [(n, c_double) for n in self._timers]})\n self._C_timings = cls()\n return byref(self._C_timings)", "def setSplit(tmr_channel, total, newSecondPart):\n writeTMR(tmr_channel, TMR_CMPLD1, total-newSecondPart)\n writeTMR(tmr_channel, TMR_CMPLD2, newSecondPart)", "def get_times():\n if f.root.stopped:\n return copy.deepcopy(f.root.times)\n else:\n t = timer()\n times = collapse.collapse_times()\n f.root.self_cut += timer() - t\n return times" ]
[ "0.6630454", "0.6081781", "0.5878871", "0.5870936", "0.5784389", "0.57493865", "0.5726583", "0.5696444", "0.56729144", "0.5598866", "0.558305", "0.5582902", "0.55632716", "0.54817516", "0.54677033", "0.5444429", "0.5438984", "0.54319745", "0.5367586", "0.53596765", "0.53501314", "0.5316355", "0.53096884", "0.53069687", "0.529513", "0.5290922", "0.52852964", "0.5281112", "0.52671874", "0.5262283" ]
0.6671484
0
Serialize and/or save a Times data object using pickle (cPickle).
def save_pkl(filename=None, times=None): if times is None: if not f.root.stopped: times = collapse.collapse_times() else: times = f.root.times else: if isinstance(times, (list, tuple)): for t in times: if not isinstance(t, Times): raise TypeError("Expected single Times instance or list/tuple of Times instances for param 'times'.") elif not isinstance(times, Times): raise TypeError("Expected single Times instance or list/tuple of Times instances for param 'times'.") if filename is not None: with open(str(filename), 'wb') as file: pickle.dump(times, file) else: return pickle.dumps(times)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self): \n with open(self.path+self.name, \"wb\") as pfile:\n pickle.dump(self.pyObj, pfile)", "def pickle(self,data,filename):\n pickle.dump(data, open(filename, 'wb'))", "def saveData(self): \n self.spIndex.close()\n output = open(PublicTransit.PICKLE_SAVE_FILE, 'wb') \n # cPickle the list using the highest protocol available.\n cPickle.dump(self.nodesDict, output, -1)\n cPickle.dump(self.linksDict, output, -1)\n cPickle.dump(self.stopsByRoute, output, -1)\n cPickle.dump(self.stopsByNode, output, -1)\n cPickle.dump(self.routeXref, output, -1)\n cPickle.dump(self.transitRoutes, output, -1)\n output.close()\n self.spIndex = index.Index(PublicTransit.SPATIAL_INDEX_FILE)", "def to_serializable(self):\n time = self._time\n data = {\n str(cuid): [pyo_value(val) for val in values]\n for cuid, values in self._data.items()\n }\n return TimeSeriesTuple(data, time)", "def serialize(self):\n # Employ RCU method to prevent race condition.\n update_pickle = self._pickle_path + '.tmp'\n with open(update_pickle, 'wb') as fh:\n cPickle.dump((self._stations_history,\n self._coordinates,\n self._running_sum,\n self._weather_parser.serialize()), fh)\n\n os.rename(update_pickle, self._pickle_path)", "def to_pickle(self, path=None):\n if path:\n with open(path, \"wb\") as f:\n dill.dump(self, f)\n return None\n return dill.dumps(self)", "def save_object(self, filename, data):\n with open(filename, 'wb') as outp: # Overwrites any existing file.\n pickle.dump(data, outp, pickle.HIGHEST_PROTOCOL)", "def save_pickle(self, path):\n with open(path, 'wb') as f:\n pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)", "def save(self, ts):\n with open(self, 'w') as f:\n Timestamp.wrap(ts).dump(f)", "def picklesave(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)", "def save(self):\n #--Data file exists?\n filePath = self.path\n if os.path.exists(filePath):\n ins = open(filePath)\n outData = compat.uncpickle(ins)\n ins.close()\n #--Delete some data?\n for key in self.deleted:\n if key in outData:\n del outData[key]\n else:\n outData = {}\n #--Write touched data\n for key in self.changed:\n outData[key] = self.data[key]\n #--Pickle it\n tempPath = filePath+'.tmp'\n cPickle.dump(outData,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)", "def dump_pickle(path, data):\n with open(path, 'wb') as f:\n pickle.dump(data, f)", "def test_dumps_time(self):\n try:\n _build_test_dirs()\n dicti = {\n 'mintime': datetime.time.min,\n 'maxtime': datetime.time.max,\n 'array': [1, 2, 3],\n 'string': 'trololo',\n 'int': 1,\n 'float': 4.32,\n 'true': True,\n 'false': False,\n 'null': None\n }\n with open(_TEST_FILE, 'w+') as fileobj:\n morejson.dump(dicti, fileobj)\n with open(_TEST_FILE, 'r') as fileobj:\n self.assertEqual(dicti, morejson.load(fileobj))\n finally:\n _dismantle_test_dirs()", "def pickle_save(file_path, obj):\n with open(file_path, 'wb') as f:\n pickle.dump(obj, f)", "def serialize(obj):\n return pickle.dumps(obj)", "def objToPickle(self, x):\n try:\n xp = pickle.dumps(x)\n pickle.loads(xp)\n except:\n return\n return xp", "def _save_obj(obj, name):\n with open('/bigdisk/pickles/' + name, 'w') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save(self, timestamp: int, data_path: str, file_base: str):\n file_name = os.path.join(data_path,\n '{}-{}.pkl'.format(file_base, timestamp))\n pickle.dump(self.as_numpy_array(),\n open(file_name, 'wb'),\n protocol=pickle.HIGHEST_PROTOCOL)", "def save_object(path,object):\r\n with open(path,\"wb\") as f:\r\n 
pickle.dump(object,f,pickle.HIGHEST_PROTOCOL)", "def test_pickle(self):\n X,Y,Z = self.generate_data(nrows=200)\n task = mmSCHPOLY()\n task.fit(X,Y,Z)\n with tempfile.TemporaryFile(mode='w+b') as tf:\n cPickle.dump(task, tf)\n\n X,Y,Z = self.generate_data(nrows=200)\n task = mmSCH2W()\n task.fit(X,Y,Z)\n with tempfile.TemporaryFile(mode='w+b') as tf:\n cPickle.dump(task, tf)", "def to_pickle(self, path: Union[str, Path]) -> None:\n with open(path, 'wb') as handle:\n pickle.dump(self, handle)", "def save_obj(obj, path ):\n with open(path, 'wb') as f:\n pickle.dump(obj, f)", "def dump_in_pickle(py_obj, filepath):\n\t\twith open(filepath, 'wb') as pfile:\n\t\t\tpickle.dump(py_obj, pfile)", "def _get_time(self):\n # get the current time in UTC (make sure we are timezone aware)\n now_utc = datetime.datetime.now(pytz.UTC)\n \n # convert to our local timezone\n timenow = now_utc.astimezone(self.timezone)\n \n # save the data to our data\n self.data['year'][0] = timenow.year\n self.data['month'][0] = timenow.month\n self.data['day'][0] = timenow.day\n self.data['hour'][0] = timenow.hour\n self.data['minute'][0] = timenow.minute\n self.data['second'][0] = timenow.second\n \n return", "def save_times_data(time: datetime.datetime):\n df = pd.read_csv(STOPS_3CITY, index_col=0)\n logger.debug(\"Data readed\")\n\n dfs_splited = prepare_stops_to_request(df)\n logger.debug(\"Dataframe prepared to request\")\n\n dfs_with_times = add_times_of_travels(dfs_splited, time)\n logger.debug(\"Added times of travels to dataframes\")\n\n df_merged = pd.DataFrame()\n for data_part in dfs_with_times:\n df_merged = df_merged.append(data_part)\n logger.debug(\"Dataframes prepared to save\")\n\n date = time.strftime('%m-%d-%Y_%H-%M-%S')\n df_merged.to_csv(os.path.join(TRICITY, \"data\",\n f\"tricity_travel_{date}.csv\"))\n logger.debug(\"Data saved\")", "def save(data, phonebook):\n\n with open(phonebook, \"w\") as outfile:\n cPickle.dump(data, outfile)", "def write_pickle(obj, path):\n with open(path, 'wb') as file:\n pickle.dump(obj, file)", "def save(self):\n pickle.dump(self, open(self.path, \"wb\"))", "def test_serialization_deserialization(self):\n\n original_time = now()\n serialized_time = DatetimeMapper.forward(original_time)\n assert serialized_time == original_time.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n deserialized_time = DatetimeMapper.backward(serialized_time)\n assert original_time == deserialized_time\n\n deserialized_time = DatetimeMapper.backward(None)\n assert isinstance(deserialized_time, datetime)", "def dump_pickle_data(obj, filename):\n path = \"../tmp/{}.pckl\".format(filename)\n f = open(path, 'wb')\n pickle.dump(obj, f)\n f.close()" ]
[ "0.63484544", "0.60688585", "0.60613865", "0.60561633", "0.5997063", "0.5942309", "0.59148747", "0.590097", "0.58997315", "0.5889424", "0.58798075", "0.58791673", "0.5864843", "0.5844835", "0.5821112", "0.5819759", "0.580201", "0.5794824", "0.57903945", "0.57628703", "0.5758429", "0.5755197", "0.57451624", "0.5739928", "0.5735759", "0.5727974", "0.57187307", "0.5695719", "0.56954676", "0.5692994" ]
0.6815023
0
This method validates the input file. Returns true if the JSON is valid, false otherwise.
def validate_input(update_file):
    try:
        json.load(open(update_file))
        print "\nValid JSON"
        return True
    except ValueError:
        print "\nInvalid JSON"
        exit(-1)
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. Exiting.\"\n exit(-1)\n return False", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def validate_json(self):\n pass", "def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. %s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True", "def is_valid_file(parser, arg):\n if not os.path.isfile(arg):\n parser.error(\"The file %s does not seem to be a file at all! Exiting for safety reasons.\" %arg)\n sys.exit(1)\n else:\n if validate_input(arg):\n return True\n else:\n print \"Invalid JSON. 
Exiting\"\n sys.exit(1)", "def validate(self, config_json):\n pass", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def is_json(filename):\n try:\n with open(filename, 'r') as f:\n dstore = json.load(f)\n except JSONDecodeError:\n return False # In case the file is invalid json file\n return True # In case the file is a valid json file", "def sniff( self, filename ):\r\n try:\r\n json.load( open(filename) )\r\n return True\r\n except Exception:\r\n return False", "def validate_json_file(namespace):\n if namespace.json_file:\n try:\n with open(namespace.json_file) as file_handle:\n json.load(file_handle)\n except EnvironmentError:\n raise ValueError(\"Cannot access JSON request file: \" + namespace.json_file)\n except ValueError as err:\n raise ValueError(\"Invalid JSON file: {}\".format(err))\n # other_values = [arg_name(n) for n in vars(namespace).keys() if getattr(namespace, n)]\n # if other_values:\n # message = \"--json-file cannot be combined with:\\n\"\n # raise ValueError(message + '\\n'.join(other_values))", "def validate(self, json_data):\n try:\n self.process_json(json_data)\n except ValueError as e:\n # self.process_errors.append(e.args[0])\n self.process_errors = [e.args[0]]\n\n self.errors = list(self.process_errors)\n\n # Run validators\n if not self.errors:\n chain = itertools.chain(self.validators)\n self._run_validation_chain(chain)\n\n return len(self.errors) == 0", "def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))", "def is_json_valid(json_data: dict, json_schema: dict) -> bool:\r\n try:\r\n validate(instance=json_data, schema=json_schema)\r\n except jsonschema.exceptions.ValidationError as err:\r\n return False\r\n return True", "def validate_input_file(self):\r\n return os.path.isfile(self.input_file)", "def validate(self) -> bool:\n\n # Start by reading in the blueprint schema json\n schema = json.loads(pkgutil.get_data(\"FactorioTools\", \"blueprintSchema.json\"))\n\n # Validate the object's schema against the blueprintSchema JSON\n try:\n jsonschema.validate(self.data, schema)\n return True\n except jsonschema.ValidationError:\n pass\n\n return False", "def validate_json() -> bool:\n with Path(ROOT_DIR, \"seals\", \"seals.json\").open() as f:\n seals = json.load(f)\n\n seals_in_json = [k for k, v 
in seals.items() if v[\"has_seal\"]]\n\n seals = [\n x.split(\"/\")[-1][:-4] for x in glob.glob(f\"{ROOT_DIR}/seals/orig/*\")\n ]\n missing_seals = sorted(list(set(seals_in_json) ^ set(seals)))\n if not missing_seals:\n return True\n\n raise Exception(f\"Missing entry for: {' '.join(missing_seals)}\")", "def validate(self, json_data):\n self._errors = None\n success = True\n for item in self._schema:\n if not item.validate(json_data):\n success = False\n\n return success", "def validate(json_resp, schema, validictory_path, schema_base=None):\n # assumes /extern/validictory exists (see /cm for instructions)\n if not validictory_path in sys.path:\n sys.path.append(validictory_path)\n import validictory\n\n try:\n if schema_base and not json_resp[\"$schema\"].startswith(schema_base):\n print \"Warning: JSON schema is \", json_resp[\"$schema\"], \"instead of \", schema_base\n validictory.validate(json_resp, schema, required_by_default=False)\n return True\n except Exception as e:\n print \"Received exception %s while trying to validate: %s\" % (\n str(e), json_resp)\n return False", "def is_valid_json(j):\n try:\n json.dumps(j)\n return True\n except json.JSONDecodeError:\n print(\"not valid json\")\n return False", "def is_match(cls, file_path, options=None):\n valid_json_line_count = 0\n total_line_count = 0\n\n if options is None:\n options = dict()\n\n file_encoding = None\n if not isinstance(file_path, StringIO):\n file_encoding = data_utils.detect_file_encoding(file_path=file_path)\n\n with FileOrBufferHandler(file_path, 'r', encoding=file_encoding) \\\n as data_file:\n try:\n json.load(data_file)\n return True\n except (json.JSONDecodeError, UnicodeDecodeError):\n data_file.seek(0)\n\n for k in range(1000):\n total_line_count += 1\n try:\n raw_line = data_file.readline()\n if not raw_line:\n break \n if raw_line.find(\":\") >= 0: # Ensure can be JSON\n json.loads(raw_line) # Check load\n valid_json_line_count += 1\n except UnicodeDecodeError:\n return False\n except ValueError:\n continue\n \n ratio_of_valid_json_line = float(\n valid_json_line_count) / total_line_count\n \n if ratio_of_valid_json_line >= 0.5:\n return True\n else:\n return False", "def validateJSON(jsonData):\n try:\n json.loads(jsonData)\n validate(instance=json.loads(jsonData), schema=read_json_schema(schema_file_path))\n except Exception as err:\n logging.error(err)\n logging.info(\" Message received is not correct \")\n logging.info(\" Message sent to Pulsar Rejection Topic for reprocessing\")\n # IF a message is not correct, I prefer to stop the consumer and fix the problem. 
Another way will be to\n # Send message to another to topic if the message is not valid and change raise below by pass.\n raise\n return False\n\n return True", "def validateProp(filename):\n\n # does the file exists\n if (not os.path.exists(filename)):\n LOG.warning('Prop file (%s) does not exist' % (filename))\n return False\n\n # can I read it\n try:\n propFile = open(filename, 'r')\n prop = json.load(propFile)\n propFile.close()\n except (ValueError, OSError):\n LOG.warning('Prop file (%s) unable to read or did not parse' % (filename))\n return False\n\n # does the prop have the correct value\n for key in ('name', 'md5', 'description', 'size', 'contact'):\n if (key not in prop):\n LOG.warning('Prop file (%s) missing key (%s)' % (filename, key))\n return False\n\n return True", "def load_from_json(self, file_name: str) -> bool:\n try:\n with open(file_name, 'r') as f:\n data = json.loads(f.read())\n self.__g = DiGraph.from_dict(data)\n return True\n except:\n traceback.print_exc()\n return False", "def is_valid_json(json_str):\n assert json_str is not None\n try:\n json.loads(json_str)\n return True\n except (ValueError, TypeError):\n return False", "def file_jsoncheck(filename):\n with open(filename, 'r') as jsontable:\n try:\n json_object = json.load(jsontable)\n except ValueError, e:\n return False\n\n # DQLL.json number of lines should be 35\n # Will change with table version\n nlines = 35\n \n with open(filename, 'r') as f:\n l = [x for x in f.readlines()]\n # Default number of lines should be 35\n if len(l) != nlines:\n print \"Number of lines in DQLL.json is not default {} but {}\".format(nlines, len(l))\n return False\n\n return True", "def _check_json(json_data: Any, clean: bool) -> Any:\n try:\n json.loads(json_data)\n except ValueError:\n return \"unknown\" if clean else False\n return \"success\" if clean else True", "def test_verifies_token_file_contains_json(self):\n\n with open(self.sample_token_file, 'w',\n encoding=\"utf8\", errors=\"surrogateescape\") as stf_h:\n stf_h.write(\"Bad JSON\")\n\n with self.assertRaises(json.decoder.JSONDecodeError):\n badgr = BadgrLite(token_filename=self.sample_token_file)\n badgr.load_token()", "def is_valid_file(self, file_path):\n return True", "def isjson(filepath):\n return filepath.lower().endswith('.json')" ]
[ "0.81748825", "0.7770329", "0.76431674", "0.7455964", "0.7199138", "0.71282756", "0.7009667", "0.6992463", "0.69263047", "0.6873297", "0.68528163", "0.68223625", "0.6743427", "0.6640722", "0.661942", "0.66073275", "0.65818256", "0.6577986", "0.651692", "0.65011466", "0.6498898", "0.6482918", "0.6480044", "0.64485395", "0.6443472", "0.64414364", "0.6381768", "0.6302479", "0.6292483", "0.62832135" ]
0.823358
0
This method prints the current contents of an api_key Riak bucket when passed a valid URL.
def get_current_key(url):
    r=requests.get(url)
    #return json.dumps(r.json(), sort_keys=True, indent=4)
    return json.dumps(r.json(), sort_keys=True, indent=4, separators=(',', ': '))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_all_keys(riak_host,riak_port,bucket):\n url='http://%s:%s/buckets/%s/keys?keys=true' % (riak_host,riak_port,bucket)\n #print url\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def get_from_api(url, *, verbose=False):\n vprint = lambda *a, **kwa: print(*a, **kwa) if verbose else None\n\n with open(\"APIKey.txt\", \"r\") as keyFile:\n apiKey=keyFile.readline()\n if apiKey[-1] == '\\n':\n apiKey = apiKey[:-1]\n \n headers = {'X-API-Key': apiKey}\n vprint(\"getting\", url, \"with headers\", headers, \"...\")\n r = requests.get(url, headers=headers)\n vprint(\"...done\")\n return r", "def get_current_contents(url):\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def list_all_buckets(riak_host,riak_port):\n url='http://%s:%s/buckets?buckets=true' % (riak_host,riak_port)\n r=requests.get(url)\n print json.dumps(r.json(), sort_keys=True, indent=4)", "def analysis_summary(bucket_url, replay):\n\n logger = logging.getLogger(\"SimpleReplayLogger\")\n\n bucket = bucket_dict(bucket_url)\n logger.info(f\"Simple Replay Workload Analysis: {replay}\")\n replay_path = f'analysis/{replay}/out/'\n output_str = f\"\\nBelow is the presigned URLs for the analysis performed for replay: {replay}. \" \\\n f\"Click or copy/paste the link into your browser to download.\"\n r_url = create_presigned_url(bucket.get('bucket_name'), f'{replay_path}{replay}_report.pdf')\n output_str += f\"\\n\\nReplay Analysis Report | Click to Download:\\n{r_url}\\n\"\n logger.info(output_str)", "def get_bucket_file_url(bucket, key):\n\t#https://s3.amazonaws.com/link-checker/2018-05-27-235740.txt\n\tfile_url = \"https://s3.amazonaws.com/\" + bucket + \"/\" + key\n\treturn file_url", "def print_available( self ):\n\n\t\tmax_length = 0\n\n\t\tfor key in self._available:\n\t\t\tmax_length = max( max_length, len( key ) )\n\n\t\tformat_str = 'API found: %%-%ds (%%s)' % max_length\n\n\t\tfor key in self._available:\n\t\t\tentry = self._available.get( key )\n\t\t\tprint( format_str % ( key, entry.get( 'path' ) ) )", "def list_bucket(self, bucket):\n self.response.write('Listbucket result:\\n')\n\n page_size = 1\n stats = gcs.listbucket(bucket + '/foo', max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write('\\n')\n\n if count != page_size or count == 0:\n break\n stats = gcs.listbucket(bucket + '/foo', max_keys=page_size,\n marker=stat.filename)", "def get_info(self, sha256):\n url = self.API_URL % ('apks/', sha256, '')\n return requests.get(url, headers=self.headers, proxies=self.proxies, verify=self.verify_ssl)", "def get_text(bucket, key):\n s3 = boto3.resource(\"s3\")\n\n ocr_obj = s3.Object(bucket, key)\n response = ocr_obj.get()\n data = response[\"Body\"].read()\n\n return data", "def list_replays(bucket_url):\n\n logger = logging.getLogger(\"SimpleReplayLogger\")\n\n table = []\n bucket = bucket_dict(bucket_url)\n try:\n resp = client(\"s3\").list_objects_v2(Bucket=bucket.get('bucket_name'), Delimiter='/', Prefix='analysis/')\n if resp['KeyCount'] == 0:\n logger.error(f\"No replays available in S3. Please run a replay with replay analysis to access replays \"\n f\"from the command line.\")\n exit(-1)\n\n except Exception as e:\n logger.error(f\"Unable to access replays in S3. Please confirm bucket. 
{e}\")\n exit(-1)\n\n s3 = boto3.resource('s3')\n print(f\"Listed below are all the replay reports located in the S3 bucket: {bucket_url}.\\n\")\n\n for x in resp['CommonPrefixes']:\n try:\n s3.Object(bucket.get('bucket_name'), f'{x.get(\"Prefix\")}out/info.json').load()\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\": # if info.json does not exist in folder, do not add to list\n continue\n else:\n logger.error(f\"Unable to access replay. {e}\")\n\n content_object = s3.Object(bucket.get('bucket_name'), f'{x.get(\"Prefix\")}out/info.json')\n file_content = content_object.get()['Body'].read().decode('utf-8')\n json_content = json.loads(file_content)\n\n table.append([json_content['replay_id'],\n json_content['id'],\n json_content['start_time'],\n json_content['end_time'],\n json_content['replay_tag']])\n # use tabulate lib to format output\n print(tabulate(table, headers=[\"Replay\", \"Cluster ID\", \"Start Time\", \"End Time\", \"Replay Tag\"]))", "def test_get_url(self):\n package = make_package(version=\"1.1+g12345\")\n response = self.storage.download_response(package)\n\n parts = urlparse(response.location)\n self.assertEqual(parts.scheme, 'https')\n self.assertEqual(parts.netloc, 'abcdef.cloudfront.net')\n self.assertEqual(parts.path, '/bcc4/mypkg/mypkg-1.1%2Bg12345.tar.gz')\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Key-Pair-Id', 'Expires',\n 'Signature'])\n self.assertTrue(int(query['Expires'][0]) > time.time())\n self.assertEqual(query['Key-Pair-Id'][0],\n self.settings['storage.cloud_front_key_id'])", "def __str__(self):\n return repr(self.api_url)", "def fetch_key(self, key_url: str=None) -> bytes:\n self._logger.debug('Key URL: {}'.format(key_url))\n key_content = requests.get(url=key_url).content if key_url else None\n self._logger.debug('Key content: {}'.format(key_content))\n return key_content", "def bucket_info(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return 1\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n for group in config_json[\"groups\"]:\n if group[\"name\"] == args.group:\n for bucket in group[\"buckets\"]:\n if bucket[\"name\"] == args.bucket:\n print(json.dumps(bucket, indent=4))\n return 0\n break\n\n print(\"No bucket matching {} found\".format(args.bucket))\n return 1", "def test_get_url(self):\n package = make_package()\n response = self.storage.download_response(package)\n\n parts = urlparse(response.location)\n self.assertEqual(parts.scheme, 'https')\n self.assertEqual(parts.netloc, 'mybucket.s3.amazonaws.com')\n self.assertEqual(parts.path, '/' + self.storage.get_path(package))\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Expires', 'Signature',\n 'AWSAccessKeyId'])\n self.assertTrue(int(query['Expires'][0]) > time.time())\n self.assertEqual(query['AWSAccessKeyId'][0],\n self.settings['storage.access_key'])", "def apikey(self,filename='apikey'):\n f = open(filename)\n line = f.readline()\n f.close()\n return line.strip()", "def api_url(self, url_key):\n dic = self.api_endpoints()\n return dic.get(url_key)", "def PrintBucketInfo(self, bucket_uri, listing_style, headers=None, debug=0):\n bucket_objs = 0\n bucket_bytes = 0\n if listing_style == ListingStyle.SHORT:\n print bucket_uri\n else:\n try:\n for obj in self.CmdWildcardIterator(\n bucket_uri.clone_replace_name('*'), ResultType.KEYS,\n headers=headers, 
debug=debug):\n bucket_objs += 1\n bucket_bytes += obj.size\n except WildcardException, e:\n # Ignore non-matching wildcards, to allow empty bucket listings.\n if e.reason.find('No matches') == -1:\n raise e\n if listing_style == ListingStyle.LONG:\n print '%s : %s objects, %s' % (\n bucket_uri, bucket_objs, MakeHumanReadable(bucket_bytes))\n else: # listing_style == ListingStyle.LONG_LONG:\n print '%s :\\n\\t%s objects, %s\\n\\tACL: %s' % (\n bucket_uri, bucket_objs, MakeHumanReadable(bucket_bytes),\n bucket_uri.get_acl(False, headers))\n return (bucket_objs, bucket_bytes)", "def api_scrape_url():\n if 'working_repo' in session:\n meta_data = get_tags(request.args['url'])\n return jsonify(msg=\"success\", data=meta_data)\n else:\n return jsonify(msg=\"failure, unauthorized\"), 401", "def api_url(self, command: str) -> str:\n base_url = self.base_url\n path = \"/\".join(x for x in f\"{base_url.path}/api/v2\".split(\"/\") if x != \"\")\n return URL.build(\n scheme=base_url.scheme,\n host=base_url.host,\n port=base_url.port,\n path=f\"/{path}\",\n query={\"apikey\": self.api_token, \"cmd\": command},\n ).human_repr()", "def read_key_s3(self, keyUrl='', bucket=None):\n \n if bucket is None:\n bucket = self.AWS_S3_BUCKET\n\n try:\n\n fileobj = self.get_s3_client().get_object(\n Bucket= bucket,\n Key= keyUrl\n )\n readKey = fileobj['Body'].read()\n # contentObj = readKey.decode('utf-8')\n\n return readKey\n\n except:\n time.sleep(1)\n pass", "def list_bucket(self, bucket):\n\n self.response.write(\"Listbucket result:\\n\")\n\n # Production apps should set page_size to a practical value.\n page_size = 1\n stats = cloudstorage.listbucket(bucket + \"/foo\", max_keys=page_size)\n while True:\n count = 0\n for stat in stats:\n count += 1\n self.response.write(repr(stat))\n self.response.write(\"\\n\")\n\n if count != page_size or count == 0:\n break\n stats = cloudstorage.listbucket(\n bucket + \"/foo\", max_keys=page_size, marker=stat.filename\n )", "def bucket(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"bucket\")", "def bucket(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"bucket\")", "def get_url(url):\n get_logs = requests.get(url)\n return get_logs.text", "def download_privacy_score(bucket_name, bucket_prefix):\n s3_client = boto3.client('s3')\n s3_client.get_paginator('list_objects_v2')\n input_matcher = re.compile(f'^{bucket_prefix}2[0-9][0-9][0-9]-[0-9][0-9][.]json$')\n\n def iterate_bucket(s3_client, bucket_name, bucket_prefix, input_matcher):\n pageinator = s3_client.get_paginator('list_objects_v2')\n\n for page in pageinator.paginate(Bucket=bucket_name, Prefix=bucket_prefix):\n if page['KeyCount'] == 0:\n continue\n\n for item in page['Contents']:\n if input_matcher.match(item['Key']):\n yield item['Key']\n\n latest_key = max(iterate_bucket(s3_client, bucket_name, bucket_prefix, input_matcher))\n print(f'Downloading latest_key file s3://{bucket_name}/{latest_key} ...')\n return json.loads(s3_client.get_object(Bucket=bucket_name, Key=latest_key)['Body'].read())", "def list_bucket(self, bucket_id=None):\n url = self.prism_endpoint + \"/wBuckets\"\n\n if bucket_id is not None:\n url = url + \"/\" + bucket_id\n\n headers = {\"Authorization\": \"Bearer \" + self.bearer_token}\n\n r = requests.get(url, headers=headers)\n\n if r.status_code == 200:\n logging.info(\"Successfully obtained information about your buckets\")\n return r.json()\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def report(self, url):\n\n print(self.get(url))", "def 
get_api_key(api_key):\n api.get(api_key)" ]
[ "0.59069735", "0.5775815", "0.56716317", "0.5670298", "0.5502804", "0.54711944", "0.54018843", "0.5394873", "0.5385831", "0.536729", "0.5343384", "0.5342679", "0.5339456", "0.5331075", "0.53242856", "0.53214943", "0.5316168", "0.53113574", "0.527582", "0.5245839", "0.52193", "0.52118945", "0.52053505", "0.5147191", "0.5147191", "0.5125504", "0.51232177", "0.51114786", "0.5100879", "0.50977135" ]
0.6079053
0
This method saves the current contents of the Riak bucket/key to be updated into a file with a '.original' suffix.
def save_current_contents(url,update_file):
    r=requests.get(url)
    save_file=update_file+'.original'
    json.dump(r.json(), open(save_file,'w'))
    print "\nSaved contents of: \n\n\t%s \n\nto \n\n\t%s\n" % (url,save_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _s3_stash(self):\n s3_url = 's3://{}/{}'.format(BUCKET, self.atom_file)\n bucketpath = BUCKET.strip(\"/\")\n bucketbase = BUCKET.split(\"/\")[0]\n parts = urlparse.urlsplit(s3_url)\n mimetype = 'application/xml' \n \n conn = boto.connect_s3()\n\n try:\n bucket = conn.get_bucket(bucketbase)\n except boto.exception.S3ResponseError:\n bucket = conn.create_bucket(bucketbase)\n self.logger.info(\"Created S3 bucket {}\".format(bucketbase))\n\n if not(bucket.get_key(parts.path)):\n key = bucket.new_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"created {0}\".format(s3_url)\n self.logger.info(msg)\n else:\n key = bucket.get_key(parts.path)\n key.set_metadata(\"Content-Type\", mimetype)\n key.set_contents_from_filename(self.atom_file)\n msg = \"re-uploaded {}\".format(s3_url)\n self.logger.info(msg)", "def _update_ondisk(self):\n with open(self.orig_path, \"w\") as f:\n f.write(self.content)", "def update(self, namein, nameout):\n\t\ttext = self.dict.sub(self.readFile(namein))\n\t\tself.writeFile(nameout, text)\n\t\treturn", "def overwrite_original_file(self):\n return self.__overwrite_original_file", "def backup_file(self):\n _backupO = Backups()\n _backupFilename = _backupO.backup_file(self.job[\"JSONfileToBeEdited\"])\n return 'Original file %s backed up to %s' % (\n self.job[\"JSONfileToBeEdited\"], _backupFilename)", "def save_current_ami ( s3_infra_conn, region_name, env_type, app_name, ami_name ) :\n ami_bucket = get_admin_bucket_name( region_name = region_name )\n store_s3_contents( s3_conn = s3_infra_conn,\n bucket_name = ami_bucket,\n key_name = get_ami_keypath( env_type ) + get_ami_keyname( app_name ),\n key_contents = ami_name )", "def save_original(self, filename):\n return self.form.save_original(filename)", "def save_original(self, filename):\n return self.form.save_original(filename)", "def cachePayload(self,encodedPayload):\n filename = cachedFilesPath + str(self.project_tag) + ('_%d'%getUnixTimeStamp()) + cachedFileExtensionSuffix\n #print(filename)\n\n with open(filename, mode='w') as f:\n f.write(encodedPayload)", "def backup_inventory(self):\n if config.get(\"aws\", \"s3_bucket\"):\n archives = self.load_archives()\n\n s3_bucket = S3Backend(self.conf).bucket\n k = Key(s3_bucket)\n k.key = self.backup_key\n\n k.set_contents_from_string(json.dumps(archives))\n\n k.set_acl(\"private\")", "def overwrite_original_file(self, value):\n self.__overwrite_original_file = value", "def reset(self):\n self.keyToFile=dict()", "def save(self, filename=None):\n\n if filename is None:\n filename = self.__filename\n\n file = open(filename, 'w')\n for key in self:\n if key[:2] == '__':\n continue\n file.write(key)\n file.write(' = ')\n if key in self.__orig:\n value = self.__orig[key]\n else:\n value = self[key]\n file.write(str(value))\n\n file.close()", "def save_to_s3(self, bucket):\r\n n = 0\r\n m = self.read()\r\n while m:\r\n n += 1\r\n key = bucket.new_key('%s/%s' % (self.id, m.id))\r\n key.set_contents_from_string(m.get_body())\r\n self.delete_message(m)\r\n m = self.read()\r\n return n", "def save(self):\n if self._mode == 'dict':\n self._mode = 'shelve'\n self._shelve_mode = 'c'\n\n for key, value in self._dict.items():\n ckey = copy.copy(key)\n cvalue = copy.copy(value)\n self.add(ckey, cvalue, 'shelve', check=False)\n\n self._dict.clear()\n\n if self._mode == 'dict':\n self._mode = 'dict'\n self._shelve_mode = 'r'", "def test_retrieve_original_to_temp(self):\n # put file\n self.prepare_uploads()\n 
backend = BackendS3(**self.config)\n src = os.path.join(self.upload_path, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n\n # retrieve file\n result = backend.retrieve_original(id, self.tmp_path)\n expected_dst = os.path.join(self.tmp_path, id, 'demo-test.tar.gz')\n self.assertEquals(expected_dst, result)\n self.assertTrue(os.path.exists(expected_dst))", "async def __write_to_auth_file(self, tag_info: dict):\n async with open(self.__file_name, mode=\"r\") as auth_file:\n # Read the cache\n tag_data = json.loads(await auth_file.read())\n tag_copy = tag_data\n await auth_file.close()\n try:\n async with open(self.__file_name, mode=\"w\") as auth:\n tag_list: list = tag_data[\"authorized_tags\"]\n exists_in_cache: bool = False\n # Find tag info, if exists overwrite current status\n for index, tag in enumerate(tag_list):\n if tag[\"id\"] == tag_info[\"id\"]:\n tag_list[index] = tag_info\n exists_in_cache = True\n if not exists_in_cache:\n tag_list.append(tag_info)\n await auth.write(json.dumps(tag_data, indent=2, sort_keys=True))\n await auth.close()\n except Exception as ex:\n logger.debug(\"Failed overwriting tag info\", exc_info=ex)\n print(ex)\n # If overwriting fails, restore old information\n async with open(self.__file_name, mode=\"w\") as backup:\n await backup.write(json.dumps(tag_copy, indent=2, sort_keys=True))\n await backup.close()", "def touchKBucket(self, key):", "def replace_gen(self):\r\n current_path = os.path.join(self.settings.save_path, 'current.json')\r\n current_folder_path = os.path.join(self.settings.save_path, 'current')\r\n history_path = os.path.join(self.settings.save_path, 'history')\r\n archive_folder_path = os.path.join(history_path, f'gen{self.generation}')\r\n archive_path = os.path.join(archive_folder_path, 'current') # no ending allowed\r\n archive_json_path = os.path.join(archive_folder_path, 'current.json')\r\n\r\n\r\n if not os.path.exists(current_path):\r\n raise FileNotFoundError\r\n if not os.path.exists(current_folder_path):\r\n raise FileNotFoundError\r\n\r\n os.makedirs(history_path, exist_ok=True)\r\n os.makedirs(archive_folder_path)\r\n\r\n cwd = os.getcwd()\r\n shutil.make_archive(archive_path, 'zip', current_folder_path)\r\n os.chdir(cwd)\r\n shutil.rmtree(current_folder_path, onerror=_ignore_del_dir_failure)\r\n os.chdir(cwd)\r\n\r\n os.rename(current_path, archive_json_path)", "def save_keyname_file(self, keyname):\n if self.dryrun:\n keyname = '{keyname}'\n else:\n assert keyname, ('need keyname', keyname)\n\n cmd=f'/bin/echo \"{keyname}\" > {self.keyname_file}'\n s, out, err = self.as_user(cmd)\n\n if s == 0:\n self.report(f'Wrote {self.keyname_file}')\n elif s == None:\n pass # dryrun\n else:\n self.report(f'Failed to save {self.keyname_file}')\n self.report('\\t', err.decode('utf8'))\n return s", "def save(self):\n a_copy = FileStorage.__objects\n obj_dict = {obj: a_copy[obj].to_dict() for obj in a_copy.keys()}\n with open(FileStorage.__file_path, \"w\") as f:\n json.dump(obj_dict, f)", "def copy_bucket_contents ( src_bucket, tgt_bucket ) :\n print 'Copying content FROM bucket ' + src_bucket.name + ' TO bucket ' + tgt_bucket.name\n key_list = src_bucket.list( )\n for key in key_list :\n sys.stdout.write( '.' 
) # Activity marker\n sys.stdout.flush( )\n tgt_bucket.copy_key( new_key_name = key.key,\n src_bucket_name = src_bucket.name,\n src_key_name = key.key )", "def store(self, filename):", "def save(self):\n if not self.fileKey:\n log.error(\"attempted to save a closed wallet\")\n return\n encrypted = self.fileKey.encrypt(tinyjson.dump(self).encode()).hex()\n w = tinyjson.dump({\n \"keyparams\": self.fileKey.params(),\n \"wallet\": encrypted,\n })\n helpers.saveFile(self.path, w)", "def copy_key(self, frm, to):\n frm_location = self.s3_location(frm)\n copy_source = {\n 'Bucket': frm_location.bucket,\n 'Key': frm_location.key[1:]\n }\n\n to_location = self.s3_location(to)\n bucket = self.get_bucket(to_location.bucket)\n\n log.info(\"Copying %s to %s\", frm, to)\n bucket.copy(copy_source, to_location[1:])", "def save(self):\n if self.hasChanged:\n filePath = self.path\n tempPath = filePath+'.tmp'\n fileDir = os.path.split(filePath)[0]\n if not os.path.exists(fileDir): os.makedirs(fileDir)\n cPickle.dump(self.data,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)\n self.hasChanged = False", "def upload(self, bucket_name, key_name, fname):\n bucket = self.s3_.get_bucket(bucket_name)\n key = boto.s3.key.Key(bucket)\n with open(fname, 'rb') as infile:\n key.key = key_name\n return key.set_contents_from_file(infile)", "def upgrade_savefile(fn):\n\n if signing_keys is None:\n return\n\n atime = os.path.getatime(fn)\n mtime = os.path.getmtime(fn)\n\n with zipfile.ZipFile(fn, \"a\") as zf:\n\n if \"signatures\" in zf.namelist():\n return\n\n log = zf.read(\"log\")\n zf.writestr(\"signatures\", sign_data(log))\n\n os.utime(fn, (atime, mtime))", "def reinit(self):\n self.keys = {}\n fh = open(self.path, \"w\")\n json.dump(self.keys, fh)\n fh.close()\n os.chmod(self.path, 0o600)", "def save(self):\n super(YacoFile, self).save(self._filename)" ]
[ "0.6037694", "0.59927624", "0.57073236", "0.56520575", "0.56048256", "0.5596014", "0.55929804", "0.55929804", "0.5570519", "0.5558988", "0.5556693", "0.5535628", "0.5512414", "0.5506305", "0.5484331", "0.5482167", "0.54426193", "0.54200596", "0.54176253", "0.54065824", "0.53982115", "0.539082", "0.53641605", "0.53599864", "0.53587633", "0.53469986", "0.5340274", "0.5334268", "0.53095424", "0.53007454" ]
0.6019954
1
Export the accumulated QA info
def export_QA(qa: QA):
    # TODO: implement
    log.info("assess_quality.export_QA: not yet implemented")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dump_qa(self):\n #- QA level outputs\n #qa_outfile = {}\n qa_outfig = {}\n for PA in self.palist:\n for QA in self.qalist[PA]:\n #qa_outfile[QA] = self.io_qa(QA)[0]\n qa_outfig[QA] = self.io_qa(QA)[1]\n \n #- make path if needed\n path = os.path.normpath(os.path.dirname(qa_outfig[QA]))\n if not os.path.exists(path):\n os.makedirs(path)\n\n return (qa_outfig)", "def qa_test():\r\n # Reads Code and Runs Code Metrics\r\n with open(\"BrainDataVisualiser.py\",\"r\") as file:\r\n code = file.read()\r\n with open(\"QA_LOGS.txt\",\"a\") as file:\r\n # Timestamp and append metric results to log\r\n file.write(datetime.date.today().strftime(\"%b-%d-%Y\")+\"\\n\\t\")\r\n file.write(\"General Analysis\\n\\t\\t\")\r\n file.write(str(analyze(code))+\"\\n\\t\")\r\n file.write(\"Cyclomatic Complexity\\n\")\r\n for i in cc_visit(code):\r\n file.write(\"\\t\\t\"+cc_rank(i.complexity)+\" \"+str(i)+\"\\n\")", "def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r", "def make_qa_report(metadata, base_dir, write_tag):\n # Change directory as QA code writes output directly to the running directory\n work_dir = os.getcwd()\n\n filenames = metadata['FITSImageFilename']\n for i, fits_file in enumerate(filenames):\n pb_dir = _productdir(metadata, base_dir, i, '_PB', write_tag)\n pb_filebase = os.path.splitext(fits_file)[0] + '_PB'\n\n log.info('Write QA report output')\n os.chdir(pb_dir)\n pb_fits = os.path.join(pb_dir, pb_filebase + FITS_EXT)\n command = '/home/kat/valid/Radio_continuum_validation -I {} --telescope MeerKAT -F'\\\n ' /home/kat/valid/filter_config_MeerKAT.txt -r'.format(pb_fits)\n sysarg = shlex.split(command)\n with log_qa(log):\n rcv.main(sysarg[0], sysarg[1:])\n os.chdir(work_dir)", "def inventory_report(products):\r\n names = set()\r\n total_price = 0\r\n total_weight = 0\r\n total_flammability = 0\r\n for product in products:\r\n names.add(product.name)\r\n total_price += product.price\r\n total_weight += product.weight\r\n total_flammability += product.flammability\r\n\r\n print(\"ACME CORPORATION OFFICIAL INVENTORY REPORT\")\r\n print(\"Unique product names: {}\".format(len(names)))\r\n print(\"Average price: {}\".format(total_price / len(products)))\r\n print(\"Average weight: {}\".format(total_weight / len(products)))\r\n print(\"Average flammability:{}\".format(\r\n total_flammability / len(products)))\r\n\r\n print(\"Following is useful starting code for acme_report.py:\")", "def main():\n now = time.strftime('%Y%m%d%H%M%S')\n\n # info = get_info(now)\n # info_filename = 'info_' + now + '.csv'\n # info.to_csv(os.path.join('..', '..', 'data', 'raw', info_filename), index=False)\n\n questions = get_questions(now)\n\n # don't talk about all this detail in the talk", "def generate_answers_distribution_report(_xmodule_instance_args, _entry_id,\n course_id, _task_input, action_name):\n\n store = modulestore()\n list_problem_module = get_problem_module(course_id.to_deprecated_string(),\n _task_input['problem_module_id'])\n problem_module = list_problem_module[0]\n add_ancestors_names_to_problem_module(problem_module, store)\n problem_module_size = get_problem_module_size(problem_module)\n\n # the csv will have a header_row (name of each column) and data_rows which is a list of data_row\n # data_row contain all the answers of a quizz for one student\n header_row = create_header_row(problem_module_size)\n data_rows = []\n data_row = []\n # create the full 
id of the quizz and questions in order to get the answer from the SQL database\n module_state_key = \"i4x://{}/{}/problem/{}\".format(course_id.org,\n course_id.course,\n _task_input['problem_module_id'])\n\n # create the full id of the quizz and questions in order to get the answer from the SQL database\n question_ids = create_list_of_question_ids(course_id.org,\n course_id.course,\n _task_input['problem_module_id'],\n problem_module_size)\n # instanciate a UsageKey object from the string \"module_state_key\"\n module_usage_key = UsageKey.from_string(module_state_key)\n\n # request to get all the answers to the quizz\n answers_list = StudentModule.objects.filter(module_state_key=module_usage_key)\n\n # iterate through the answers and fill for each student the data_row\n for answer in answers_list:\n if answer.student.is_superuser is True:\n continue\n user = answer.student\n student = UserProfile.objects.get(user=user)\n data_row = [user.id,\n student.gender,\n student.year_of_birth,\n student.level_of_education]\n json_answer = json.loads(answer.state)\n for question_id in question_ids:\n try:\n data_row.append(json_answer[\"student_answers\"][question_id])\n except KeyError:\n data_row.append(\"NA\")\n data_rows.append(data_row)\n\n datetime_today = datetime.datetime.today().strftime(\"%Y-%m-%d-%H-%M-%S\")\n\n write_csv(header_row, data_rows,\n u\"{}_{}_{}_{}_{}_{}_{}.csv\".format(datetime_today,\n course_id.org[:20],\n course_id.course[:20],\n problem_module.ancestors_names['great_grandparent'][:60],\n problem_module.ancestors_names['grandparent'][:60],\n problem_module.ancestors_names['parent'][:40],\n problem_module.display_name[:40]).replace(' ', '-').replace('?', '-').replace('/', '-'),\n course_id)", "def generate_qna_report(self, past_qna):\n response = []\n\n # reverse the order so correct report order can be generated\n past_qna.reverse()\n for qna in past_qna:\n\n found_page = False\n for page in self.module:\n\n if page[\"QID\"] == qna[\"QID\"]:\n found_page = True\n\n found_answer = False\n answer_list = []\n for answer in page[\"answers\"]:\n if answer[\"AID\"] in qna[\"AID\"]:\n found_answer = True\n answer_list.append({\n \"AID\": answer[\"AID\"],\n \"prettyAID\": answer.get(\"prettyAID\"),\n \"answer\": answer[\"answer\"],\n \"description\": answer[\"description\"],\n \"resources\": answer[\"resources\"]\n })\n\n response.append({\n \"QID\": page[\"QID\"],\n \"question\": page[\"question\"],\n \"description\": page[\"description\"],\n \"resources\": page[\"resources\"],\n \"answers\": answer_list\n })\n\n if not found_answer:\n raise ValueError(\"AID: \" + qna[\"AID\"] + \"not found!\")\n\n if not found_page:\n raise ValueError(\"QID: \" + qna[\"QID\"] + \" not found!\")\n\n return response", "def export_q_a(question, question_intent, answer, bot_answer, answered, has_to_summarize=False):\n # Insert document into collections\n if answered:\n col_answer_given.insert_one(\n {\n 'Question Intent': question_intent,\n 'Question': question,\n 'Bot answer': bot_answer,\n 'Answer': answer\n }\n )\n else:\n if has_to_summarize:\n col_to_summarize.insert_one(\n {\n 'Question Intent': question_intent,\n 'Question': question,\n 'Answer': answer\n }\n )\n else:\n col_answer_not_given.insert_one(\n {\n 'Question Intent': question_intent,\n 'Question': question,\n 'Bot answer': bot_answer,\n 'Answer': answer\n }\n )", "def order_report():", "def print_app_data(self):\n print(\"===================================\")\n print(\"== RESULTS: ==\")\n 
print(\"===================================\")\n\n # Analog application results\n print(\"--------------------------\")\n print(\"-- Analog applications --\")\n print(\"--------------------------\")\n print(\"Number of analog application processed: {}\".format(len(self.analog_apps)))\n if (self.verbose):\n for app in self.analog_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Bay number : {}\".format(device[\"bay_number\"]))\n print(\" - Channel number : {}\".format(device[\"channel_number\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of faults : {}\".format(len(device[\"faults\"])))\n for fault_id,fault_data in device[\"faults\"].items():\n print(\" Fault data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}_T{}\".format(fault_data[\"name\"], fault_data[\"bit_positions\"][0]))\n print(\" - ID : {}\".format(fault_id))\n print(\" - Name : {}\".format(fault_data[\"name\"]))\n print(\" - Description : {}\".format(fault_data[\"description\"]))\n print(\" - Bit positions : {}\".format(fault_data[\"bit_positions\"]))\n print(\" . . . . . . . . . . . . 
\")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"--------------------------\")\n\n # Digital application result\n print(\"----------------------------\")\n print(\"-- Digital applications --\")\n print(\"----------------------------\")\n print(\"Number of digital application processed: {}\".format(len(self.digital_apps)))\n if (self.verbose):\n for app in self.digital_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of inputs : {}\".format(len(device[\"inputs\"])))\n for input in device[\"inputs\"]:\n print(\" Input data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}\".format(input[\"name\"]))\n print(\" - Name : {}\".format(input[\"name\"]))\n print(\" - Bit position : {}\".format(input[\"bit_position\"]))\n print(\" - Zero name : {}\".format(input[\"zero_name\"]))\n print(\" - One name : {}\".format(input[\"one_name\"]))\n print(\" - Alarm state : {}\".format(input[\"alarm_state\"]))\n print(\" - Debounce : {}\".format(input[\"debounce\"]))\n print(\" . . . . . . . . . . . . 
\")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"----------------------------\")\n\n\n print(\"===================================\")\n\n print('Found {} link nodes:'.format(len(self.link_nodes)))\n for k,v in self.link_nodes.items():\n print('{}: {}'.format(k, v['type']))", "def write_output(self):\n self.tcex.log.info('Writing Output')\n self.tcex.log.info(type(self.all_items))\n self.tcex.log.info(len(self.all_items))\n self.tcex.playbook.create_output('firework_alert.json', self.all_items)", "def report(self):\n self.report_status()\n print\n self.report_charset()\n print\n self.report_key()\n print\n self.report_keyset()", "def get_aqi(self):\r\n print('Getting AQI...')\r\n\r\n aqi_list = []\r\n for z in self.zipcodes:\r\n webpage = 'https://airnow.gov/index.cfm?action=airnow.local_city&zipcode=' + z + '&submit=Go'\r\n page_html = urllib.request.urlopen(webpage)\r\n soup = BeautifulSoup(page_html, 'html.parser')\r\n city_html = soup.find('td', attrs={'class': 'ActiveCity'})\r\n aqi_html = soup.find('tr', attrs={'style': 'color:black;text-align:center;font-weight:200'})\r\n\r\n city = city_html.text.strip()\r\n aqi = aqi_html.text.strip()\r\n\r\n if self.aqi_df.shape[0] != 0:\r\n aqi_list.append(aqi)\r\n else:\r\n aqi_list.append([city, z, aqi])\r\n\r\n if self.aqi_df.shape[0] != 0:\r\n aqi_list = pd.Series(aqi_list)\r\n self.aqi_df['AQI on {}'.format(datetime.now())] = aqi_list.values\r\n else:\r\n aqi_list = pd.DataFrame(aqi_list, columns=self.columns)\r\n self.aqi_df = self.aqi_df.append(aqi_list, ignore_index=True)\r\n\r\n self.aqi_df.to_csv(save_dir + '/aqi.csv', index=False)\r\n print(self.aqi_df)", "def qalist(self):\n return self._palist.qalist", "def get_xqa_json(self):\n line = []\n for count, document in enumerate(self.documents):\n json_item = {\"id\": [self.id, count],\n \"question\": self.question,\n \"document\": document,\n \"document_id\": self.document_ids[count]}\n line.append(json_item)\n return line", "def put_qa(self, num_qa_records, qa_record):\n ierr = exolib.py_expqa(self.exoid, num_qa_records, qa_record.T)\n if ierr:\n raise ExodusIIWriterError(\"Error putting QA record\")", "def __debug_print__(self):\n print(self.question_data)", "def inventory_report(products):\n name_list = set()\n price_list = []\n wt_list = []\n flamablity_list = []\n\n for p in products:\n name_list.add(p.name)\n price_list.append(p.price)\n wt_list.append(p.weight)\n flamablity_list.append(p.flammability)\n# Calculating average for report\n unique_names = len(name_list)\n avg_price = sum(price_list)/len(price_list)\n avg_weight = sum(wt_list)/len(wt_list)\n avg_flammability = sum(flamablity_list)/len(flamablity_list)\n# Printing\n print(\"$ python acme_report.py \")\n print(\"ACME CORPORATION OFFICIAL INVENTORY REPORT\")\n print(\"Unique product names:\", unique_names)\n print(\"Average price:\", round(avg_price, 2))\n print(\"Average weight:\", avg_weight)\n print(\"Average flammability:\", avg_flammability)", "def add_to_pr_export(self, exp_template):", "def show(self):\n print(\"APKs in Session: {}\".format(len(self.analyzed_apk)))\n for d, a in self.analyzed_apk.items():\n print(\"\\t{}: {}\".format(d, a))\n print(\"DEXs in Session: {}\".format(len(self.analyzed_dex)))\n for d, dex in self.analyzed_dex.items():\n print(\"\\t{}: {}\".format(d, dex))\n print(\"Analysis in Session: {}\".format(len(self.analyzed_vms)))\n for d, a in self.analyzed_vms.items():\n print(\"\\t{}: {}\".format(d, a))", "def get_export_prompts(obj_bp):\n 
\n prompts = {}\n \n def add_prompt(prompt):\n if prompt.Type == 'NUMBER':\n prompts[prompt.name] = str(prompt.NumberValue)\n if prompt.Type == 'QUANTITY':\n prompts[prompt.name] = str(prompt.QuantityValue)\n if prompt.Type == 'COMBOBOX':\n prompts[prompt.name] = str(prompt.COL_EnumItem[prompt.EnumIndex].name)\n if prompt.Type == 'CHECKBOX':\n prompts[prompt.name] = str(prompt.CheckBoxValue)\n if prompt.Type == 'TEXT':\n prompts[prompt.name] = str(prompt.TextValue)\n if prompt.Type == 'DISTANCE':\n prompts[prompt.name] = str(round(unit.meter_to_active_unit(prompt.DistanceValue),2))\n if prompt.Type == 'ANGLE':\n prompts[prompt.name] = str(prompt.AngleValue)\n if prompt.Type == 'PERCENTAGE':\n prompts[prompt.name] = str(prompt.PercentageValue)\n if prompt.Type == 'PRICE':\n prompts[prompt.name] = str(prompt.PriceValue)\n \n def add_child_prompts(obj):\n for child in obj.children:\n if child.mv.type == 'BPASSEMBLY':\n add_prompts(child)\n if len(child.children) > 0:\n add_child_prompts(child)\n \n def add_prompts(obj):\n for prompt in obj.mv.PromptPage.COL_Prompt:\n if prompt.export:\n add_prompt(prompt)\n \n add_prompts(obj_bp)\n add_child_prompts(obj_bp)\n\n return prompts", "def export_comparisons(self):\n print(\"Exporting comparisons:\")\n\n return", "def build_data(self):\n from desiutil.io import combine_dicts\n # Loop on exposures\n odict = {}\n for qanight in self.qa_nights:\n for qaexp in qanight.qa_exps:\n # Get the exposure dict\n idict = write_qa_exposure('foo', qaexp, ret_dict=True)\n odict = combine_dicts(odict, idict)\n # Finish\n self.data = odict", "def _printable(self):\n toPrint = \"Qubit ID: \" + str(self.qubit_id) + \" \"\n toPrint = toPrint + \"Outcome: \" + str(self.outcome) + \" \"\n toPrint = toPrint + \"Remote App ID: \" + str(self.remote_app_id) + \" \"\n toPrint = toPrint + \"Remote Node: \" + str(self.remote_node) + \" \"\n toPrint = toPrint + \"Remote Port: \" + str(self.remote_port) + \" \"\n toPrint = toPrint + \"Datetime: \" + str(self.datetime)\n return toPrint", "def download(exam, out, name_question, sid_question, compact):\n exam_json, template_questions, email_to_data_map, total = examtool.api.download.download(exam)\n examtool.api.download.export(template_questions, email_to_data_map, total, exam, out, name_question, sid_question, compact)", "def report(self) -> Any:", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())", "def show_products():\n\n print \"These are the products in sale\"\n for key, value in ADD_PRODUCTS.iteritems():\n print \"%s: Q%.2f\" % (key, value)", "def reports_cli():" ]
[ "0.62779266", "0.5807726", "0.5701978", "0.5490575", "0.548842", "0.5469755", "0.5413337", "0.54031056", "0.5353437", "0.5329075", "0.52541536", "0.5240437", "0.5219163", "0.52120894", "0.5197329", "0.5191597", "0.5154738", "0.51318127", "0.51313466", "0.51085216", "0.51080084", "0.5080649", "0.50224084", "0.50107735", "0.5003458", "0.4993973", "0.49872273", "0.49779552", "0.4974406", "0.4971814" ]
0.7101214
0
Computes boundary indices for each of the splits in split_probs.
def _compute_split_boundaries(split_probs, n_items):
    if len(split_probs) > n_items:
        raise ValueError(
            'Not enough items for the splits. There are {splits} '
            'splits while there are only {items} items'.format(
                splits=len(split_probs), items=n_items
            )
        )
    total_probs = sum(p for name, p in split_probs)
    if abs(1 - total_probs) > 1e-8:
        raise ValueError('Probs should sum up to 1. probs={}'.format(split_probs))
    split_boundaries = []
    sum_p = 0.0
    for name, p in split_probs:
        prev = sum_p
        sum_p += p
        split_boundaries.append((name, int(prev * n_items), int(sum_p * n_items)))
    # Guard against rounding errors.
    split_boundaries[-1] = (
        split_boundaries[-1][0],
        split_boundaries[-1][1],
        n_items,
    )
    return split_boundaries
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_split_indices(self):\n\n cumsum = np.cumsum(\n np.concatenate((np.array([0], dtype=np.int8), self.split_sizes)))\n \n fold_inds = np.array(\n [(cumsum[n], cumsum[n + 1]) for n in range(self.n_splits)])\n\n return fold_inds", "def indices_of_split(self, split_name='train'):\n return self.indices_of('split', split_name)", "def bisect_anyaxis(counts, ndomains, split_fac):\n # split along any axis \n splits = {}\n pvals = []\n for axis in range(len(counts.shape)):\n # Sum over other axes\n sum_axes = list(np.arange(len(counts.shape)))\n sum_axes.pop(axis)\n sum_axes = tuple(sum_axes)\n\n # split into left and right \n counts1d = np.sum(counts, axis=sum_axes, dtype=np.int64)\n \n split_idx, n_L, pval = load_partition_1d(counts1d, ndomains, split_fac)\n\n splits[axis] = (split_idx, n_L)\n\n pvals.append(pval)\n\n axis = int(np.argmin(pvals))\n split_idx, n_L = splits[axis]\n return axis, split_idx, n_L", "def _get_inter_splits_by_group(items_and_groups, split_probs, split_number):\n groups = sorted(set(group_id for item_id, group_id in items_and_groups))\n rng = np.random.RandomState(split_number)\n rng.shuffle(groups)\n\n split_boundaries = _compute_split_boundaries(split_probs, len(groups))\n group_id_to_split = {}\n for split_name, i_start, i_end in split_boundaries:\n for i in range(i_start, i_end):\n group_id_to_split[groups[i]] = split_name\n\n split_to_ids = collections.defaultdict(set)\n for item_id, group_id in items_and_groups:\n split = group_id_to_split[group_id]\n split_to_ids[split].add(item_id)\n\n return split_to_ids", "def estimate_bucket_pipeline(bucket_boundaries, num_samples, safe=True):\n if len(bucket_boundaries) < 2:\n raise ValueError('Bucket boundaries must contain at least 2 values')\n\n batch_step = 8\n\n batch_sizes = []\n for boundary in bucket_boundaries:\n batch_size = num_samples / (boundary - 1)\n batch_size = np.floor(batch_size / batch_step) if safe \\\n else np.round(batch_size / batch_step)\n batch_size *= batch_step\n\n if safe and batch_size < batch_step:\n if len(batch_sizes) < 2:\n raise ValueError('Too few samples per batch')\n\n return bucket_boundaries[:len(batch_sizes) - 1], batch_sizes, bucket_boundaries[len(batch_sizes) - 1]\n\n batch_sizes.append(max(batch_step, batch_size.astype(int)))\n\n return bucket_boundaries[:-1], batch_sizes, bucket_boundaries[-1]", "def _get_indices_split ( indices, number_of_folds ):\n # Split the indicies by the number of folds\n return np.array_split ( indices, indices_or_sections = number_of_folds )\n # End get_indices_split()", "def _get_indices_split ( indices, number_of_folds ):\n # Split the indicies by the number of folds\n return np.array_split ( indices, indices_or_sections = number_of_folds )\n # End get_indices_split()", "def compute_bin_indices(X_part, bin_limits=None, n_bins=20):\n if bin_limits is None:\n bin_limits = []\n for variable_data in range(X_part.shape[1]):\n bin_limits.append(numpy.linspace(numpy.min(variable_data), numpy.max(variable_data), n_bins + 1)[1: -1])\n\n bin_indices = numpy.zeros(len(X_part), dtype=numpy.int)\n for axis, bin_limits_axis in enumerate(bin_limits):\n bin_indices *= (len(bin_limits_axis) + 1)\n bin_indices += numpy.searchsorted(bin_limits_axis, X_part[:, axis])\n\n return bin_indices", "def getSplit(self):\n b_index, b_value, b_score, b_groups = 999, 999, 999, None\n for j in range(len(self[0]) - 1):\n for i in range(len(self)):\n groups = self.splitAttribute(j, self[i][j]) # lit, big\n gini = self.giniIndex(groups)\n if gini < b_score and (j, \"%.1f\" % 
self[i][j]) not in self.atr:\n b_index, b_value, b_score, b_groups = j, self[i][\n j], gini, groups\n return b_index, b_value, b_groups, b_score", "def get_split(self,X,y):\n \n BEST_COL = 0\n BEST_SPLIT =0\n BEST_IMPUR = 99\n for i,feature in enumerate(X.T):\n arg_sort=np.argsort(feature) #Sort the feature for optimizing the find of splitting points\n feature= feature[arg_sort]\n y_sort = y[arg_sort]\n splits = self.possible_splits(feature,y_sort) #Get \n\n impur,splits = self.test_split(feature,y_sort,splits) #Get impurity for splitting points\n best_idx = np.argmin(impur)\n best_impur = impur[best_idx]\n \n if best_impur==0.0: #Found perfect split, terminate\n return(i,splits[best_idx])\n elif best_impur<BEST_IMPUR:\n BEST_IMPUR=best_impur\n BEST_SPLIT=splits[best_idx]\n BEST_COL=i\n return (BEST_COL,BEST_SPLIT)", "def fold(nb_splits, dataset):\r\n index = np.arange(np.shape(dataset)[0])\r\n splits = np.split(index, nb_splits)\r\n\r\n index = []\r\n\r\n for n_fold in np.arange(nb_splits):\r\n index.append((splits[n_fold].tolist(),(np.concatenate([x for i,x in enumerate(splits) if i!=n_fold])).tolist()))\r\n\r\n return index", "def make_splits(self):\n # produce fold/portion splits of the training indexes: these output indexes to the tr. indexes themselves\n if self.folds is not None:\n meta_trainval_idx = kfold_split(self.train_idx, self.folds, self.seed, self.labels, self.label_info)\n elif self.portion is not None:\n meta_trainval_idx = portion_split(self.train_idx, self.portion, self.seed, self.labels, self.label_info)\n else:\n meta_trainval_idx = [(np.arange(len(self.train_idx)), np.arange(0, dtype=np.int32))]\n # \"dereference\" the metaindexes to point to the data themselves\n self.trainval_idx = []\n for (tidx, vidx) in meta_trainval_idx:\n self.trainval_idx.append((self.train_idx[tidx], self.train_idx[vidx]))", "def test_split(self,X,y,splits):\n n_data = len(y) #Number of data points\n splits=(X[splits]+X[splits+1])/2\n\n idx_greater = (X>splits[:,None]) #index for greater split\n idx_lower = (X<splits[:,None]) #index for lower split\n\n imp_greater =[self.impurity(y[idx]) for idx in idx_greater] #impurity for greater\n imp_lower = [self.impurity(y[idx]) for idx in idx_lower] #impurity lower\n\n impur = [sum(idx_great)/n_data*imp_great+sum(idx_low)/n_data*imp_low for idx_great,imp_great,idx_low,imp_low in zip(idx_greater,imp_greater,idx_lower,imp_lower)] #Weighted impurity\n return (impur,splits)", "def get_bucket_boundaries(feature):\n return np.unique(np.percentile(feature, range(0, 100))).tolist()", "def encode_splits(data, split_data):\n lookup = {'train': 0, 'val': 1, 'test': 2}\n id_to_split = {}\n split_array = np.zeros(len(data))\n for split, idxs in split_data.iteritems():\n for idx in idxs:\n id_to_split[idx] = split\n for i, img_id in enumerate(data):\n split_array[i] = lookup[id_to_split[img_id]]\n return split_array", "def encode_splits(data, split_data):\r\n lookup = {'train': 0, 'val': 1, 'test': 2}\r\n id_to_split = {}\r\n split_array = np.zeros(len(data))\r\n for split, idxs in split_data.iteritems():\r\n for idx in idxs:\r\n id_to_split[idx] = split\r\n for i, img in enumerate(data):\r\n split_array[i] = lookup[id_to_split[img['image_id']]]\r\n return split_array", "def get_partition_boundaries(cls, session: Session):\n rows = session.execute(\"select pg_class.relname, pg_get_expr(pg_class.relpartbound, pg_class.oid, true) from pg_class where relname SIMILAR TO 'peptides_[0-9]{3}';\").fetchall()\n num_regex = re.compile(r\"\\d+\")\n partition_boundaries = 
[]\n for row in rows:\n matches = re.findall(num_regex, row[1])\n partition_boundaries.append((row[0], int(matches[0]), int(matches[1])))\n return partition_boundaries", "def find_split(x, y):\n\n # Need the starting entropy so we can measure improvement...\n start_entropy = calculate_entropy(y)\n\n # Best thus far, initialised to a dud that will be replaced immediately...\n best = {'infogain': -np.inf}\n\n # Randomly allocate the splits to be traversed (without replacement)\n feature_total = x.shape[1]\n feature_subset_count = int(np.sqrt(feature_total))\n feature_subset = np.random.permutation(feature_total)[:feature_subset_count]\n\n # Loop every possible split of every feature...\n for feature_index in feature_subset:\n for split in np.unique(x[:, feature_index]):\n\n left_indices = []\n right_indices = []\n\n # Get index of rows where x[row_index,feature_index] <= split\n for row_index,row in enumerate(x):\n left_indices.append(row_index) if x[row_index,feature_index] <= split else right_indices.append(row_index)\n\n left_ys = y[left_indices]\n right_ys = y[right_indices]\n\n nleft = len(left_ys)\n nright = len(right_ys)\n ntotal = nleft + nright\n infogain = start_entropy - (nleft / ntotal) * calculate_entropy(left_ys) - (\n nright / ntotal) * calculate_entropy(right_ys)\n\n if infogain > best['infogain']:\n best = {'feature': feature_index,\n 'split': split,\n 'infogain': infogain,\n 'left_indices': left_indices,\n 'right_indices': right_indices}\n return best", "def affect(self, bin_boundaries, element):\n\n # bin_boundaries\n assert type(bin_boundaries) is np.ndarray\n\n # element\n assert isinstance(element, (int, float, np.number)), \\\n \"element = {} should be of a numeric type, not {}.\".format(element, type(element))\n assert bin_boundaries[0] <= element <= bin_boundaries[-1]\n\n # For all bins, in increasing order\n for m in range(1, len(bin_boundaries)):\n\n # If the element is too small to get into the mth bin\n if element < bin_boundaries[m]:\n # Returning the index of the previous one\n return m - 1\n\n # Boundary case : element belongs to the last bin.\n return len(bin_boundaries) - 2", "def _generate_bboxes(self, probs, offsets, scale, threshold):\n # applying P-Net is equivalent, in some sense, to\n # moving 12x12 window with stride 2\n stride = 2\n cell_size = 12\n\n # extract positive probability and resize it as [n, m] dim tensor.\n probs = probs[:, 1, :, :]\n\n # indices of boxes where there is probably a face\n mask = probs > threshold\n inds = mask.nonzero()\n\n if inds.shape[0] == 0:\n return torch.empty(0, dtype=torch.int32, device=self.device), \\\n torch.empty(0, dtype=torch.float32, device=self.device), \\\n torch.empty(0, dtype=torch.float32, device=self.device), \\\n torch.empty(0, dtype=torch.int32, device=self.device)\n\n # transformations of bounding boxes\n tx1, ty1, tx2, ty2 = [offsets[inds[:, 0], i, inds[:, 1], inds[:, 2]]\n for i in range(4)]\n\n offsets = torch.stack([tx1, ty1, tx2, ty2], 1)\n score = probs[inds[:, 0], inds[:, 1], inds[:, 2]]\n\n # P-Net is applied to scaled images\n # so we need to rescale bounding boxes back\n bounding_boxes = torch.stack([\n stride*inds[:, -1] + 1.0,\n stride*inds[:, -2] + 1.0,\n stride*inds[:, -1] + 1.0 + cell_size,\n (stride*inds[:, -2] + 1.0 + cell_size),\n ], 0).transpose(0, 1).float()\n\n bounding_boxes = torch.round(bounding_boxes / scale).int()\n return bounding_boxes, score, offsets, inds[:, 0].int()", "def expert_to_batch_indices(self):\n return tf.split(\n self._batch_index, self._part_sizes_tensor, 
0, num=self._num_experts)", "def _compute_bn(self, lvl):\n bn = [0] # number of samples crossing the left/right boundary\n for n in range(lvl):\n # 1. down-sampling of N samples by the factor scl gives (N-1)//scl + 1 samples\n # 2. bn[-1]+M-1 is the number of samples acrossing the left/right boundary, with M being the number of freqeuncies\n # => hence after the downsampling the number of boundary crossing samples is:\n bn.append((bn[-1]+self.nfreq-2)//self.scaling+1)\n bn.append(bn[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return bn[1:][::-1]", "def balanced_validation_split(x, y, idx, ratio):\n _ind = [i for i in range(len(x))]\n np.random.seed(0)\n np.random.shuffle(_ind)\n y, x,idx = y[_ind], x[_ind], idx[_ind]\n size = int(np.floor(len(x) * ratio) / 2)\n # binary label index\n _y0 = y[y == 0]\n _y1 = y[y == 1]\n _x0 = x[y == 0]\n _x1 = x[y == 1]\n _idx0= idx[y==0]\n _idx1 = idx[y == 1]\n _ind = int(np.min([np.min([len(_y0), len(_y1)]), size]))\n y_valid = np.hstack([_y0[:_ind], _y1[:_ind]])\n if x.ndim == 1:\n x_valid = np.hstack([_x0[:_ind], _x1[:_ind]])\n idx_valid = np.hstack([_idx0[:_ind], _idx1[:_ind]])\n else:\n\n x_valid = np.vstack([_x0[:_ind], _x1[:_ind]])\n idx_valid = np.vstack([_idx0[:_ind], _idx1[:_ind]])\n\n return x_valid, y_valid ,idx_valid", "def GetNbSplitPoints(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ClosedFaceDivide_GetNbSplitPoints(self, *args)", "def computeBoundaries(dof_connectivity, dof_edges, dof_faces, bEdges, bFaces, Nord):\n # Number of boundaries on edges\n nBoundaryEdges = len(bEdges)\n num_dof_in_edge = Nord\n\n # Number of boundaries on faces\n nBoundaryFaces = len(bFaces)\n num_dof_in_face = Nord*(Nord-1)\n\n # Get boundary dofs for edges\n indx_boundary_edges = dof_edges[bEdges,:]\n\n # Get boundary dofs for faces\n if dof_faces.size == 0:\n # No dofs on faces (first order, Nord==1)\n indx_boundary_faces = np.zeros((1,0), dtype=np.int)\n else:\n indx_boundary_faces = dof_faces[bFaces,:]\n\n # Get indexes of boundary dofs\n tmp1 = np.reshape(indx_boundary_edges, (nBoundaryEdges*num_dof_in_edge))\n tmp2 = np.reshape(indx_boundary_faces, (nBoundaryFaces*num_dof_in_face))\n indx_boundary_dofs = np.hstack((tmp1, tmp2))\n\n # Get total number of dofs in the mesh\n total_num_dofs = np.max(dof_connectivity) + 1\n\n # Get indexes of inner dofs\n indx_inner_dofs = np.setdiff1d(np.arange(0,total_num_dofs), indx_boundary_dofs)\n\n return indx_inner_dofs, indx_boundary_dofs", "def bin_indices(self, coordinates, fractional=True):\n coords = numpy.asarray(coordinates).transpose()\n indices = [numpy.interp(coo, cen, range(n))\n for (coo, cen, n) in zip(coords, self.centers, self.shape)]\n index_arr = numpy.atleast_2d(numpy.array(indices).transpose())\n if fractional:\n return index_arr\n return numpy.floor(index_arr + 0.5).astype(numpy.int_)", "def getBinIndices(self, linear_index):\n return linear_index / self.magic_array % self.nbins_across_dims", "def hashsplit(X, splits={\"train\": 0.8, \"test\": 0.2}, salt=1, N=5):\n\n # normalize the weights, just in case\n splits = {k: v / sum(splits.values()) for k, v in splits.items()}\n\n # determine bins in [0,1] that correspond to each split\n bounds = np.cumsum([0.0] + [v for k, v in sorted(splits.items())])\n bins = {\n k: [bounds[i], bounds[i + 1]] for i, (k, v) in enumerate(sorted(splits.items()))\n }\n\n # hash the strings deterministically\n hashes = [\n hashlib.sha512((str(x) + str(salt)).encode(\"utf-8\")).hexdigest() for x in X\n ]\n\n # create some numbers in 
[0,1] (at N sig figs) from the hashes\n nums = np.array(\n [float(\"\".join(filter(str.isdigit, h))[:N]) / 10 ** N for h in hashes]\n )\n\n # check where the nums fall in [0,1] relative to the bins left and right boundaries\n inds = {k: np.where((nums > l) & (nums <= r)) for k, (l, r) in bins.items()}\n\n # np.where returns a singleton tuple containing an np array, so convert to list\n return {k: list(*v) for k, v in inds.items()}", "def generate_bboxes_with_scores(cls_map, scale, threshold=0.5, size=12, stride=2):\n assert len(cls_map.shape) == 2\n\n indices = np.where(cls_map >= threshold)\n bboxes = np.concatenate((\n ((indices[1] * stride) / scale).reshape(-1, 1),\n ((indices[0] * stride) / scale).reshape(-1, 1),\n ((indices[1] * stride + size) / scale).reshape(-1, 1),\n ((indices[0] * stride + size) / scale).reshape(-1, 1),\n cls_map[indices].reshape(-1, 1)\n ), axis=1)\n return bboxes, indices", "def split(self, X, y, feature_array):\n n, p = X.shape\n\n best_gain = 0\n best_split_point = 0\n best_feature_id = -1\n for feature_id in feature_array:\n cur_gain, cur_split_point = self.find_best_split(\n X[:, feature_id], y)\n if cur_gain > best_gain - self.eps:\n best_gain = cur_gain\n best_split_point = cur_split_point\n best_feature_id = feature_id\n\n assert(best_feature_id != -1)\n\n x = X[:, best_feature_id]\n left_index = x < best_split_point\n right_index = x >= best_split_point\n\n self.split_id = best_feature_id\n self.split_val = best_split_point\n\n return (left_index, right_index)" ]
[ "0.6504778", "0.6373539", "0.5991977", "0.59512573", "0.58468676", "0.58247024", "0.58247024", "0.5623608", "0.55173296", "0.54587805", "0.5435856", "0.54219145", "0.53155714", "0.53145355", "0.5306709", "0.5304065", "0.5301914", "0.5295978", "0.52721083", "0.52459157", "0.52457094", "0.5239658", "0.52312565", "0.52220803", "0.5206346", "0.51910365", "0.5181284", "0.51792115", "0.51711637", "0.5162949" ]
0.7075934
0
Test parsing and streaming of DPTValue1Ucount 50.
def test_value_50(self):
    self.assertEqual(DPTValue1Ucount().to_knx(50), (0x32,))
    self.assertEqual(DPTValue1Ucount().from_knx((0x32,)), 50)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unit(self):\n self.assertEqual(DPTSignedRelativeValue.unit, \"\")\n self.assertEqual(DPTPercentV8.unit, \"%\")\n self.assertEqual(DPTValue1Count.unit, \"counter pulses\")", "def test_with_status_data(self):\r\n\r\n with open(os.path.join(RESOURCE_PATH, 'ND161646.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n particles = parser.get_records(250)\r\n log.debug('got back %d records', len(particles))\r\n\r\n self.assert_particles(particles, 'ND161646.yml', RESOURCE_PATH)", "def test_uss_num_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_uss_num(input_val)\n self.assertEqual(output_val, self.line.uss_num)", "def test_value_min(self):\n self.assertEqual(DPTValue1Ucount().to_knx(0), (0x00,))\n self.assertEqual(DPTValue1Ucount().from_knx((0x00,)), 0)", "def test_simple(self):\n self.stream_handle = open(os.path.join(RESOURCE_PATH, 'adcpt_20130929_091817.DAT'))\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback)\n\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_header_footer, 138) \n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_a, 509) \n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_b, 880)\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_c, 1251)\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_d, 1622)\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_e, 1993)\n\n # no data left\n result = self.parser.get_records(1)\n self.assertEqual(result, [])\n self.assert_(isinstance(self.publish_callback_value, list))\n self.assertEqual(self.publish_callback_value[0], self.particle_e)", "def test_parse_pi_xml_02(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.bulk_get_series(chunk_size=5):\n pass\n self.assertTrue(True)", "def test_parse_pi_xml_08(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.bulk_get_series(chunk_size=5):\n pass\n self.assertTrue(True)", "def test_parse(self):\n report = (\n \"KJFK 032151Z 16008KT 10SM FEW034 FEW130 BKN250 27/23 A3013 RMK AO2 SLP201\"\n )\n data, units = metar.parse(report[:4], report)\n self.assertIsInstance(data, structs.MetarData)\n self.assertIsInstance(units, structs.Units)\n self.assertEqual(data.raw, report)", "def test_percentage_is_100(self):\n metric = self.metric(direction=\">\")\n sources = [self.source(metric, value=\"0\", total=\"0\")]\n measurement = self.measurement(metric, sources=sources)\n self.assertEqual(\"100\", measurement[\"percentage\"][\"value\"])", "def test_1020(self, gmn_client_v2):\n str_buf = io.StringIO()\n d1_gmn.app.sysmeta_extract.extract_values(out_stream=str_buf)\n self.sample.assert_equals(str_buf.getvalue(), \"all_stream\")", "def test_parse_example1(example1):\n assert example1 == [12, 14, 1969, 100756]", "def test_str_counter_pulses(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"counter_pulses\"\n )\n sensor.sensor_value.payload = DPTArray((0x9D,))\n\n self.assertEqual(sensor.resolve_state(), -99)\n self.assertEqual(sensor.unit_of_measurement(), \"counter pulses\")\n self.assertEqual(sensor.ha_device_class(), 
None)", "def test_str_percent_u8(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"percentU8\"\n )\n sensor.sensor_value.payload = DPTArray((0x6B,))\n\n self.assertEqual(sensor.resolve_state(), 107)\n self.assertEqual(sensor.unit_of_measurement(), \"%\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_pressure_count(self):\n self.assertEqual(self.Pcount, 7)", "def testSingleRead(self, point = 'pressure', expectedValue = 17.0 ):\n import time\n time.sleep(2)\n to_config = self.config['vdevs']['slave']['icsifaces'][0]\n from_config = self.config['vdevs']['master']['clientifaces'][0]\n points = self.config['vdevs']['slave']['points']\n client = ModbusRTU(to_config, points, from_config)\n\n reply = client.readPoints(point)\n #print \"Slave pressure: \", reply\n assert reply[0] == expectedValue", "def print_data():\n print \"quantity1.value %f\" % 10.0\n return 0", "def test_data_type(self):\n self.assertTrue(self.tester.data_type(), \"18S\")", "def testSingleRead(self, point = 'pressure', expectedValue = 17.0 ):\n import time\n time.sleep(2)\n client = ModbusTCP(self.config['vdevs']['slave']['icsifaces'][0],\n self.config['vdevs']['slave']['points']) \n\n reply = client.readPoints(point)\n #print \"Slave pressure: \", reply, \"Expected:\", expectedValue\n assert reply[0] == expectedValue", "def test_unit_of_measurement(self):\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n assert self.sensor_dict[name][\"units\"] == sensor.unit_of_measurement", "def test_value_ignored_entities(self):\n metric = self.metric()\n source = self.source(metric, value=\"10\")\n source[\"entities\"] = [\n {\"key\": \"entity1\", \"counted_tests\": 3},\n {\"key\": \"entity2\", \"counted_tests\": 5},\n {\"key\": \"entity3\", \"counted_tests\": 2},\n {\"key\": \"entity4\", \"counted_tests\": 10},\n ]\n source[\"entity_user_data\"] = {\n \"entity1\": {\"status\": \"fixed\"},\n \"entity2\": {\"status\": \"wont_fix\"},\n \"entity3\": {\"status\": \"false_positive\"},\n }\n measurement = self.measurement(metric, sources=[source])\n self.assertEqual(\"0\", measurement[\"count\"][\"value\"])", "def test_str_pressure(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"pressure\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xC5,\n 0xE6,\n 0xE6,\n 0x63,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -7388.79833984375)\n self.assertEqual(sensor.unit_of_measurement(), \"Pa\")\n self.assertEqual(sensor.ha_device_class(), \"pressure\")", "def test_parse_output(self):\n output_exp = [\"0.99\", \"0.01\"]\n output = parse_output(hgt_results_fp=self.consel_output_hgt_fp,\n method=\"consel\")\n self.assertEqual(output_exp, output)\n output_exp = \"1\"\n output = parse_output(hgt_results_fp=self.riatahgt_output_hgt_fp,\n method=\"riata-hgt\")\n self.assertEqual(output_exp, output)\n output_exp = (\"WP_011672248.1\\t372461\\tBuchnera aphidicola\\tProteobac\"\n \"teria;Gammaproteobacteria;Enterobacteriales;Enterobact\"\n \"eriaceae;Buchnera;Buchnera aphidicola\\t37.5\\t99.14\\nWP\"\n \"_045117937.1\\t580331\\tThermoanaerobacter italicus\\tFir\"\n \"micutes;Clostridia;Thermoanaerobacterales;Thermoanaero\"\n \"bacteraceae;Thermoanaerobacter;Thermoanaerobacter ital\"\n \"icus\\t42.6\\t93.84\")\n output = parse_output(hgt_results_fp=self.hgtector_output_hgt_fp,\n method=\"hgtector\")\n self.assertEqual(output_exp, output)\n output_exp = 
(\"G2311_SE001,\\tgi|557307555|ref|YP_008766893.1|\\t140749\"\n \"3\\tShigella phage SfIV\\tViruses;Caudovirales;Myovirida\"\n \"e\\t67.4\\t100\\t0.002\\nG1250_SE001,\\tgi|9630468|ref|NP_0\"\n \"46899.1|\\t40631\\tEnterobacteria phage N15\\tViruses;Cau\"\n \"dovirales;Siphoviridae;N15likevirus\\t79.4\\t100\\t0.002\\n\"\n \"G1252_SE001,\\tgi|428782382|ref|YP_007112139.1|\\t114714\"\n \"4\\tEnterobacteria phage HK225\\tViruses;Caudovirales;Si\"\n \"phoviridae;Lambdalikevirus\\t88.2\\t100\\t0.002\\nG1251_SE\"\n \"001,\\tgi|428782381|ref|YP_007112138.1|\\t1147144\\tEnter\"\n \"obacteria phage HK225\\tViruses;Caudovirales;Siphovirid\"\n \"ae;Lambdalikevirus\\t94.9\\t100\\t0.002\")\n output = parse_output(hgt_results_fp=self.darkhorse_output_hgt_fp,\n method=\"darkhorse\")\n self.assertEqual(output_exp, output)\n output_exp = \"AAA98667.1\"\n output = parse_output(hgt_results_fp=self.egid_output_hgt_fp,\n genbank_fp=self.genbank_input_fp,\n method=\"egid\")\n self.assertEqual(output_exp, output)\n output_exp = \"AAA98667.1\"\n output = parse_output(hgt_results_fp=self.genemark_output_hgt_fp,\n genbank_fp=self.genbank_input_fp,\n method=\"genemark\")\n self.assertEqual(output_exp, output)", "def test_converter_length(self):\n \n input_values = [3,0,3,'2000']\n\n output = []\n expected_result = \"Result: 2000 Meter(m) equals to 1.24 Mile(mi)\"\n\n def mock_input(s):\n output.append(s)\n return input_values.pop(0)\n\n mp2.input = mock_input\n mp2.print = lambda s:output.append(s)\n mp2.main()\n self.assertEqual(output[-1],expected_result)", "def test_str_pressure_2byte(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"pressure_2byte\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x7C,\n 0xF4,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 415498.24)\n self.assertEqual(sensor.unit_of_measurement(), \"Pa\")\n self.assertEqual(sensor.ha_device_class(), \"pressure\")", "def test_str_percent_v16(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"percentV16\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x8A,\n 0x2F,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -30161)\n self.assertEqual(sensor.unit_of_measurement(), \"%\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_overall_report_banner_ecpm():\n assert (overall_data['banner_report']['data'][6][0] == 'eCPM')\n for num in overall_data['banner_report']['data'][6][1:]:\n assert (num == 4000)", "def test_parse_pi_xml_02(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.get_series():\n pass\n self.assertTrue(True)", "def test_str_time_period_100msec(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx,\n \"TestSensor\",\n group_address_state=\"1/2/3\",\n value_type=\"time_period_100msec\",\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x6A,\n 0x35,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 27189)\n self.assertEqual(sensor.unit_of_measurement(), \"ms\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_parse_pi_xml_08(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.get_series():\n pass\n self.assertTrue(True)", "def test_many_values(self):\n write this test!" ]
[ "0.65973496", "0.61154234", "0.60894555", "0.5885825", "0.5680461", "0.5545329", "0.5519853", "0.5479107", "0.54342437", "0.53747666", "0.5368387", "0.53356916", "0.5330443", "0.53220665", "0.532082", "0.52951694", "0.5269306", "0.5266885", "0.52651095", "0.52512425", "0.5247367", "0.52466273", "0.52375627", "0.5230064", "0.5214435", "0.5205577", "0.51985615", "0.51973945", "0.5182217", "0.516943" ]
0.66703874
0
Test parsing and streaming of DPTValue1Ucount 255.
def test_value_max(self):
    self.assertEqual(DPTValue1Ucount().to_knx(255), (0xFF,))
    self.assertEqual(DPTValue1Ucount().from_knx((0xFF,)), 255)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_value_50(self):\n self.assertEqual(DPTValue1Ucount().to_knx(50), (0x32,))\n self.assertEqual(DPTValue1Ucount().from_knx((0x32,)), 50)", "def test_unit(self):\n self.assertEqual(DPTSignedRelativeValue.unit, \"\")\n self.assertEqual(DPTPercentV8.unit, \"%\")\n self.assertEqual(DPTValue1Count.unit, \"counter pulses\")", "def test_value_min(self):\n self.assertEqual(DPTValue1Ucount().to_knx(0), (0x00,))\n self.assertEqual(DPTValue1Ucount().from_knx((0x00,)), 0)", "def test_with_status_data(self):\r\n\r\n with open(os.path.join(RESOURCE_PATH, 'ND161646.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n particles = parser.get_records(250)\r\n log.debug('got back %d records', len(particles))\r\n\r\n self.assert_particles(particles, 'ND161646.yml', RESOURCE_PATH)", "def test_from_knx_wrong_value(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx((0x256,))", "def test_transcoder(self, raw, value):\n assert DPTSceneNumber.to_knx(value) == DPTArray(raw)\n assert DPTSceneNumber.from_knx(DPTArray(raw)) == value", "def prepare_data(val):\n return round((100 * val)/255)", "def test_uss_num_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_uss_num(input_val)\n self.assertEqual(output_val, self.line.uss_num)", "def test_str_pressure_2byte(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"pressure_2byte\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x7C,\n 0xF4,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 415498.24)\n self.assertEqual(sensor.unit_of_measurement(), \"Pa\")\n self.assertEqual(sensor.ha_device_class(), \"pressure\")", "def test_str_percent_u8(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"percentU8\"\n )\n sensor.sensor_value.payload = DPTArray((0x6B,))\n\n self.assertEqual(sensor.resolve_state(), 107)\n self.assertEqual(sensor.unit_of_measurement(), \"%\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_value_max_string(self):\n raw = [\n 0x41,\n 0x41,\n 0x41,\n 0x41,\n 0x41,\n 0x42,\n 0x42,\n 0x42,\n 0x42,\n 0x42,\n 0x43,\n 0x43,\n 0x43,\n 0x43,\n ]\n string = \"AAAAABBBBBCCCC\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def test_from_knx_wrong_parameter(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx((0x01, 0x02, 0x03))", "def test_str_counter_pulses(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"counter_pulses\"\n )\n sensor.sensor_value.payload = DPTArray((0x9D,))\n\n self.assertEqual(sensor.resolve_state(), -99)\n self.assertEqual(sensor.unit_of_measurement(), \"counter pulses\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_convert_same_unit():\n assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2\n assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3\n assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4\n assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5", "def test_data_type(self):\n self.assertTrue(self.tester.data_type(), \"18S\")", "def test_str_percent_v16(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"percentV16\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x8A,\n 0x2F,\n )\n 
)\n\n self.assertEqual(sensor.resolve_state(), -30161)\n self.assertEqual(sensor.unit_of_measurement(), \"%\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_from_knx_wrong_parameter2(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx(\"0x23\")", "def test_str_pressure(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"pressure\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xC5,\n 0xE6,\n 0xE6,\n 0x63,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -7388.79833984375)\n self.assertEqual(sensor.unit_of_measurement(), \"Pa\")\n self.assertEqual(sensor.ha_device_class(), \"pressure\")", "def test_domain_and_target_type(self):\n t = OneHotEncode(3)\n assert t.domain_type == \"integer\"\n assert t.target_type == \"real\"", "def test_dummy(self, data):\r\n source, expected = data\r\n result = self.converter.convert(source)\r\n self.assertUnicodeEquals(result, expected)", "def test_unsigned_integers(self):\n\n self._compare_avp(\n avp.Unsigned32AVP(299, 1234),\n memoryview(b'\\x00\\x00\\x01+\\x00\\x00\\x00\\x0c\\x00\\x00\\x04\\xd2'),\n )\n\n with self.assertRaises(CodecException):\n avp.Unsigned32AVP(299, -1234)", "def test_str_percent_v8(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"percentV8\"\n )\n sensor.sensor_value.payload = DPTArray((0x20,))\n\n self.assertEqual(sensor.resolve_state(), 32)\n self.assertEqual(sensor.unit_of_measurement(), \"%\")\n self.assertEqual(sensor.ha_device_class(), None)", "def test_decode_energy_sums(self):\n self.assertEqual(td.esums(decoded=True),\n decoder.decode_energy_sums(BytesIO(td.esums(True))))", "def test_value_empty_string(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n string = \"\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def test_pressure_count(self):\n self.assertEqual(self.Pcount, 7)", "def test_str_scaling(self):\n xknx = XKNX(loop=self.loop)\n sensor = Sensor(\n xknx,\n 'TestSensor',\n group_address_state='1/2/3',\n value_type=\"percent\")\n sensor.sensor_value.payload = DPTArray((0x40,))\n\n self.assertEqual(sensor.resolve_state(), 25)\n self.assertEqual(sensor.unit_of_measurement(), \"%\")\n self.assertEqual(sensor.ha_device_class(), None)", "def testSingleRead(self, point = 'pressure', expectedValue = 17.0 ):\n import time\n time.sleep(2)\n to_config = self.config['vdevs']['slave']['icsifaces'][0]\n from_config = self.config['vdevs']['master']['clientifaces'][0]\n points = self.config['vdevs']['slave']['points']\n client = ModbusRTU(to_config, points, from_config)\n\n reply = client.readPoints(point)\n #print \"Slave pressure: \", reply\n assert reply[0] == expectedValue", "def test_simple(self):\n self.stream_handle = open(os.path.join(RESOURCE_PATH, 'adcpt_20130929_091817.DAT'))\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback)\n\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_header_footer, 138) \n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_a, 509) \n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_b, 880)\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_c, 1251)\n result = 
self.parser.get_records(1)\n self.assert_result(result, self.particle_d, 1622)\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_e, 1993)\n\n # no data left\n result = self.parser.get_records(1)\n self.assertEqual(result, [])\n self.assert_(isinstance(self.publish_callback_value, list))\n self.assertEqual(self.publish_callback_value[0], self.particle_e)", "def test_datatype_uintarray_one_dimension(self):\n input = [0, 65535, 255]\n expect_dims = [len(input)]\n result = arcpy.QA_IDLTaskEngine_DataType_UIntArray_TEST(input, expect_dims)\n self.assertEqual(result[0], ';'.join(str(i) for i in input))", "def testSingleRead(self, point = 'pressure', expectedValue = 17.0 ):\n import time\n time.sleep(2)\n client = ModbusTCP(self.config['vdevs']['slave']['icsifaces'][0],\n self.config['vdevs']['slave']['points']) \n\n reply = client.readPoints(point)\n #print \"Slave pressure: \", reply, \"Expected:\", expectedValue\n assert reply[0] == expectedValue" ]
[ "0.67056155", "0.631558", "0.6267815", "0.5858477", "0.5822282", "0.5758016", "0.56248516", "0.561795", "0.5597454", "0.55848616", "0.5476851", "0.53409237", "0.530719", "0.5269258", "0.52683234", "0.5258315", "0.52361274", "0.5220925", "0.5200943", "0.5164981", "0.5155284", "0.51510763", "0.514222", "0.51269305", "0.51141745", "0.50798637", "0.50792474", "0.5078302", "0.50751704", "0.5069468" ]
0.6720798
0
Test parsing and streaming of DPTValue1Ucount 0.
def test_value_min(self):
    self.assertEqual(DPTValue1Ucount().to_knx(0), (0x00,))
    self.assertEqual(DPTValue1Ucount().from_knx((0x00,)), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unit(self):\n self.assertEqual(DPTSignedRelativeValue.unit, \"\")\n self.assertEqual(DPTPercentV8.unit, \"%\")\n self.assertEqual(DPTValue1Count.unit, \"counter pulses\")", "def test_value_50(self):\n self.assertEqual(DPTValue1Ucount().to_knx(50), (0x32,))\n self.assertEqual(DPTValue1Ucount().from_knx((0x32,)), 50)", "def test_uss_num_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_uss_num(input_val)\n self.assertEqual(output_val, self.line.uss_num)", "def test_with_status_data(self):\r\n\r\n with open(os.path.join(RESOURCE_PATH, 'ND161646.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n particles = parser.get_records(250)\r\n log.debug('got back %d records', len(particles))\r\n\r\n self.assert_particles(particles, 'ND161646.yml', RESOURCE_PATH)", "def test_noQuantity(self):\n # result = self.parser.parse(\"d6\")\n\n # TODO\n # self.assertIsNone(result)", "def test_from_knx_wrong_value(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx((0x256,))", "def test_percentage_is_zero(self):\n metric = self.metric(direction=\"<\")\n sources = [self.source(metric, value=\"0\", total=\"0\")]\n measurement = self.measurement(metric, sources=sources)\n self.assertEqual(\"0\", measurement[\"percentage\"][\"value\"])", "def test_empty_value(self):\n avp_val = avp.AVP(0)\n self.assertEqual(avp_val.value, None)\n self.assertEqual(avp_val.payload, None)\n\n # We can then set its value\n avp_val.value = b''\n self.assertEqual(avp_val.value, b'')\n self.assertEqual(avp_val.payload, b'')\n\n # And unset it again\n avp_val.value = None\n self.assertEqual(avp_val.value, None)\n self.assertEqual(avp_val.payload, None)", "def test_value_empty_string(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n string = \"\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def test_from_knx_wrong_parameter(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx((0x01, 0x02, 0x03))", "def test_simple(self):\n self.stream_handle = open(os.path.join(RESOURCE_PATH, 'adcpt_20130929_091817.DAT'))\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback)\n\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_header_footer, 138) \n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_a, 509) \n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_b, 880)\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_c, 1251)\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_d, 1622)\n result = self.parser.get_records(1)\n self.assert_result(result, self.particle_e, 1993)\n\n # no data left\n result = self.parser.get_records(1)\n self.assertEqual(result, [])\n self.assert_(isinstance(self.publish_callback_value, list))\n self.assertEqual(self.publish_callback_value[0], self.particle_e)", "def test_parse_version(self):\n version = VersionNumberScaleMeasurement.parse_version(None)\n self.assertEqual(Version(\"0\"), version)", "def test_empty_value(self, sc):\n assert sc.add('') == 0", "def test_no_source_measurements(self):\n measurement = self.measurement(self.metric())\n self.assertEqual(None, 
measurement[\"count\"][\"value\"])", "def test_value_max(self):\n self.assertEqual(DPTValue1Ucount().to_knx(255), (0xFF,))\n self.assertEqual(DPTValue1Ucount().from_knx((0xFF,)), 255)", "def test_from_knx_wrong_parameter2(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx(\"0x23\")", "def test_value_initial_value(self):\n self.assertEqual(self.progressbar.getValue(), 0)", "def testSingleRead(self, point = 'pressure', expectedValue = 17.0 ):\n import time\n time.sleep(2)\n to_config = self.config['vdevs']['slave']['icsifaces'][0]\n from_config = self.config['vdevs']['master']['clientifaces'][0]\n points = self.config['vdevs']['slave']['points']\n client = ModbusRTU(to_config, points, from_config)\n\n reply = client.readPoints(point)\n #print \"Slave pressure: \", reply\n assert reply[0] == expectedValue", "def testSingleRead(self, point = 'pressure', expectedValue = 17.0 ):\n import time\n time.sleep(2)\n client = ModbusTCP(self.config['vdevs']['slave']['icsifaces'][0],\n self.config['vdevs']['slave']['points']) \n\n reply = client.readPoints(point)\n #print \"Slave pressure: \", reply, \"Expected:\", expectedValue\n assert reply[0] == expectedValue", "def test_1020(self, gmn_client_v2):\n str_buf = io.StringIO()\n d1_gmn.app.sysmeta_extract.extract_values(out_stream=str_buf)\n self.sample.assert_equals(str_buf.getvalue(), \"all_stream\")", "def test_value_ignored_entities(self):\n metric = self.metric()\n source = self.source(metric, value=\"10\")\n source[\"entities\"] = [\n {\"key\": \"entity1\", \"counted_tests\": 3},\n {\"key\": \"entity2\", \"counted_tests\": 5},\n {\"key\": \"entity3\", \"counted_tests\": 2},\n {\"key\": \"entity4\", \"counted_tests\": 10},\n ]\n source[\"entity_user_data\"] = {\n \"entity1\": {\"status\": \"fixed\"},\n \"entity2\": {\"status\": \"wont_fix\"},\n \"entity3\": {\"status\": \"false_positive\"},\n }\n measurement = self.measurement(metric, sources=[source])\n self.assertEqual(\"0\", measurement[\"count\"][\"value\"])", "def test_unpack_3(self):\n val = ('item number', int)\n assert lws.parse_schema_val(val) == (int, '')", "def get_value(self):\r\n return 0", "def test_uss_num_no_value(self):\n self.line._parse_uss_num(\" \")\n self.assertEqual(None, self.line.uss_num)", "def test_missing_filter_value():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot X date BY YEAR Y report_number COUNT FILTER\n \"\"\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def test__parse_sku():\n for input_data, expected_output in (\n ({}, ''),\n ({'sku': None}, ''),\n ({'sku': ''}, ''),\n ({'sku': 'a'}, 'a'),\n ):\n output = parse_sku(input_data)\n vampytest.assert_eq(output, expected_output)", "def test_raw_data(self):\n self.assertEqual(self.tester.raw_data, 1)", "def test_bad_data(self):\n # Bad checksum\n # If checksum is bad, skip the record and continue parsing.\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_CHECKSUM)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback)\n # Only the header and second record, particle_b should be returned.\n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. 
Record containing bad data should have been skipped.\", len(result))\n \n # Incorrect number of bytes\n # If numbytes is incorrect, skip the record and continue parsing.\n self.start_state = {StateKey.POSITION: 0}\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_NUM_BYTES)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback) \n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. Record containing bad data should have been skipped.\", len(result))", "def test_parse_output_empty(self):\n output_exp = 'NaN'\n output = parse_output(hgt_results_fp=self.empty_output_hgt_fp,\n method=\"riata-hgt\")\n self.assertEqual(output_exp, output)", "def test_str_counter_pulses(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"counter_pulses\"\n )\n sensor.sensor_value.payload = DPTArray((0x9D,))\n\n self.assertEqual(sensor.resolve_state(), -99)\n self.assertEqual(sensor.unit_of_measurement(), \"counter pulses\")\n self.assertEqual(sensor.ha_device_class(), None)" ]
[ "0.65742433", "0.6063547", "0.6014494", "0.5983357", "0.57647645", "0.5689636", "0.5639485", "0.5594808", "0.5583179", "0.55746424", "0.5439922", "0.5422113", "0.5407889", "0.5406917", "0.53917974", "0.5373599", "0.53290766", "0.53238535", "0.5318", "0.52924883", "0.5290578", "0.52895886", "0.5277972", "0.5266021", "0.52655625", "0.52321714", "0.52264684", "0.5222354", "0.5214857", "0.5207792" ]
0.6662104
0
Test parsing of DPTValue1Ucount with wrong value (3 byte array).
def test_from_knx_wrong_parameter(self):
    with self.assertRaises(ConversionError):
        DPTValue1Ucount().from_knx((0x01, 0x02, 0x03))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_from_knx_wrong_value(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx((0x256,))", "def test_from_knx_wrong_parameter2(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx(\"0x23\")", "def test_value_min(self):\n self.assertEqual(DPTValue1Ucount().to_knx(0), (0x00,))\n self.assertEqual(DPTValue1Ucount().from_knx((0x00,)), 0)", "def test_unit(self):\n self.assertEqual(DPTSignedRelativeValue.unit, \"\")\n self.assertEqual(DPTPercentV8.unit, \"%\")\n self.assertEqual(DPTValue1Count.unit, \"counter pulses\")", "def test_uss_num_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_uss_num(input_val)\n self.assertEqual(output_val, self.line.uss_num)", "def test_value_50(self):\n self.assertEqual(DPTValue1Ucount().to_knx(50), (0x32,))\n self.assertEqual(DPTValue1Ucount().from_knx((0x32,)), 50)", "def test_value_max_string(self):\n raw = [\n 0x41,\n 0x41,\n 0x41,\n 0x41,\n 0x41,\n 0x42,\n 0x42,\n 0x42,\n 0x42,\n 0x42,\n 0x43,\n 0x43,\n 0x43,\n 0x43,\n ]\n string = \"AAAAABBBBBCCCC\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def test_value_max(self):\n self.assertEqual(DPTValue1Ucount().to_knx(255), (0xFF,))\n self.assertEqual(DPTValue1Ucount().from_knx((0xFF,)), 255)", "def test_bad_data(self):\n # Bad checksum\n # If checksum is bad, skip the record and continue parsing.\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_CHECKSUM)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback)\n # Only the header and second record, particle_b should be returned.\n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. Record containing bad data should have been skipped.\", len(result))\n \n # Incorrect number of bytes\n # If numbytes is incorrect, skip the record and continue parsing.\n self.start_state = {StateKey.POSITION: 0}\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_NUM_BYTES)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback) \n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. 
Record containing bad data should have been skipped.\", len(result))", "def test_unpack_3(self):\n val = ('item number', int)\n assert lws.parse_schema_val(val) == (int, '')", "def test_example_day9_pt1():\n assert find_first_invalid_value(ex_data, 5) == 127", "def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))", "def testUnexpectedTagBelowOne(self):\n # Message has tag 0, type NUMERIC.\n invalid_tag_message = chr(protobuf._Encoder.NUMERIC)\n\n self.assertErrorIs(messages.DecodeError,\n 'Invalid tag value 0',\n protobuf.decode_message,\n test_util.OptionalMessage,\n invalid_tag_message)", "def test_generic_failed_code_value(self):\n value = 0\n\n for elem in self.test_generic_failed_code:\n self.assertEqual(value, elem)", "def test_value_empty_string(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n string = \"\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def test_is_old_papernum(self):\n self.assertFalse(util.is_old_papernum(\"9106001\"))\n self.assertTrue(util.is_old_papernum(\"9107001\"))\n self.assertFalse(util.is_old_papernum(\"9200001\"))\n self.assertTrue(util.is_old_papernum(\"9201001\"))\n self.assertTrue(util.is_old_papernum(\"0703999\"))\n self.assertFalse(util.is_old_papernum(\"0704001\"))", "def test_unknown(self):\n result = self.flag.parseString('U')\n self.assertEqual('U', result[0])", "def test_noQuantity(self):\n # result = self.parser.parse(\"d6\")\n\n # TODO\n # self.assertIsNone(result)", "def test_negativeQuantity(self):\n result = self.parser.parse(\"-1d6\")\n\n # TODO\n # self.assertIsNone(result)", "def test_corruptedbit(self):\n self.assertRaises(ValueError, two_out_five, '1100000111') #Too many 1s must raise a ValueError!\n self.assertRaises(ValueError, two_out_five, '1100000100') #Too many 0s must raise a ValueError!", "def test_from_knx_wrong_parameter_too_large(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n with self.assertRaises(ConversionError):\n DPTString().from_knx(raw)", "def test_from_knx_wrong_parameter_too_small(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n with self.assertRaises(ConversionError):\n DPTString().from_knx(raw)", "def test_bad_data(self):\r\n # LB180210_3_corrupted.PD0 has three records in it, the 2nd record was corrupted\r\n with open(os.path.join(RESOURCE_PATH, 'LB180210_3_corrupted.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n # try to get 3 particles, should only get 2 back\r\n # the second one should correspond to ensemble 3\r\n parser.get_records(3)\r\n\r\n log.debug('Exceptions : %s', self.exception_callback_value[0])\r\n\r\n self.assertEqual(len(self.exception_callback_value), 1)\r\n self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))", "def test_bad_value_type(self):\n\n print 'Patience, this may take 20 seconds'\n request = service.get_request('POST', {u'species': u'Nosuchtaxonia mistakea'})\n x = self.start_request_tests(request)\n self.assertTrue(x.status_code % 100 == 4, x.status_code)\n json.dump(x.to_dict(), sys.stdout, indent=2)\n # TBD: Change this to a *correct* check for 
message informativeness.\n m = x.json().get(u'message')\n self.assertTrue(u'species' in m, #informative?\n 'no \"species\" in \"%s\"' % m)", "def test_mult_specifiers_missing(self):\n template = '{0} too few {1}'\n value_count = 3\n msg = ('The formatter contains too few \"{}\" '\n 'specifiers for the number of source fields.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def test_bad_values(self):\n self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n self.assertRaisesInternalError([None])", "def test_nonIntegerUIDVALIDITY(self):\n d = self._examineOrSelect()\n self._response(b'* OK [UIDVALIDITY foo] UIDs valid')\n self.failureResultOf(d, imap4.IllegalServerResponse)", "def test_deserialize_number():\n bytestring = bytes([\n 0b_1010_0001,\n 0b_1100_1111,\n 0b_1000_0010,\n 0b_0100_0001\n ])\n assert 136357793 == UnsignedInt.read(bytestring)", "def test_value_error(self):\n self._error_test(ValueError)", "def test_unsigned_integers(self):\n\n self._compare_avp(\n avp.Unsigned32AVP(299, 1234),\n memoryview(b'\\x00\\x00\\x01+\\x00\\x00\\x00\\x0c\\x00\\x00\\x04\\xd2'),\n )\n\n with self.assertRaises(CodecException):\n avp.Unsigned32AVP(299, -1234)" ]
[ "0.6574174", "0.64003175", "0.63059187", "0.6276057", "0.6197245", "0.6195625", "0.61819005", "0.6062769", "0.600046", "0.5978124", "0.5972343", "0.5937288", "0.57631", "0.5702879", "0.566235", "0.5638889", "0.56113666", "0.5604449", "0.56010675", "0.55782485", "0.557578", "0.5571303", "0.55697894", "0.5531455", "0.5516101", "0.54972446", "0.54956365", "0.54716367", "0.5467117", "0.54571486" ]
0.6430515
1
Returns the eopatch with the new grouping of the LPIS data. A column "GROUP_1_ID" is also added, with the ID associated to the groups. col_cropN_lpis is the name of the column of the crop type in the LPIS dataframe. col_cropN_lpistogroup is the name of the column of the crop type in the CSV file specified by self.lpis_to_group_file.
def execute(self, eopatch, col_cropN_lpis, col_cropN_lpistogroup):
    # Group LPIS classes
    lpis = eopatch.vector_timeless["LPIS_{}".format(self.year)]
    mapping = pd.read_csv(self.lpis_to_group_file, sep=";")
    result = pd.merge(lpis, mapping, how="left", left_on=[col_cropN_lpis], right_on=[col_cropN_lpistogroup])
    # Assign GroupID to GroupName
    group_id = pd.read_csv(self.crop_group_file, sep=";")
    resultend = pd.merge(result, group_id, how="left", on="GROUP_1")
    eopatch.vector_timeless["LPIS_{}".format(self.year)] = resultend
    # Fill GroupID NaN values with zeros
    group = eopatch.vector_timeless["LPIS_{}".format(self.year)]["GROUP_1_ID"]
    eopatch.vector_timeless["LPIS_{}".format(self.year)]["GROUP_1_ID"] = group.fillna(0)
    return eopatch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def putHaplotypeGroupIntoDB(self, session, input_fname, tg_ecotypeid2row, max_snp_typing_error_rate, snp_id_ls):\n\t\tsys.stderr.write(\"Constructing haplotype groups ...\\n\")\n\t\tpattern_ecotypeid = re.compile(r'(?<=\\))\\d+')\n\t\treader = csv.reader(open(input_fname), delimiter=figureOutDelimiter(input_fname))\n\t\tcol_name2col_index = getColName2IndexFromHeader(reader.next())\n\t\tecotypeid_idx = col_name2col_index['ecotypeid']\n\t\thaplo_name_idx = col_name2col_index['haplogroup']\n\t\tgeographic_integrity_idx = col_name2col_index['geographic_integrity']\n\t\tfiltered_SNPs_idx = col_name2col_index['filtered_SNPs']\n\t\tcounter = 0\n\t\tfor tg_ecotypeid, row in tg_ecotypeid2row.iteritems():\n\t\t\tecotypeid = int(row[ecotypeid_idx])\n\t\t\tecotypeid = tg_ecotypeid\t#2009-4-4 use tg_ecotypeid instead\n\t\t\thaplo_name = row[haplo_name_idx]\n\t\t\tgeographic_integrity_name = row[geographic_integrity_idx]\n\t\t\tfiltered_SNPs = row[filtered_SNPs_idx]\n\t\t\tref_ecotypeid = int(pattern_ecotypeid.search(haplo_name).group(0))\n\t\t\thaplo_group = StockDB.HaploGroup.query.filter_by(short_name=haplo_name).first()\n\t\t\tif not haplo_group:\n\t\t\t\thaplo_group = StockDB.HaploGroup(short_name=haplo_name, ref_ecotypeid=ref_ecotypeid, max_snp_typing_error_rate=max_snp_typing_error_rate)\n\t\t\t\tsession.save(haplo_group)\n\t\t\t\tsession.flush()\n\t\t\t\n\t\t\tecotype = StockDB.Ecotype.get(ecotypeid)\n\t\t\thaplo_group.ecotypes.append(ecotype)\n\t\t\tgeographic_integrity = StockDB.GeographicIntegrity.query.filter_by(short_name=geographic_integrity_name).first()\n\t\t\tif not geographic_integrity:\n\t\t\t\tgeographic_integrity = StockDB.GeographicIntegrity(short_name=geographic_integrity_name)\n\t\t\t\tsession.save(geographic_integrity)\n\t\t\t\tsession.flush()\n\t\t\tecotype.geographic_integrity = geographic_integrity\n\t\t\tsession.save_or_update(ecotype)\n\t\t\t#one bit of ecotype: link the ecotypeid to tg_ecotype_id\n\t\t\t\n\t\t\t\n\t\t\t#deal with filtered SNPs\n\t\t\tfor i in range(len(filtered_SNPs)):\n\t\t\t\tallele = filtered_SNPs[i]\n\t\t\t\tif allele=='_':\n\t\t\t\t\tcontinue\n\t\t\t\tfc = StockDB.FilteredCalls(ecotypeid=ecotypeid, snpid=snp_id_ls[i], allele=allele)\n\t\t\t\tsession.save(fc)\n\t\t\t\tsession.flush()\n\t\t\tcounter += 1\n\t\t\tif counter%500==0 and self.report:\n\t\t\t\tsys.stderr.write('%s%s'%('\\x08'*80, counter))\n\t\tsession.flush()\n\t\tsys.stderr.write(\"Done.\\n\")", "def makeGroupsFromCutFile(self):\n if self.cutfile == None:\n print \"Cannot make groups without a cuts file\"\n return ([],[])\n else:\n groups = []\n labels = []\n yields = []\n all_cols = self.qie.columns.values\n # For each predefined group\n for grouplist in cut_groups:\n labels.append(grouplist[0])\n g = None\n # For each cut in that group\n for cut in grouplist[1]:\n # Get min and max values for main cuts (TODO: handle marginal cuts)\n cut_min = self.cuts[cut][0]\n cut_max = self.cuts[cut][1]\n # For each df column corresponding to that cut (sometimes more than one measurement)\n for col in all_cols:\n if col.split(\"_\")[0] == cut:\n g_tmp = (self.qie[col] < cut_min) | (self.qie[col] > cut_max)\n if 'NoneType' in str(type(g)) :\n g = g_tmp\n else: \n g = g | g_tmp\n # Make exclusive groups\n if len(groups) > 0:\n g = g & (self.NotGroup(groups))\n groups.append(g)\n yields.append(g.sum())\n # Make final group containing all other chips\n groups.append(self.NotGroup(groups))\n labels.append(\"Good\")\n yields.append(groups[-1].sum())\n self.makeYieldsTable(yields, labels)\n # Add column 
to data frame containing \"Good\" (1), \"bad\" (0), \"marginal\" (2,..) info\n self.qie[\"Sorting\"] = np.where(groups[-1], 1, 0)\n print sum(self.qie[\"Sorting\"])\n #print self.qie\n self.makeSortingFile()\n return (groups, labels)", "def get_new_config_group(self):\n filename = \"%(config_dir)s/%(group)s.%(time)s\" % \\\n { \"config_dir\": self.config_dir(),\n \"group\": self.group_name(),\n \"time\": common.time_suffix(),}\n common.write_file(\"w\", 0o644, filename, self.get_match_criteria())", "def load_group_from_config(self):\n\n group_file_name = \"cicada/config/group.yaml\"\n if os.path.isfile(group_file_name):\n self.group_data = dict()\n with open(group_file_name, 'r') as stream:\n self.group_data = yaml.safe_load(stream)\n self.all_groups = deepcopy(self.group_data)\n if self.group_data:\n keys_to_del = []\n for key, value in self.group_data.items():\n missing_file = False\n for file in value:\n if file not in self.nwb_path_list.values():\n missing_file = True\n if missing_file:\n keys_to_del.append(key)\n for key in keys_to_del:\n self.group_data.pop(key)\n self.grouped_labels = []\n if self.group_data:\n self.grouped = True\n for value in self.group_data.values():\n nwb_file_list = []\n for file in value:\n io = NWBHDF5IO(file, 'r')\n nwb_file = io.read()\n self.data_dict[nwb_file.identifier] = nwb_file\n nwb_file_list.append(nwb_file.identifier)\n self.grouped_labels.append(nwb_file_list)\n self.showGroupMenu.setEnabled(True)\n self.addGroupDataMenu.setEnabled(True)\n self.populate_menu()\n else:\n self.showGroupMenu.setEnabled(False)\n self.addGroupDataMenu.setEnabled(False)\n self.showGroupMenu.clear()\n self.addGroupDataMenu.clear()", "def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)", "def load_ids_to_groups(self):\n self.groups = set([])\n self.h_group_ids = defaultdict(lambda: set([]))\n self.h_id_to_group = defaultdict(lambda: set([]))\n for i,g in zip(self.df.sample_id, self.df.group_name):\n self.h_group_ids[g].add(i)\n self.h_id_to_group[i] = g\n self.groups.add(g)", "def generate_group_ids_items(self):\n\n groups_ids = [[\"all_id\"], 
[\"state_id\"], [\"store_id\"], [\"cat_id\"], [\"dept_id\"], [\"item_id\"], [\"state_id\", \"cat_id\"], [\"state_id\", \"dept_id\"], [\"store_id\", \"cat_id\"], [\"store_id\", \"dept_id\"], [\"item_id\", \"state_id\"], [\"item_id\", \"store_id\"]]\n group_ids_items_df = pd.DataFrame({\"group_id\": self.roll_index.get_level_values(\"level\"), \"time_series_ids\": self.roll_index.get_level_values(\"id\")})\n group_ids_items_df[\"group_id\"] = group_ids_items_df[\"group_id\"].apply(lambda x: groups_ids[x])\n \n return group_ids_items_df", "def get_grp(self):\n\n grp = -1\n\n if self.depth > 2:\n\n inp = ri.RhinoInput(self.path[2])\n\n grp = inp.get_no()\n\n return grp", "def get_contribution_dataframe_groups(self):\n pargrp_dict = {}\n par = self.pst.parameter_data\n groups = par.groupby(\"pargp\").groups\n for grp,idxs in groups.items():\n pargrp_dict[grp] = list(par.loc[idxs,\"parnme\"])\n return self.get_contribution_dataframe(pargrp_dict)", "def _merge_groups(self):\n fof_rdd = self.fof_rdd\n nPartitions = self.nPartitions\n \n def remap_local_groups(iterator): \n gmap = iterator.next() \n for p_arr in iterator:\n remap_gid_partition_cython(p_arr, gmap)\n yield p_arr\n\n mapping = self._get_level_map()\n\n group_merge_map = (mapping.flatMap(lambda (g,g_p):\n [(gid, (g,g_p)) for gid in [decode_partition(g), decode_partition(g_p)]])\n .partitionBy(nPartitions)\n .map(lambda (k,v): v, preservesPartitioning=True)\n .mapPartitions(create_map_dict, True)).cache() \n\n merged_rdd = (group_merge_map + fof_rdd).mapPartitions(remap_local_groups, preservesPartitioning=True)\n merged_rdd.setName('merged_rdd')\n\n self.group_merge_map = group_merge_map\n\n return merged_rdd", "def batchAnalysis(groupfil):\n groups = []\n with open(groupfil, 'r') as fIn:\n for line in fIn:\n groups.append(line.strip().split(','))\n \n checks = ['maxV', 'maxDerivV', 'maxDerivdV', 'minDerivV',\n 'minDerivdV', 'preMinV', 'postMinV', 'preMaxCurveV',\n 'preMaxCurveK', 'postMaxCurveV', 'postMaxCurveK',\n 'height', 'repolarizationV', 'intervals', 'frequencies']\n props = {ch: {gr: {} for gr in list(set([g[1] for g in groups]))}\n for ch in checks} # A dict of dicts\n # props [properties] [group name] [cell name]\n cells = [f[0].split('/')[-1].split('_')[0] for f in groups]\n \n # Add a few more keys\n props['activity'] = {gr: {} for gr in list(set([g[1] for g in groups]))}\n \n # Assign all the properties to the props dict\n for g in groups:\n df = pd.read_csv(g[0])\n df = df.drop('Unnamed: 33', 1) # Garbage\n df = df.drop('freq', 1) # These are downsampled\n df = df.dropna() # Dropna\n \n # If there are multiple clusters, add them in order\n if max(df.clust_inds) == 1: # Two clusters\n numClusts = int(max(df.clust_inds)+1)\n for ch in checks:\n for clust in range(numClusts):\n try:\n props[ch][g[1]][cells[groups.index(g)]].append(df[df['clust_inds']==clust][ch].dropna().values)\n except:\n props[ch][g[1]][cells[groups.index(g)]] = [df[df['clust_inds']==clust][ch].dropna().values]\n else: # Just one cluster\n for ch in checks:\n props[ch][g[1]][cells[groups.index(g)]] = [df[ch].dropna().values]\n # Get activity profile\n tIn, cBouts = timeInClusters(df)\n props['activity'][g[1]][cells[groups.index(g)]] = [tIn, cBouts]\n \n return props", "def constella(cur_plms, pc_starscape, group_iter, outfile_prefix):\n # Copy dataframe to avoid modifying the input dataframe\n cur_plms_copy = cur_plms.copy(deep=True)\n\n sanity_check_pos = 2 # Needs to point at days in image identifier!\n\n singleton_no = pc_starscape.shape[0]\n\n if 
params.debug is not None:\n print(f'{singleton_no} plms to group')\n\n plm_links = linkage(pc_starscape.loc[:, pc_starscape.columns[2:len(pc_starscape.columns)]].values, 'ward')\n\n # For n-1 to 2 leaves on the current hierarchical cluster dendrogram...\n for c in np.arange(singleton_no - 1, 2, -1):\n # Extract current number of clusters for the agglomeration step\n cutree = cut_tree(plm_links, n_clusters=c)\n # Generate a list of all current clusters identified\n group_list = np.unique(cutree)\n\n # For the current cluster being queried...\n for g in group_list:\n # Create list of current clusters row indices in pandas dataframe\n cur_index = [i for i, x in enumerate(cutree == g) if x]\n # Create list of current clusters present group identity assignments\n cur_index_id = np.array(cur_plms_copy.iloc[cur_index, 0])\n # Are any of the plms in the current cluster unnamed, how many?\n empty_count = np.count_nonzero(cur_index_id == None)\n empty_index = [i for (i, v) in zip(cur_index, cur_plms_copy.iloc[cur_index, 0].values == None) if v]\n # Are any of the plms in the current cluster already assigned an identity, what are those identities?\n unique_ids = np.unique(cur_index_id[np.array(cur_index_id) != None])\n\n # If cluster is two unnamed plms exactly, assign this group their own identity as a pair\n if empty_count == 2:\n pair_names = cur_plms_copy.iloc[empty_index, 1].values\n # Sanity check! Pairs must be on different days\n if pair_names[0].split('_')[sanity_check_pos] != pair_names[1].split('_')[sanity_check_pos]:\n cur_plms_copy.iloc[empty_index, 0] = group_iter\n group_iter = group_iter + 1\n else:\n cur_plms_copy.iloc[empty_index[0], 0] = group_iter\n cur_plms_copy.iloc[empty_index[1], 0] = group_iter + 1\n group_iter = group_iter + 2\n\n # For the identities that already exist...\n for uid in unique_ids:\n # If only one plm assigned a name in current cluster and a second unnamed plm exists\n # transfer ID over to create a pair\n if np.count_nonzero(np.array(cur_index_id) == uid) < 2 and empty_count == 1:\n # Store boolean positions for plms with IDs matching current id out of current cluster\n match_ids = [i for i, x in enumerate(cur_plms_copy.iloc[cur_index, 0].values == uid) if x]\n # Store boolean positions for plms which are unnamed out of current cluster\n null_ids = [i for i, x in enumerate(cur_plms_copy.iloc[cur_index, 0].values == None) if x]\n # If exactly 1 matching ID and 1 null ID (i.e. 2 plms total)\n # continue to pass ID name to the unnamed plm\n if len(match_ids) + len(null_ids) == 2:\n # Sanity check! 
Pairs must be on different days\n pair_names = cur_plms_copy.iloc[[cur_index[i] for i in match_ids + null_ids], 1].values\n if pair_names[0].split('_')[sanity_check_pos] != pair_names[1].split('_')[sanity_check_pos]:\n # Transfer identities to the unnamed plm\n cur_plms_copy.iloc[[cur_index[i] for i in null_ids], 0] = uid\n\n # Now that all groups that can be linked are formed, name rogues...\n rogues = [i for i, x in enumerate(cur_plms_copy.loc[:, 'group'].values == None) if x]\n for rogue in rogues:\n cur_plms_copy.iloc[[rogue], 0] = group_iter\n group_iter = group_iter + 1\n\n grpnames = cur_plms_copy.loc[:, ['group']].values\n plmnames = cur_plms_copy.loc[:, ['plmname']].values\n\n labelnames = []\n\n for li in range(0, len(plmnames)):\n labelnames.append(''.join(plmnames[li] + ' (' + str(int(grpnames[li])) + ')'))\n\n if params.debug is not None:\n plt.figure()\n plt.title('')\n plt.xlabel('')\n plt.ylabel('')\n dendrogram(plm_links, color_threshold=100, orientation=\"left\", leaf_font_size=10, labels=np.array(labelnames))\n plt.tight_layout()\n\n if params.debug == \"print\":\n plt.savefig(outfile_prefix + '_plmHCA.png')\n plt.close()\n elif params.debug == \"plot\":\n plt.show()\n\n return cur_plms_copy, group_iter", "def _group(codes, group_file):\n \n groups, size = {}, len(codes)\n group_temp = 'oma_temporary_groups.tsv'\n if os.path.isfile(group_temp):\n info('Loading pre-existed temporary OMA ortholog groups (oma_temporary_'\n 'groups.tsv) ...')\n for blocks in _lines(group_temp):\n groups[blocks[0]] = blocks[1:]\n else:\n info('Parsing OMA ortholog groups (oma-groups.txt.gz) ...')\n for blocks in _lines(group_file):\n number, finger, entries = blocks[0], blocks[1], blocks[2:]\n ids = [entry for entry in entries if entry[:5] in codes]\n if size == len(set(i[:5] for i in ids)):\n groups[finger] = ids\n if groups:\n with open(group_temp, 'w') as o:\n o.writelines('{}\\t{}\\n'.format(k, '\\t'.join(v))\n for k, v in groups.items())\n info('Yield {} one-to-one ortholog groups for {} query items.'.format(\n len(groups), size))\n return groups", "def make_grp(self):\n try:\n self.base['grp']\n except:\n self.base['grp'] = np.zeros(len(self.base),dtype='i')\n\n for halo in self._halos.values():\n halo[name][:] = halo._halo_id\n\n if config['verbose']: print \"writing %s\"%(self._base().filename+'.grp')\n self._base().write_array('grp',overwrite=True,binary=False)", "def updateImageGroups(self):\n self.img_grps = self.splitImages()\n grps = self.img_grps\n self.detail.clear()\n detail = \"Available Groups : \\n\"\n if len(grps) >= 1:\n for i in range(len(grps)):\n detail += \"Group \"+ str(i+1)+ \" : \" + str(grps[i][0]) + \" ... 
\" + str(grps[i][-1]) + '\\n'\n\n self.detail.insertPlainText(detail)\n self.detail.moveCursor(QTextCursor.End)", "def _get_gid_map(self, level=0):\n fof_rdd = self.fof_rdd\n sc = self.sc\n\n nPartitions = sc.defaultParallelism*5\n\n groups_map = (fof_rdd.flatMap(lambda p: p[np.where(p['is_ghost'])[0]])\n .map(pid_gid)\n .groupByKey(nPartitions)\n .values()\n .filter(lambda x: len(x)>1)\n .map(lambda x: sorted(x))\n .flatMap(lambda gs: [(g, gs[0]) for g in gs[1:]]))\n\n return groups_map", "def create_group_incidence_matrix(self):\n\n gv_data = []\n # decompose the compounds in the training_data and add to G\n for compound_id in self.cids:\n smiles = ccache.get_compound(compound_id).smiles\n try:\n gv_data.append(\n list(self.decomposer.smiles_to_groupvec(smiles).flat))\n except inchi2gv.GroupDecompositionError:\n gv_data.append([0] * len(self.group_names))\n\n G = pd.DataFrame(index=self.cids,\n columns=self.group_names,\n dtype=float,\n data=gv_data)\n\n for compound_id in G.index[(G == 0).all(1)]:\n # add a column for this compound, representing itself\n # as a new group\n G[compound_id] = 0.0\n\n # place a single '1' for this compound group decomposition\n G.at[compound_id, compound_id] = 1.0\n\n return G.values", "def add_group_data(self, group_name):\n self.sorted = False\n self.grouped = False\n self.labels_to_add = []\n for path in self.all_groups.get(group_name):\n io = NWBHDF5IO(path, 'r')\n nwb_file = io.read()\n # self.labels.append(nwb_file.identifier)\n self.nwb_path_list.update({nwb_file.identifier: path})\n self.labels_to_add.append(nwb_file.identifier)\n self.musketeers_widget.session_widget.populate(self.labels_to_add, 'add')\n self.musketeers_widget.session_widget.update_text_filter()\n self.groupMenu.setEnabled(True)\n self.sortMenu.setEnabled(True)", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def write_groups(sql, langs):\n groups = []\n nametemp = []\n langorder = []\n sql.write(\"INSERT INTO food_group(food_group_id) VALUES\\n\")\n with open(\"data/groups.csv\", 'r', encoding='utf8') as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\", quoting=csv.QUOTE_MINIMAL)\n i = 0\n for row in reader:\n if i == 0:\n i += 1\n langorder = getorder(row, langs)\n continue\n if row == '' or ''.join(row) == '':\n continue\n nametemp.append([row[x] for x in langorder])\n groups.append([i-1, [x for x in row[0].split(';')]])\n if i > 1:\n sql.write(\",\\n\")\n sql.write(commajoin([i-1], [], 4))\n i += 1\n sql.write(\";\\n\\n\")\n\n sql.write(\"INSERT INTO food_group_name(food_group_id, language_id, name) VALUES\\n\")\n writenames(sql, nametemp, 4)\n sql.write(\";\\n\\n\")\n\n sql.write(\"INSERT INTO food_group_groups(food_group_id, food_group_id2) VALUES\\n\")\n i = 0\n for group in groups:\n for inner_group in group[1]:\n index = 0\n if inner_group == [] or inner_group == '':\n continue\n for names in nametemp:\n if inner_group in names:\n if i > 0:\n sql.write(\",\\n\")\n sql.write(commajoin([group[0], index], [], 4))\n i += 1\n break\n index += 1\n if index == len(nametemp):\n print(\"Could not find food group(groups.csv): \" + inner_group)\n sql.write(\";\\n\\n\")\n\n # save group names, they are still needed\n # inner group names are not so saving over them\n for i in range(len(groups)):\n groups[i][1] = nametemp[i]\n return groups", "def render_plds_by_group(pds_by_group_name, output_plot_path, column_properties,\n global_x_label, global_y_label,\n # General figure 
configuration\n combine_groups=False, color_by_group_name=None, group_name_order=None,\n fig_width=None, fig_height=None,\n global_y_label_pos=None, legend_column_count=None,\n force_monochrome_group=True,\n # Axis configuration\n show_grid=None,\n semilog_y=None, semilog_y_base=10, semilog_y_min_bound=1e-10,\n group_row_margin=None,\n # Axis limits\n x_min=None, x_max=None,\n horizontal_margin=None, vertical_margin=None,\n y_min=None, y_max=None,\n # Optional axis labeling\n y_labels_by_group_name=None,\n x_tick_list=None, x_tick_label_list=None, x_tick_label_angle=0,\n y_tick_list=None, y_tick_label_list=None,\n plot_title=None,\n show_legend=True):\n with enb.logger.verbose_context(f\"Rendering {len(pds_by_group_name)} plottable data groups to {output_plot_path}\",\n sep=\"...\\n\", msg_after=f\"Done rendering into {output_plot_path}\"):\n if len(pds_by_group_name) < 1:\n if options.verbose > 1:\n print(\"[W]arning: trying to render an empty pds_by_group_name dict. \"\n f\"output_plot_path={output_plot_path}, column_properties={column_properties}. \"\n f\"No analysis is performed.\")\n return\n\n legend_column_count = options.legend_column_count if legend_column_count is None else legend_column_count\n if legend_column_count:\n for name, pds in pds_by_group_name.items():\n for pld in pds:\n pld.legend_column_count = legend_column_count\n\n y_min = column_properties.hist_min if y_min is None else y_min\n y_min = max(semilog_y_min_bound, y_min if y_min is not None else 0) \\\n if ((column_properties is not None and column_properties.semilog_y) or semilog_y) else y_min\n y_max = column_properties.hist_max if y_max is None else y_max\n\n if group_name_order is None:\n def normalize_group_label(group_name):\n if isinstance(group_name, str):\n return group_name.strip().lower()\n else:\n return group_name\n\n sorted_group_names = sorted(pds_by_group_name.keys(), key=normalize_group_label)\n if str(sorted_group_names[0]).lower() == \"all\":\n sorted_group_names = sorted_group_names[1:] + [str(n) for n in sorted_group_names[:1]]\n else:\n sorted_group_names = []\n for group_name in group_name_order:\n if group_name not in pds_by_group_name:\n if options.verbose > 2:\n print(f\"[W]arning: {group_name} was provided in group_name_order but is not one of the \"\n f\"produce groups: {sorted(list(pds_by_group_name.keys()))}. Ignoring.\")\n else:\n sorted_group_names.append(group_name)\n for g in pds_by_group_name.keys():\n if g not in sorted_group_names:\n if options.verbose > 2:\n print(f\"[W]arning: {g} was not provided in group_name_order but is one of the \"\n f\"produce groups: {sorted(list(pds_by_group_name.keys()))}. 
Appending automatically.\")\n sorted_group_names.append(g)\n\n if combine_groups:\n for i, g in enumerate(sorted_group_names):\n if show_legend:\n if (i == 0 and g.lower() != \"all\") or len(sorted_group_names) > 1:\n pds_by_group_name[g][0].label = g\n for pld in pds_by_group_name[g]:\n pld.marker = marker_cycle[i]\n\n y_labels_by_group_name = {g: g for g in sorted_group_names} \\\n if y_labels_by_group_name is None else y_labels_by_group_name\n if color_by_group_name is None:\n color_by_group_name = {}\n for i, group_name in enumerate(sorted_group_names):\n color_by_group_name[group_name] = color_cycle[i % len(color_cycle)]\n if os.path.dirname(output_plot_path):\n os.makedirs(os.path.dirname(output_plot_path), exist_ok=True)\n\n fig_width = options.fig_width if fig_width is None else fig_width\n fig_height = options.fig_height if fig_height is None else fig_height\n global_y_label_pos = options.global_y_label_pos if global_y_label_pos is None else global_y_label_pos\n\n fig, group_axis_list = plt.subplots(\n nrows=max(len(sorted_group_names), 1) if not combine_groups else 1,\n ncols=1, sharex=True, sharey=combine_groups,\n figsize=(fig_width, max(3, 0.5 * len(sorted_group_names) if fig_height is None else fig_height)))\n\n if combine_groups:\n group_axis_list = [group_axis_list]\n elif len(sorted_group_names) == 1:\n group_axis_list = [group_axis_list]\n\n if plot_title:\n plt.title(plot_title)\n\n semilog_x, semilog_y = False, semilog_y if semilog_y is not None else semilog_y\n\n if combine_groups:\n assert len(group_axis_list) == 1\n # group_name_axes = zip(sorted_group_names, group_axis_list * len(sorted_group_names))\n group_name_axes = zip(sorted_group_names, group_axis_list * len(sorted_group_names))\n else:\n group_name_axes = zip(sorted_group_names, group_axis_list)\n\n global_x_min = float(\"inf\")\n global_x_max = float(\"-inf\")\n global_y_min = float(\"inf\")\n global_y_max = float(\"-inf\")\n for pld in (plottable for pds in pds_by_group_name.values() for plottable in pds):\n x_values = np.array(pld.x_values, copy=False)\n if len(x_values) > 0:\n x_values = x_values[~np.isnan(x_values)]\n global_x_min = min(global_x_min, x_values.min() if len(x_values) > 0 else global_x_min)\n global_x_max = max(global_x_min, x_values.max() if len(x_values) > 0 else global_x_min)\n y_values = np.array(pld.y_values, copy=False)\n if len(y_values) > 0:\n y_values = y_values[~np.isnan(y_values)]\n\n global_y_min = min(global_y_min, y_values.min() if len(y_values) > 0 else global_y_min)\n global_y_max = max(global_y_min, y_values.max() if len(y_values) > 0 else global_y_min)\n\n if global_x_max - global_x_min > 1:\n global_x_min = math.floor(global_x_min) if not math.isinf(global_x_min) else global_x_min\n global_x_max = math.ceil(global_x_max) if not math.isinf(global_x_max) else global_x_max\n if global_y_max - global_y_min > 1:\n global_y_min = math.floor(global_y_min) if not math.isinf(global_y_min) else global_y_min\n global_y_max = math.ceil(global_y_max) if not math.isinf(global_y_max) else global_y_max\n if column_properties:\n global_x_min = column_properties.plot_min if column_properties.plot_min is not None else global_x_min\n global_x_max = column_properties.plot_max if column_properties.plot_max is not None else global_x_max\n\n for i, (group_name, group_axes) in enumerate(group_name_axes):\n group_color = color_by_group_name[group_name]\n for pld in pds_by_group_name[group_name]:\n pld.x_label = None\n pld.y_label = None\n d = dict()\n if force_monochrome_group:\n pld.color = 
group_color\n d.update(color=pld.color)\n try:\n pld.extra_kwargs.update(d)\n except AttributeError:\n pld.extra_kwargs = d\n\n try:\n pld.render(axes=group_axes)\n except Exception as ex:\n raise Exception(f\"Error rendering {pld} -- {group_name} -- {output_plot_path}\") from ex\n semilog_x = semilog_x or (column_properties.semilog_x if column_properties else False)\n semilog_y = semilog_y or (column_properties.semilog_y if column_properties else False) or semilog_y\n\n for (group_name, group_axes) in zip(sorted_group_names, group_axis_list):\n if y_min != y_max:\n group_axes.set_ylim(y_min, y_max)\n\n if semilog_x:\n x_base = column_properties.semilog_x_base if column_properties is not None else 10\n group_axes.semilogx(base=x_base)\n group_axes.get_xaxis().set_major_locator(matplotlib.ticker.LogLocator(base=x_base))\n else:\n group_axes.get_xaxis().set_major_locator(\n matplotlib.ticker.MaxNLocator(nbins=\"auto\", integer=True, min_n_ticks=5))\n group_axes.get_xaxis().set_minor_locator(matplotlib.ticker.AutoMinorLocator())\n\n if semilog_y:\n base_y = column_properties.semilog_y_base if column_properties is not None else semilog_y_base\n group_axes.semilogy(base=base_y)\n if combine_groups or len(sorted_group_names) <= 2:\n numticks = 11\n elif len(sorted_group_names) <= 5 and not column_properties.semilog_y:\n numticks = 6\n elif len(sorted_group_names) <= 10:\n numticks = 4\n else:\n numticks = 3\n group_axes.get_yaxis().set_major_locator(matplotlib.ticker.LogLocator(base=base_y, numticks=numticks))\n group_axes.grid(True, \"major\", axis=\"y\", alpha=0.2)\n else:\n group_axes.get_yaxis().set_major_locator(matplotlib.ticker.MaxNLocator(nbins=\"auto\", integer=False))\n group_axes.get_yaxis().set_minor_locator(matplotlib.ticker.AutoMinorLocator())\n if not combine_groups:\n group_axes.get_yaxis().set_label_position(\"right\")\n group_axes.set_ylabel(y_labels_by_group_name[group_name]\n if group_name in y_labels_by_group_name\n else clean_column_name(group_name),\n rotation=0, ha=\"left\", va=\"center\")\n\n plt.xlabel(global_x_label)\n if column_properties and column_properties.hist_label_dict is not None:\n x_tick_values = sorted(column_properties.hist_label_dict.keys())\n x_tick_labels = [column_properties.hist_label_dict[x] for x in x_tick_values]\n plt.xticks(x_tick_values, x_tick_labels)\n\n if global_y_label:\n fig.text(global_y_label_pos, 0.5, global_y_label, va='center', rotation='vertical')\n\n if options.displayed_title is not None:\n plt.suptitle(options.displayed_title)\n\n group_row_margin = group_row_margin if group_row_margin is not None else float(\n enb.config.options.group_row_margin)\n group_row_margin += (len(pds_by_group_name) - 6) / 24\n plt.subplots_adjust(hspace=group_row_margin)\n\n if x_tick_list is not None:\n if not x_tick_label_list:\n plt.xticks(x_tick_list)\n else:\n plt.xticks(x_tick_list, x_tick_label_list, rotation=x_tick_label_angle)\n plt.minorticks_off()\n if x_tick_label_list is not None:\n assert x_tick_list is not None\n if x_tick_list is None and x_tick_label_angle is not None:\n plt.xticks(rotation=x_tick_label_angle)\n\n for group_axes in group_axis_list:\n plt.sca(group_axes)\n if y_tick_list is not None:\n if not y_tick_label_list:\n plt.yticks(y_tick_list)\n else:\n plt.yticks(y_tick_list, y_tick_label_list)\n group_axes.minorticks_off()\n if y_tick_label_list is not None:\n assert y_tick_list is not None\n plt.yticks()\n\n # Set the axis limits\n xlim = [global_x_min, global_x_max]\n ylim = [global_y_min, global_y_max]\n xlim[0] = xlim[0] 
if x_min is None else x_min\n xlim[1] = xlim[1] if x_max is None else x_max\n ylim[0] = ylim[0] if y_min is None else y_min\n ylim[1] = ylim[1] if y_max is None else y_max\n # Translate relative margin to absolute margin\n horizontal_margin = horizontal_margin if horizontal_margin is not None else options.horizontal_margin\n vertical_margin = vertical_margin if vertical_margin is not None else options.vertical_margin\n h_margin = horizontal_margin * (xlim[1] - xlim[0])\n v_margin = vertical_margin * (ylim[1] - ylim[0])\n xlim = [xlim[0] - h_margin, xlim[1] + h_margin]\n ylim = [ylim[0] - v_margin, ylim[1] + v_margin]\n # Apply changes to the figure\n if xlim[0] != xlim[1] and not math.isnan(xlim[0]) and not math.isnan(xlim[1]):\n plt.xlim(*xlim)\n if ylim[0] != ylim[1] and not math.isnan(ylim[0]) and not math.isnan(ylim[1]):\n plt.ylim(*ylim)\n\n show_grid = options.show_grid if show_grid is None else show_grid\n\n if show_grid:\n if combine_groups:\n plt.grid(\"major\", alpha=0.5)\n else:\n for axes in group_axis_list:\n axes.grid(\"major\", alpha=0.5)\n\n with enb.logger.verbose_context(f\"Saving plot to {output_plot_path}\"):\n plt.savefig(output_plot_path, bbox_inches=\"tight\", dpi=300)\n\n plt.close()", "def cal_2rdgs(database_name, table_name, primary_key, group_name1, group_name2):\n\n ################################################################\n # conect to the database and return the query information\n ################################################################\n conn = connect_database(database_name)\n c = conn.cursor()\n\n sql1 = (\"select * from {0} where {1} = '{2}' \".format(table_name, primary_key, group_name1))\n sql2 = (\"select * from {0} where {1} = '{2}' \".format(table_name, primary_key, group_name2))\n\n c.execute(sql1)\n infolist1 = c.fetchall()\n\n c.execute(sql2)\n infolist2 = c.fetchall()\n\n # print(infolist1)\n # print(infolist2)\n\n #######################################################################\n # find the gene number of each disease group(group1_item_num,group2_item_num)\n ########################################################################\n group_1_item_num = get_icd_diseasegroup_geneinfo(database_name, table_name, primary_key, group_name1)[2]\n group_2_item_num = get_icd_diseasegroup_geneinfo(database_name, table_name, primary_key, group_name2)[2]\n # print(group_1_item_num)\n # print(group_2_item_num)\n # print(get_icd_diseasegroup_geneinfo(database_name, table_name, primary_key, group_name1)[1])\n # print(get_icd_diseasegroup_geneinfo(database_name, table_name, primary_key, group_name2)[1])\n ###############################################################\n # find the gene number of all the GDAs\n ###############################################################\n all_gene_num = get_all_gene_num(database_name, \"mesh_gene\")\n # print(all_gene_num)\n\n ###############################################################\n # bulid the random model of GROUP_NAME1, GROUP_NAME2, calculate C_random\n ###############################################################\n\n c_random = (group_1_item_num * group_2_item_num) / all_gene_num\n\n # print(c_random)\n\n ###############################################################\n # calculate the gene number of (GROUP_NAME1 intersection GROUP_NAME2), calculate C_real\n ###############################################################\n\n c_real = get_2diseasegroup_shared_gene(database_name, table_name, group_name1, group_name2, primary_key)[3]\n\n # print(c_real)\n\n 
###############################################################\n # calculate sij = c_real/c_random\n ###############################################################\n\n s = float(c_real) / float(c_random)\n\n ###############################################################\n # normalization Si,j by min-max normalization method\n ###############################################################\n\n min_score = 0\n\n max_score = float(all_gene_num) / min(float(group_1_item_num), float(group_2_item_num))\n\n # print(max_score)\n\n sim = (s - min_score) / (max_score - min_score)\n\n sim = '%.5f' % sim\n\n conn.close()\n\n return sim", "def finalize_groups(self):\n merged_rdd = self.merged_rdd\n group_merge_map = self.group_merge_map\n sc = self.sc\n sqc = pyspark.sql.SQLContext(sc)\n\n nPartitions = sc.defaultParallelism*5\n\n nMinMembers = self.nMinMembers\n\n # we need to use the group merge map used in a previous step to see which \n # groups are actually spread across domain boundaries\n group_merge_map = self.group_merge_map\n \n\n def count_groups_local(i, iterator, nMinMembers):\n # the first element is the group mapping dictionary\n dist_groups = set(iterator.next().values())\n print len(dist_groups)\n print 'sizeof set in ', i, ' ', asizeof.asizeof(dist_groups)\n p_arrs = np.concatenate([p_arr for p_arr in iterator])\n gids, counts = np.unique(p_arrs['iGroup'], return_counts=True)\n print 'number of groups in partition ', i, ' = ', len(gids)\n return ((g,cnt) for (g,cnt) in zip(gids, counts) if (g in dist_groups) or (cnt >= nMinMembers))\n \n\n def filter_groups_by_map(rdd, not_in_map=False):\n def perform_filter(iterator, exclusive):\n # the first element after the union is the group mapping\n # here we have already remapped the groups so we need to just take the final group IDs\n dist_groups = set(iterator.next().values())\n return ((gid, count) for (gid,count) in iterator if (gid in dist_groups)^exclusive)\n return rdd.mapPartitions(lambda i: perform_filter(i,not_in_map), preservesPartitioning=True)\n\n def get_local_groups(rdd, map_rdd): \n return filter_groups_by_map(map_rdd + rdd, not_in_map=True)\n\n def get_distributed_groups(rdd, map_rdd):\n return filter_groups_by_map(map_rdd + rdd, not_in_map=False)\n\n # first, get rid of ghost particles\n no_ghosts_rdd = self.filter_ghosts(merged_rdd)\n\n # count up the number of particles in each group in each partition\n group_counts = (group_merge_map + no_ghosts_rdd).mapPartitionsWithIndex(lambda index,i: count_groups_local(index, i, nMinMembers), True).cache()\n\n # merge the groups that reside in multiple domains\n distributed_groups = get_distributed_groups(group_counts, group_merge_map)\n\n merge_group_counts = (distributed_groups.reduceByKey(lambda a,b: a+b, nPartitions)\n .filter(lambda (g,cnt): cnt>=nMinMembers)).cache()\n\n if self.DEBUG:\n print 'spark_fof DEBUG: non-merge groups = %d merge groups = %d'%(group_counts.count(), merge_group_counts.count()) \n\n # combine the group counts\n groups_rdd = (get_local_groups(group_counts, group_merge_map) + merge_group_counts).setName('groups_rdd')\n total_group_counts = groups_rdd.cache().count()\n \n print 'Total number of groups: ', total_group_counts\n\n self.total_group_counts = total_group_counts\n\n return groups_rdd", "def gallery_groups(self):\n\n \"Collect data into fixed-length chunks or blocks\"\n # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n n = 3\n iterable = self.context['gallery'].values()\n args = [iter(iterable)] * 3\n return izip_longest(fillvalue=None, *args)", 
"def collect_final_groups(self):\n # get the final group mapping by sorting groups by particle count\n timein = time.time()\n groups_map = {}\n groups = {}\n groups_rdd = self.groups_rdd\n for i, (g,c) in enumerate(groups_rdd.collect()): \n groups_map[g] = i+1\n groups[i+1] = c\n\n print 'spark_fof: Final group map build took %f seconds'%(time.time() - timein)\n \n return groups", "def _get_group_from_file(self, wanted_group):\n wanted_gid = \"\"\n if (isinstance(wanted_group, int) or\n re.match(\"^\\\\d+$\", wanted_group)):\n wanted_gid = str(wanted_group)\n wanted_group = \"\"\n try:\n ingroup = open(self.group_file)\n except (IOError, OSError):\n return (\"\", \"\", \"\")\n else:\n for line in ingroup:\n (group, dummy, gid, users) = line.strip().split(':')\n if wanted_group and group == wanted_group:\n return (group, gid, users)\n if wanted_gid and gid == wanted_gid:\n return (group, gid, users)\n ingroup.close()\n return (\"\", \"\", \"\")", "def group_df(self):\n return self._group_df", "def _prepare_invoice_grp(self, cr, uid, order, line_ids, context=None):\n if context is None:\n context = {}\n context = dict(context)\n\n inv_data = super(grp_orden_compra, self)._prepare_invoice_grp(cr, uid, order, line_ids, context=context)\n\n # adicionando campos numero compromiso y no obligacion desde la OC\n monto_oc = math.floor(order.total_llavep or 0)\n monto_oc = int(monto_oc)\n inv_data.update({'nro_compromiso': order.nro_compromiso or False, 'monto_comprometido': monto_oc or 0, 'currency_id':order.currency_oc.id})\n\n # adicionando campos no afectacion y monto autorizado desde la primera APG\n if order.pc_apg_id:\n first_apg = order.pc_apg_id\n monto_apg = math.floor(first_apg.total_llavep)\n monto_apg = int(monto_apg)\n # TODO R SPRING X ADICIONANDO CABEZALES SIIF A LA FACTURA A PARTIR DE LA APG\n inv_data.update({'nro_afectacion': first_apg.nro_afectacion_siif or False,\n 'monto_afectado': monto_apg or 0,\n 'siif_tipo_ejecucion':first_apg.siif_tipo_ejecucion.id,\n 'siif_concepto_gasto':first_apg.siif_concepto_gasto.id,\n 'siif_financiamiento':first_apg.siif_financiamiento.id,\n 'siif_codigo_sir':first_apg.siif_codigo_sir.id,\n 'siif_nro_fondo_rot':first_apg.siif_nro_fondo_rot.id,\n }) # cambiando nro_afectacion 23/10\n # inv.update({'nro_afectacion': first_apg.nro_afectacion_apg or False, 'monto_afectado': monto_apg or 0})\n\n # # TODO R SPRING X NO LLEVAR LAS LLAVES PRESUPUESTALES POR DEFECTO\n # if order.pc_apg_id.llpapg_ids:\n # llavep_ids = []\n # for llavep in order.pc_apg_id.llpapg_ids:\n # llavep_ids.append((0, 0, {\n # 'programa_id': llavep.programa_id.id,\n # 'odg_id': llavep.odg_id.id,\n # 'auxiliar_id': llavep.auxiliar_id.id,\n # 'disponible': llavep.disponible,\n # 'proyecto_id': llavep.proyecto_id.id,\n # 'fin_id': llavep.fin_id.id,\n # 'mon_id': llavep.mon_id.id,\n # 'tc_id': llavep.tc_id.id,\n # 'importe': llavep.importe\n # }))\n # inv_data.update({'llpapg_ids': llavep_ids})\n\n return inv_data", "def groups(self):\n\n\t\tprint \"completed minimization\"\n\t\tcopy(self.rootdir+'counterions-minimized.gro',self.rootdir+'system.gro')\n\t\tcopy(self.rootdir+'counterions.top',self.rootdir+'system.top')\n\t\tif self.simscale == 'aamd': grouptype = 'standard'\n\t\tif self.simscale == 'cgmd': grouptype = 'cgmd_water'\n\t\tself.grouping(grouptype=grouptype)", "def _get_new_group_id():\n new_group = data_types.TestcaseGroup()\n new_group.put()\n return new_group.key.id()" ]
[ "0.54248655", "0.52833027", "0.52409554", "0.51482445", "0.5124294", "0.50936604", "0.5087704", "0.5087017", "0.5085758", "0.50673145", "0.5042352", "0.50204813", "0.5016356", "0.49944848", "0.4958095", "0.49487063", "0.49047828", "0.49024466", "0.48803094", "0.48635092", "0.48621163", "0.48396927", "0.48180306", "0.48107547", "0.47901312", "0.47796732", "0.47616366", "0.47456917", "0.47339725", "0.47324955" ]
0.7963392
0
mask out labels that are not in both train and test data and also mask out samples where features include NaN values
def masking(X_train, X_test, y_train, y_test): # create mask to exclude NaN-values from train data mask_train = np.zeros(X_train.shape[0], dtype=np.bool) for i, subfeat in enumerate(X_train): if True in np.isnan(subfeat): mask_train[i] = True else: mask_train[i] = False # create mask to exclude NaN-values from test data mask_test = np.zeros(X_test.shape[0], dtype=np.bool) for i, subfeat in enumerate(X_test): if True in np.isnan(subfeat): mask_test[i] = True else: mask_test[i] = False # masking X_train = X_train[~mask_train] y_train = y_train[~mask_train] X_test = X_test[~mask_test] y_test = y_test[~mask_test] y_train = y_train.astype("int64") y_test = y_test.astype("int64") # exclude classes that are not included in both, test and train data difflist1 = list(set(np.unique(y_train)) - set(np.unique(y_test))) for i in difflist1: mask_train = y_train == i X_train = X_train[~mask_train] y_train = y_train[~mask_train] difflist2 = list(set(np.unique(y_test)) - set(np.unique(y_train))) for i in difflist2: mask_test = y_test == i X_test = X_test[~mask_test] y_test = y_test[~mask_test] return(X_train, X_test, y_train, y_test)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def omit_nans(self, data, label):\n maskarray=np.full(data.shape[0], True)\n masker=np.unique(np.argwhere(np.isnan(data))[:,0])\n maskarray[masker]=False\n traindata=data[maskarray,:,:,:]\n trainlabel=label[maskarray]\n return traindata, trainlabel", "def filter_nan_samples(self, train_x, train_y):\n\n n_samples = train_x.shape[0]\n if n_samples != train_y.shape[0]:\n raise ValueError(\"x and y sample lengths don't match\")\n\n validity_array = np.zeros(n_samples)\n for i in range(n_samples):\n x_sample = train_x[i, :]\n y_sample = train_y[i, :]\n validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()\n\n mask = np.where(validity_array)[0]\n\n return train_x[mask, :], train_y[mask, :]", "def mask_test_train(data, split): \n # create a copy of the full data for reduction\n training_set = data.copy()\n\n # find index of values which are not empty\n nonzero_inds = training_set.nonzero()\n\n # create list of index pairs\n nonzero_pairs = list(zip(nonzero_inds[0], nonzero_inds[1]))\n\n # calculate the number of samples to be removed in training set\n num_samples = int(np.ceil(split*len(nonzero_pairs)))\n\n # get random samples\n samples = random.sample(nonzero_pairs, num_samples)\n\n # remove selected samples in training set\n user_inds = [index[0] for index in samples]\n item_inds = [index[1] for index in samples]\n training_set[user_inds, item_inds] = 0 \n\n return training_set, list(set(user_inds)), np.array(samples)", "def forget_labels(labels_to_forget=\"none\"):\n\t\t\tassert labels_to_forget in {\"none\",\"originally unlabelled\",\"all\"}\n\t\t\tif labels_to_forget != \"none\":\n\t\t\t\tif labels_to_forget == \"originally unlabelled\":\n\t\t\t\t\tself.train_labels___0_unlab__neg1_exclud=self.train_orig_labels.copy()\n\t\t\t\telif labels_to_forget == \"all\":\n\t\t\t\t\tself.train_labels___0_unlab__neg1_exclud=np.zeros(self.num_train)\n\t\t\t\telse:\n\t\t\t\t\tassert False\n\t\t\t\tself.bool_train_labelled=(self.train_labels___0_unlab__neg1_exclud>0)\n\t\t\t\tself.bool_train_unlabelled=(self.train_labels___0_unlab__neg1_exclud==0)\n\t\t\t\tself.bool_train_excluded=(self.train_labels___0_unlab__neg1_exclud<0)\n\t\t\t\tself.num_train_labelled=sum(self.bool_train_labelled)\n\t\t\t\tself.num_train_unlabelled=sum(self.bool_train_unlabelled)\n\t\t\t\tself.num_train_excluded=sum(self.bool_train_excluded)", "def clean_train_test2(train, test):\n\n # Species, Street, Trap\n labeller = LabelEncoder()\n labeller.fit(np.concatenate((train.Species.values, test.Species.values)))\n train.Species = labeller.transform(train.Species.values)\n test.Species = labeller.transform(test.Species.values)\n\n labeller.fit(np.concatenate((train.Street.values, test.Street.values)))\n train.Street = labeller.transform(train.Street.values)\n test.Street = labeller.transform(test.Street.values)\n\n labeller.fit(np.concatenate((train.Trap.values, test.Trap.values)))\n train.Trap = labeller.transform(train.Trap.values)\n test.Trap = labeller.transform(test.Trap.values)\n\n return train, test", "def prune_train_dataset(self, all_labels, train_idxs):\n\n # -- prune samples if necessary to have equal sized splits\n neg_idxs = [idx for idx in train_idxs if all_labels[idx] == self.NEG_LABEL]\n pos_idxs = [idx for idx in train_idxs if all_labels[idx] == self.POS_LABEL]\n n_samples = min(len(neg_idxs), len(pos_idxs))\n\n rstate = np.random.RandomState(7)\n rand_idxs_neg = rstate.permutation(neg_idxs)\n rand_idxs_pos = rstate.permutation(pos_idxs)\n\n neg_idxs = rand_idxs_neg[:n_samples]\n pos_idxs = 
rand_idxs_pos[:n_samples]\n train_idxs = np.concatenate((pos_idxs, neg_idxs))\n\n return train_idxs", "def exclude_empty_feats(self):\n for dataset in self:\n dataset.dropna(axis=1, how=\"all\", inplace=True)", "def test_labels_encoder_no_classes(self):\n\n class L2UTransformer(object):\n def transform(self, y):\n return np.array([yi.upper() for yi in y])\n\n oz = ClassificationScoreVisualizer(GaussianNB(), encoder=L2UTransformer())\n with pytest.warns(YellowbrickWarning, match=\"could not determine class labels\"):\n assert oz._labels() is None", "def is_labeled(y, missing_label=MISSING_LABEL):\n return ~is_unlabeled(y, missing_label)", "def get_mask_for_valid_labels(y_true, num_classes, ignore_value=255):\n mask_for_class_elements = y_true < num_classes\n mask_for_not_ignored = y_true != ignore_value\n mask = mask_for_class_elements & mask_for_not_ignored\n return mask", "def test_keep_labels_all(self):\n # Create some arbitrary data and labels\n data = array([[1], [2], [3], [4], [5], [6]])\n labels = array([1, 1, 2, 2, 1, 2])\n\n # Create a LabeledCData object\n lcdata = LabeledCData(data, labels)\n\n self.assertTrue(array_equal(lcdata.data, data))\n self.assertTrue(array_equal(lcdata.labels, labels))\n\n # Only keep the 1 and 2 labels\n lcdata.keep_data_with_labels([1, 2])\n\n # Make sure the new data is correct\n self.assertTrue(array_equal(lcdata.data, data))\n self.assertTrue(array_equal(lcdata.labels, labels))", "def mask_labels(labels):\n def do_one_row(row):\n erase = False\n for i, _ in enumerate(row):\n if erase:\n row[i] = 0\n else:\n if row[i] == 10:\n erase = True\n row[i] = 1\n return row\n\n ret = np.copy(labels)\n return np.apply_along_axis(do_one_row, axis=1, arr=ret)", "def discard_none_targets(dataset):\r\n indices = []\r\n for (ii,sample) in enumerate(dataset):\r\n target = sample[1]\r\n if target is not None:\r\n indices.append(ii)\r\n\r\n return Subset(dataset,indices)", "def clean(data, skip_transformations=False, target=False):\n data = convert_type(data)\n data = category_grouping(data)\n if target:\n target = data[['Target']]\n data = data.drop(columns='Target')\n x_train, x_test, y_train, y_test = f.train_test(data, target)\n x_train, y_train = resample_vals(x_train, y_train)\n x_train = x_train.assign(Train=lambda x: 1)\n x_test = x_test.assign(Train=lambda x: 0)\n data = pd.concat([x_train, x_test])\n data = onehot_features(data)\n data = log_trans(data, test=True)\n data = cap_outliers(data, test=True)\n data = scale(data, test=True)\n x_train = data.loc[data['Train'] == 1]\n x_test = data.loc[data['Train'] == 0]\n return x_train, x_test, y_train, y_test\n data = onehot_features(data)\n if skip_transformations:\n return data\n data = log_trans(data)\n data = cap_outliers(data)\n data = scale(data)\n return data", "def clean_features(test_data, train_data, undef):\n \n test_set = test_data\n train_set = train_data\n \n # Preprocessing of the four jet datasets\n for jet in range(4):\n \n # Remove columns full of undefined values (-999.0 in the datase)\n train_set[jet][train_set[jet] <= undef] = np.nan # replace undefined values -999 by NaN\n train_set[jet] = train_set[jet][:, ~np.all(np.isnan(train_set[jet]), axis=0)]\n test_set[jet][test_set[jet] <= undef] = np.nan # replace undefined values -999 by NaN\n test_set[jet] = test_set[jet][:, ~np.all(np.isnan(test_set[jet]), axis=0)]\n\n # Remove columns without standard deviation to remove column full of the same value\n train_id_pred = train_set[jet][:,0:2]\n train_features = train_set[jet][:,2:]\n 
train_features = train_features[:, np.nanstd(train_features, axis=0) != 0]\n train_set[jet] = np.concatenate((train_id_pred, train_features), axis=1)\n \n # Extract prediction column of test before because it has std=0, and re-insert it after at the begining of the table\n test_id_pred = test_set[jet][:,0:2]\n test_features = test_set[jet][:,2:]\n test_features = test_features[:, np.nanstd(test_features, axis=0) != 0]\n test_set[jet] = np.concatenate((test_id_pred, test_features), axis=1)\n \n return test_set, train_set", "def remove_labels(self, test):\n ii = 0\n while ii < len(self.labels):\n if test(self.labels[ii]):\n self.labels.pop(ii)\n else:\n ii += 1\n return self", "def remove_labels(self, test):\n ii = 0\n while ii < len(self.labels):\n if test(self.labels[ii]):\n self.labels.pop(ii)\n else:\n ii += 1\n return self", "def test_umap_mismtached_labels(self):\n ## produce random data\n X, y = make_classification(\n n_samples=200,\n n_features=100,\n n_informative=20,\n n_redundant=10,\n n_classes=3,\n random_state=42,\n )\n\n ## fewer labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)\n\n ## more labels than classes\n umap = UMAPVisualizer(random_state=87, labels=[\"a\", \"b\", \"c\", \"d\"])\n with pytest.raises(YellowbrickValueError):\n umap.fit(X, y)", "def testConvertMissingLabels(self):\n self.assertEqual(self.data['no_species']['labels'][0]['species'], '-1')\n self.assertEqual(self.data['no_count']['labels'][0]['count'], '-1')\n self.assertEqual(self.data['no_standing']['labels'][0]['standing'], '-1')", "def test_keep_labels(self):\n # Create some arbitrary data and labels\n data = array([[1], [2], [3], [4], [5], [6]])\n labels = array([1, 1, 2, 2, 3, 3])\n\n # Create a LabeledCData object\n lcdata = LabeledCData(data, labels)\n\n self.assertTrue(array_equal(lcdata.data, data))\n self.assertTrue(array_equal(lcdata.labels, labels))\n\n # Make sure 3 is in the labels, for contrast\n self.assertIn(3, lcdata.labels)\n\n # Only keep the 1 and 2 labels\n lcdata.keep_data_with_labels([1, 2])\n\n # Make sure 3 has been removed from the labels, for contrast\n self.assertNotIn(3, lcdata.labels)\n\n # Correct answers\n newdata = array([[1], [2], [3], [4]])\n newlabels = array([1, 1, 2, 2])\n\n # Make sure the new data is correct\n self.assertTrue(array_equal(lcdata.data, newdata))\n self.assertTrue(array_equal(lcdata.labels, newlabels))", "def test_keep_labels2(self):\n # Create some arbitrary data and labels\n data = array([[1], [2], [3], [4], [5], [6]])\n labels = array([1, 1, 2, 2, 3, 3])\n\n # Create a LabeledCData object\n lcdata = LabeledCData(data, labels)\n\n self.assertTrue(array_equal(lcdata.data, data))\n self.assertTrue(array_equal(lcdata.labels, labels))\n\n # Make sure 2 is in the labels, for contrast\n self.assertIn(2, lcdata.labels)\n\n # Only keep the 1 and 3 labels\n lcdata.keep_data_with_labels([1, 3])\n\n # Make sure 3 has been removed from the labels, for contrast\n self.assertNotIn(2, lcdata.labels)\n\n # Correct answers\n newdata = array([[1], [2], [5], [6]])\n newlabels = array([1, 1, 3, 3])\n\n # Make sure the new data is correct\n self.assertTrue(array_equal(lcdata.data, newdata))\n self.assertTrue(array_equal(lcdata.labels, newlabels))", "def check_training_samples(self):\n\n yidx = np.sum(self.datas[self.train_idx].gen_labels(), axis=0) < self.kfold_cv\n if np.any(yidx):\n xlist = ','.join(np.array(self.datas[self.train_idx].labels)[yidx])\n print('\\n *** WARNING 
***\\n There are labels with very few samples: %s' % xlist)\n print(' If encounter chaotic errors, consider excluding these labels using --excludeloc %s\\n' % xlist)\n\n return", "def mask_nan(y_true, y_pred):\n notnan_true = K.cast(~tf.math.is_nan(y_true), \"float32\")\n num_notnan = K.sum(K.flatten(notnan_true))\n y_pred = tf.math.multiply(y_pred, notnan_true)\n\n # We need to use tf.where to do this substitution, because when trying to\n # multiply with just the notnan_true masks,\n # NaN*0 = NaN, so NaNs are not removed\n y_true = K.cast(\n tf.where(~tf.math.is_nan(y_true), y_true, tf.zeros_like(y_true)), \"float32\"\n )\n return y_pred, y_true, num_notnan", "def mask_nan_keep_loss(y_true, y_pred):\n y_pred, y_true, num_notnan = mask_nan(y_true, y_pred)\n loss = K.sum((K.flatten(y_pred) - K.flatten(y_true)) ** 2) / num_notnan\n return tf.where(~tf.math.is_nan(loss), loss, 0)", "def irrelevant_features(features):\n irrelevant = []\n for vec in set(features):\n if (features[vec].count(0)/len(indtf_features[vec])) < 0.1:\n irrelevant.append(vec)\n return irrelevant", "def drop_missing_values(self):\n X_train, X_val, y_train, y_val = train_test_split(self.X_dev, self.y_dev, test_size=0.25, random_state=10)\n y_test = self.y_test\n X_test = self.X_test\n X_train_dropped = X_train.dropna(axis='rows')\n y_train_dropped = y_train.loc[X_train_dropped.index]\n X_val_dropped = X_val.dropna(axis='rows')\n y_val_dropped = y_val.loc[X_val_dropped.index]\n X_test_dropped = X_test.dropna(axis='rows')\n y_test_dropped = y_test.loc[X_test_dropped.index]\n \n best_rf, best_hyperparams = random_forest_grid_search(X_train_dropped, y_train_dropped, X_val_dropped, y_val_dropped)\n y_train_best = best_rf.predict_proba(X_train_dropped)[:, 1]\n Train_c_index = cindex(y_train_dropped, y_train_best)\n \n y_val_best = best_rf.predict_proba(X_val_dropped)[:, 1]\n valid_c_index = cindex(y_val_dropped, y_val_best)\n \n y_test_best = best_rf.predict_proba(X_test_dropped)[:,1]\n test_c_index = cindex(y_test_dropped, y_test_best)\n \n return Train_c_index, valid_c_index, test_c_index", "def predict_missing_values(self, data, targets, features):\n for target in targets:\n cols = features + [target]\n train_fit_mask = pd.notnull(\n data.loc[self.train_index, target])\n # train_df = data.loc[:, cols].dropna()\n train_fill_mask = pd.isnull(data.loc[self.train_index, target])\n hyper_params_model = lm.LassoCV(normalize=True, copy_X=True, n_jobs=-1).fit(\n data.loc[train_fit_mask, features], data.loc[train_fit_mask, target])\n model = lm.Lasso(alpha=hyper_params_model.alpha_,\n copy_X=True, normalize=True)\n model.fit(data.loc[train_fit_mask, features],\n data.loc[train_fit_mask, target])\n data.loc[train_fill_mask, target] = model.predict(\n data.loc[train_fill_mask, features])\n if str(self.test_index) != 'None':\n if pd.isnull(self.data.loc[self.test_index, target]).any():\n test_fill_mask = pd.isnull(\n self.data.loc[self.test_index, target])\n print self.test.loc[test_fill_mask, features]\n self.data.loc[test_fill_mask, target] = model.predict(\n self.data.loc[test_fill_mask, features])\n return data", "def prepare_labels(labels, class_mask):\n mask = [1 if elt else -1 for elt in class_mask]\n mask = np.array(mask)\n return labels.dot(mask)", "def missing_values():\n print('Missings in the train data:', train_data.isnull().sum())", "def subsample_negative_labels(labels):\n num_bg = RPN_BATCHSIZE - tensorflow.reduce_sum(tensorflow.gather(labels, tensorflow.where(tensorflow.equal(labels, 1))))\n \n bg_inds = 
tensorflow.where(tensorflow.equal(labels, 0))\n \n bg_inds = keras.backend.shape(bg_inds)[0]\n \n size = keras.backend.cast(bg_inds, tensorflow.int32) - keras.backend.cast(num_bg, tensorflow.int32)\n\n def more_negative():\n indices = tensorflow.multinomial(keras.backend.log(keras.backend.ones((bg_inds, 1)) * 10.), size)\n\n elems = keras.backend.gather(tensorflow.range(bg_inds), indices)\n \n return tensorflow.scatter_update(tensorflow.Variable(labels, validate_shape=False), elems, -1)\n\n def less_negative():\n return labels\n\n return tensorflow.cond(keras.backend.less_equal(size, 0), lambda: less_negative(), lambda: more_negative())" ]
[ "0.7626387", "0.6600071", "0.63293505", "0.62641454", "0.62288636", "0.61516243", "0.608271", "0.6056829", "0.6002692", "0.599427", "0.59860575", "0.59836817", "0.5916399", "0.5898351", "0.5887911", "0.5840369", "0.5840369", "0.581876", "0.58088595", "0.5802163", "0.5787612", "0.5756453", "0.57326037", "0.57312804", "0.5673773", "0.5666024", "0.56492615", "0.56346726", "0.56161714", "0.55815697" ]
0.75027907
1
Returns mapping between Geopedia's crop index and crop id for Slovenia.
def get_slovenia_crop_geopedia_idx_to_crop_id_mapping(): gpd_session = GeopediaSession() to_crop_id = list(GeopediaFeatureIterator(layer='2036', gpd_session=gpd_session)) to_crop_id = [{'crop_geopedia_idx': code['id'], **code['properties']} for code in to_crop_id] to_crop_id = pd.DataFrame(to_crop_id) to_crop_id['crop_geopedia_idx'] = pd.to_numeric(to_crop_id.crop_geopedia_idx) return to_crop_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_austria_crop_geopedia_idx_to_crop_id_mapping():\n gpd_session = GeopediaSession()\n to_crop_id = list(GeopediaFeatureIterator(layer='2032', gpd_session=gpd_session))\n to_crop_id = [{'crop_geopedia_idx': code['id'], **code['properties']} for code in to_crop_id]\n to_crop_id = pd.DataFrame(to_crop_id)\n to_crop_id['crop_geopedia_idx'] = pd.to_numeric(to_crop_id.crop_geopedia_idx)\n to_crop_id.rename(index=str, columns={\"SNAR_BEZEI\": \"SNAR_BEZEI_NAME\"}, inplace=True)\n to_crop_id.rename(index=str, columns={\"crop_geopedia_idx\": \"SNAR_BEZEI\"}, inplace=True)\n\n return to_crop_id", "def get_danish_crop_geopedia_idx_to_crop_id_mapping():\n gpd_session = GeopediaSession()\n to_crop_id = list(GeopediaFeatureIterator(layer='2050', gpd_session=gpd_session))\n to_crop_id = [{'crop_geopedia_idx': code['id'], **code['properties']} for code in to_crop_id]\n to_crop_id = pd.DataFrame(to_crop_id)\n to_crop_id['crop_geopedia_idx'] = pd.to_numeric(to_crop_id.crop_geopedia_idx)\n\n return to_crop_id", "def crop_id(self):\n return self._crop_id", "def get_mapu_kanala_ID_OPIS(self):\n out = {}\n for kanal in self.sviKanali:\n out[kanal] = self.get_datastore(kanal).koncentracija.opis\n return out", "def id_index_map(self):\n result = {}\n for index, component_data in iteritems(self):\n result[id(component_data)] = index\n return result", "def hvgs_ids(self):\n if not hasattr(self, '_hvgs_ids'):\n mv = myvariant.MyVariantInfo()\n self._hvgs_ids = [i['_id'] for i in\n mv.query(self.snp_loc, fields='id')['hits']]\n return self._hvgs_ids", "def getVSMSpace():\n sids,documents = getSongTextInfo()\n texts = [[word for word in document.lower().split()] for document in documents]\n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n songMap = {}\n index = 0\n for doc in corpus:\n sid = sids[index]\n rMap = {}\n for item in doc:\n wid = item[0]\n count = item[1]\n rMap[wid] = count\n songMap[sid] = rMap\n index += 1\n return songMap", "def getTopPopulationRegion(self):\n\t\tdata = {}\n\t\tfor iProvince in range(con.iNumRegions):\n\t\t\tdata[iProvince] = 0\n\t\tfor iLoopPlayer in range(con.iBarbarian + 1):\n\t\t\tapCityList = PyPlayer(iLoopPlayer).getCityList()\n\t\t\tfor pCity in apCityList:\n\t\t\t\tdata[pCity.GetCy().plot().getRegionID()] += pCity.getPopulation()\n\t\tkey = -1\n\t\tfor key, value in sorted(data.iteritems(), key=lambda (k,v): (v,k)):\n\t\t\tpass\n\t\treturn key", "def GetMapId(landsat, date, date_range):\n \n def maskClouds(img):\n scored = ee.Algorithms.Landsat.simpleCloudScore(img);\n return img.updateMask(scored.select(['cloud']).lt(20));\n\n def CreateTimeBand(img):\n return maskClouds(img).byte().addBands(img.metadata('system:time_start'))\n\n if landsat == 'l7':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L7)\n l7 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l7Composite = l7.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l7Composite.getMapId({\n 'min': '0,0,0',\n 'max': '255,255,255',\n 'bands': 'B4,B3,B2',\n })\n if landsat == 'l8':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L8)\n l8 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l8Composite = l8.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l8Composite.getMapId({\n 'min': '0',\n 'max': '0.4',\n 'bands': 'B4,B3,B2',\n 
})", "def _create_img_id_to_idx(self):\n with h5py.File(self.image_features_path, 'r') as features_file:\n coco_ids = features_file['ids'][()]\n coco_id_to_index = {id: i for i, id in enumerate(coco_ids)}\n return coco_id_to_index", "def sosid(self):\r\n return self.word2idx.get(SOS, 0)", "def make_sector_map(self, data):\n\n def format_county_fips(cf):\n\n cf = str(cf)\n\n if len(cf)<=4:\n\n cf = '0'+cf\n\n return cf\n\n data['COUNTY_FIPS'] = data.COUNTY_FIPS.apply(\n lambda x: format_county_fips(x)\n )\n\n # match on geo_id\n map_data = self.cshp.set_index('GEOID').join(\n data.set_index('COUNTY_FIPS').MMBtu\n )\n\n ## Need to specify colors or will geopandas automatcially assign?", "def get_grid_id(point, grids):\r\n\tdis_cents = 100\r\n\tgc_id = 0\r\n\r\n\tfor i, gc in enumerate(grids[\"grid_region\"]):\r\n\t\tdis = sqrt((float(point[0]) - float(gc[0])) ** 2 + (float(point[1]) - float(gc[1])) ** 2)\r\n\t\tif dis < dis_cents:\r\n\t\t\tdis_cents = dis\r\n\t\t\tgc_id = i\r\n\r\n\tgd_id = -1\r\n\tfor j, gd in enumerate(grids[\"grid_boundary\"][str(gc_id)]):\r\n\t\tboundary = grids[\"grid_boundary\"][str(gc_id)][gd]\r\n\t\tif isInsidePolygon((float(point[0]),float(point[1])),boundary):\r\n\t\t\tgd_id = gd\r\n\t\t\tbreak \r\n\tif(gd_id>0):\r\n\t\treturn str(gc_id) + '-' + str(gd_id)\r\n\telse:\r\n\t\treturn '-'", "def getSpeciesIds(self):\n species = {}\n result_args = self.cursor.callproc(\"get_all_species\")\n # process the result\n for result in self.cursor.stored_results():\n for r in result:\n # print(r)\n species[r[1]] = r[0]\n\n return species", "def filter_plants_by_region_id(region_id, year, host='localhost', area=0.5):\n\n state_dict = {\n 'Alabama':'AL',\n 'Alaska':'AK',\n 'Arizona':'AZ',\n 'Arkansas':'AR',\n 'California':'CA',\n 'Colorado':'CO',\n 'Connecticut':'CT',\n 'Delaware':'DE',\n 'Florida':'FL',\n 'Georgia':'GA',\n 'Hawaii':'HI',\n 'Idaho':'ID',\n 'Illinois':'IL',\n 'Indiana':'IN',\n 'Iowa':'IA',\n 'Kansas':'KS',\n 'Kentucky':'KY',\n 'Louisiana':'LA',\n 'Maine':'ME',\n 'Maryland':'MD',\n 'Massachusetts':'MA',\n 'Michigan':'MI',\n 'Minnesota':'MN',\n 'Mississippi':'MS',\n 'Missouri':'MO',\n 'Montana':'MT',\n 'Nebraska':'NE',\n 'Nevada':'NV',\n 'New Hampshire':'NH',\n 'New Jersey':'NJ',\n 'New Mexico':'NM',\n 'New York':'NY',\n 'North Carolina':'NC',\n 'North Dakota':'ND',\n 'Ohio':'OH',\n 'Oklahoma':'OK',\n 'Oregon':'OR',\n 'Pennsylvania':'PA',\n 'Rhode Island':'RI',\n 'South Carolina':'SC',\n 'South Dakota':'SD',\n 'Tennessee':'TN',\n 'Texas':'TX',\n 'Utah':'UT',\n 'Vermont':'VT',\n 'Virginia':'VA',\n 'Washington':'WA',\n 'West Virginia':'WV',\n 'Wisconsin':'WI',\n 'Wyoming':'WY'\n }\n\n print \"Getting region name from database...\"\n query = \"SELECT regionabr FROM ventyx_nerc_reg_region WHERE gid={}\".format(\n region_id)\n region_name = connect_to_db_and_run_query(query=query,\n database='switch_gis', host=host)['regionabr'][0]\n counties_path = os.path.join('other_data', '{}_counties.tab'.format(region_name))\n \n if not os.path.exists(counties_path):\n # assign county if (area)% or more of its area falls in the region\n query = \"SELECT name, state\\\n FROM ventyx_nerc_reg_region regions CROSS JOIN us_counties cts\\\n JOIN (SELECT DISTINCT state, state_fips FROM us_states) sts \\\n ON (sts.state_fips=cts.statefp) \\\n WHERE regions.gid={} AND\\\n ST_Area(ST_Intersection(cts.the_geom, regions.the_geom))/\\\n ST_Area(cts.the_geom)>={}\".format(region_id, area)\n print \"\\nGetting counties and states for the region from database...\"\n region_counties = 
pd.DataFrame(connect_to_db_and_run_query(query=query,\n database='switch_gis', host=host)).rename(columns={'name':'County','state':'State'})\n region_counties.replace(state_dict, inplace=True)\n region_counties.to_csv(counties_path, sep='\\t', index=False)\n else:\n print \"Reading counties from .tab file...\"\n region_counties = pd.read_csv(counties_path, sep='\\t', index_col=None)\n\n generators = pd.read_csv(\n os.path.join('processed_data','generation_projects_{}.tab'.format(year)), sep='\\t')\n generators.loc[:,'County'] = generators['County'].map(lambda c: str(c).title())\n\n print \"\\nRead in data for {} generators, of which:\".format(len(generators))\n print \"--{} are existing\".format(len(generators[generators['Operational Status']=='Operable']))\n print \"--{} are proposed\".format(len(generators[generators['Operational Status']=='Proposed']))\n\n generators_with_assigned_region = generators.loc[generators['Nerc Region'] == region_name]\n generators = generators[generators['Nerc Region'].isnull()]\n generators_without_assigned_region = pd.merge(generators, region_counties, how='inner', on=['County','State'])\n generators = pd.concat([\n generators_with_assigned_region,\n generators_without_assigned_region],\n axis=0)\n generators.replace(\n to_replace={'Energy Source':coal_codes, 'Energy Source 2':coal_codes,\n 'Energy Source 3':coal_codes}, value='COAL', inplace=True)\n generators_columns = list(generators.columns)\n\n existing_gens = generators[generators['Operational Status']=='Operable']\n proposed_gens = generators[generators['Operational Status']=='Proposed']\n\n print \"=======\"\n print \"Filtered to {} projects in the {} region, of which:\".format(\n len(generators), region_name)\n print \"--{} are existing with {:.0f} GW of capacity\".format(\n len(existing_gens), existing_gens['Nameplate Capacity (MW)'].sum()/1000.0)\n print \"--{} are proposed with {:.0f} GW of capacity\".format(\n len(proposed_gens), proposed_gens['Nameplate Capacity (MW)'].sum()/1000.0)\n print \"=======\"\n\n return generators", "def srid(self) -> ir.IntegerValue:\n return ops.GeoSRID(self).to_expr()", "def basic_crop(data):\n return data['crop'];", "def covariate_to_index(self):\n covariate_df = self.dismod_file.covariate\n return dict(covariate_df[[\"covariate_name\", \"covariate_id\"]].to_records(index=False))", "def openneuro_id_lookup(rvid):\n onid = id_mapping.loc[id_mapping['SUBJECT_NUMBER'] == rvid, 'open_neuro_id'].values[0]\n return onid", "def get_squ_dict(self, index):\n squ = self.squares[index]\n return self.get_dict([self.possibles[cell[0]][cell[1]] for cell in squ], \"S\", squ)", "def solar_profile_indices_map(self):\n\n if self._hybrid_meta is None:\n return np.array([]), np.array([])\n\n idxs = self._hybrid_meta[self.__solar_rpi_n].astype(int)\n idxs = idxs[idxs >= 0]\n\n return idxs.index.values, idxs.values", "def map_ll_to_seviri(lon, lat):\n # new method\n # project lat/lon input to meteosat view, mask out of bounds data\n geos = pyproj.Proj(proj='geos', h=35785831.0,lon_0=0,lat_0=0,x_0=0,y_0=0,units='m')\n x,y = geos(lon,lat)\n x = ma.masked_equal(x,1e30)\n y = ma.masked_equal(y,1e30)\n # Convert to index. 
~3000.5m per pixel, centre pixel index is [1855,1855]\n x = x/-3000.5+1855\n y = y/3000.5+1855\n return x,y\n # old method\n \"\"\"\n # Define Earth radius and geostationary orbit height in km and calucalte max\n # viewer angle\n r_sat = 42164.\n r_earth = 6378.\n zenith_max = np.arcsin(r_earth/r_sat)\n # convert lat/lon to cartesian coordinates\n x = np.cos(np.radians(lat)) * np.sin(np.radians(lon))\n y = np.sin(np.radians(lat))\n z = np.cos(np.radians(lat)) * np.cos(np.radians(lon))\n # x,y vector magnitude\n d = np.sqrt(x**2 + y**2)\n # Calculate footprint SEVIRI effective zenith angle and mask for > pi/2\n # values\n zenith = np.arctan2(d, z) + np.arctan2(r_earth*d, r_sat-r_earth*z)\n zenith_mask = np.abs(zenith) >= (0.5 * np.pi)\n # Calculate x and y viewer angles\n theta_x = np.arctan2(r_earth*x, r_sat-r_earth*z)\n theta_y = np.arctan2(r_earth*y, r_sat-r_earth*z)\n # Define SEVIRI global index range and offset\n # These should be the same on all files, but may need to check\n x_irange = 3623\n x_ioffset = 44\n y_irange = 3611\n y_ioffset = 51\n # Remap viewer angles to indexes using max viewer angle, index range and\n # offset. Note -ve theta_y as SEVIRI indexes the x-axis right to left(E-W)\n x_out = (1 - theta_x / zenith_max) * 0.5 * x_irange + x_ioffset\n y_out = (1 + theta_y / zenith_max) * 0.5 * y_irange + y_ioffset\n # Return masked arrays using the zenith angle mask\n return ma.array(x_out, mask=zenith_mask), ma.array(y_out, mask=zenith_mask)\n \"\"\"", "def get_province(self, station_id, time):\n # Make sure the stations have been collected\n if not hasattr(self, 'stations'):\n self.collect_stations()\n\n keys = list(self.stations.keys())\n\n index = numpy.where(\n [any([True for id in self.stations[prov][time] if id == station_id]) for prov in keys]\n )[0]\n\n if index.size == 0:\n raise Exception('Cannot find the station \"{}\" with {} data'.format(station_id, time))\n\n return keys[int(index)]", "def vector_indx_to_map_matrix_indx(index,senzory_map):\n xs = dict(zip(np.unique(senzory_map[:,0]), it.count()))\n ys = dict(zip(np.negative(np.unique(senzory_map[:,1])), it.count()))\n x, y = senzory_map[index]\n return ys[y],xs[x]", "def cen_region_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cen_region_id\")", "def save_new_lid(self):\n region = 'world' if self.city is None else self.city\n id_ = str(hash(self.cids[0]))[:5]\n output = 'new_venue_id_{}_{}'.format(id_, region)\n p.save_var(output, set(self.new_venues))", "def north_fold(longitudes, latitudes):\n # Match indices to coordinates\n coordinates = defaultdict(list)\n for ikey, key in enumerate(zip(longitudes, latitudes)):\n coordinates[key].append(ikey)\n\n # Create bijective map between north fold indices\n result = {}\n for indices in coordinates.itervalues():\n if len(indices) == 2:\n j1, j2 = indices\n result[j1] = j2\n result[j2] = j1\n return result", "def get_family_id_to_index():\n \n family_ids = open(\n resource_filename('contextual_lenses.resources', 'pfam_family_ids.txt'),\n 'r').readlines()\n family_id_to_index = {}\n for i, family_id in enumerate(family_ids):\n family_id_to_index[family_id.replace('\\n', '')] = i\n\n return family_id_to_index", "def polygon_ids(self):\n return self.get_ids()", "def region_of_province(province_in: str) -> str:\n region = None\n for r in ITALY_MAP:\n for p in ITALY_MAP[r]:\n if province_in == p:\n region = r\n return region" ]
[ "0.75980484", "0.75025374", "0.5695294", "0.5330386", "0.52470756", "0.5175866", "0.5105939", "0.5084242", "0.5022458", "0.50011927", "0.49794403", "0.49425364", "0.48705566", "0.48659784", "0.48595893", "0.48556918", "0.48154962", "0.48137003", "0.4780601", "0.47703573", "0.47681645", "0.47629926", "0.47125733", "0.47030348", "0.46974802", "0.46960977", "0.46704838", "0.46651325", "0.46628627", "0.46618944" ]
0.8482437
0
Returns mapping between Geopedia's crop index and crop id for Austria.
def get_austria_crop_geopedia_idx_to_crop_id_mapping(): gpd_session = GeopediaSession() to_crop_id = list(GeopediaFeatureIterator(layer='2032', gpd_session=gpd_session)) to_crop_id = [{'crop_geopedia_idx': code['id'], **code['properties']} for code in to_crop_id] to_crop_id = pd.DataFrame(to_crop_id) to_crop_id['crop_geopedia_idx'] = pd.to_numeric(to_crop_id.crop_geopedia_idx) to_crop_id.rename(index=str, columns={"SNAR_BEZEI": "SNAR_BEZEI_NAME"}, inplace=True) to_crop_id.rename(index=str, columns={"crop_geopedia_idx": "SNAR_BEZEI"}, inplace=True) return to_crop_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_slovenia_crop_geopedia_idx_to_crop_id_mapping():\n gpd_session = GeopediaSession()\n to_crop_id = list(GeopediaFeatureIterator(layer='2036', gpd_session=gpd_session))\n to_crop_id = [{'crop_geopedia_idx': code['id'], **code['properties']} for code in to_crop_id]\n to_crop_id = pd.DataFrame(to_crop_id)\n to_crop_id['crop_geopedia_idx'] = pd.to_numeric(to_crop_id.crop_geopedia_idx)\n\n return to_crop_id", "def get_danish_crop_geopedia_idx_to_crop_id_mapping():\n gpd_session = GeopediaSession()\n to_crop_id = list(GeopediaFeatureIterator(layer='2050', gpd_session=gpd_session))\n to_crop_id = [{'crop_geopedia_idx': code['id'], **code['properties']} for code in to_crop_id]\n to_crop_id = pd.DataFrame(to_crop_id)\n to_crop_id['crop_geopedia_idx'] = pd.to_numeric(to_crop_id.crop_geopedia_idx)\n\n return to_crop_id", "def crop_id(self):\n return self._crop_id", "def GetMapId(landsat, date, date_range):\n \n def maskClouds(img):\n scored = ee.Algorithms.Landsat.simpleCloudScore(img);\n return img.updateMask(scored.select(['cloud']).lt(20));\n\n def CreateTimeBand(img):\n return maskClouds(img).byte().addBands(img.metadata('system:time_start'))\n\n if landsat == 'l7':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L7)\n l7 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l7Composite = l7.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l7Composite.getMapId({\n 'min': '0,0,0',\n 'max': '255,255,255',\n 'bands': 'B4,B3,B2',\n })\n if landsat == 'l8':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L8)\n l8 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l8Composite = l8.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l8Composite.getMapId({\n 'min': '0',\n 'max': '0.4',\n 'bands': 'B4,B3,B2',\n })", "def _create_img_id_to_idx(self):\n with h5py.File(self.image_features_path, 'r') as features_file:\n coco_ids = features_file['ids'][()]\n coco_id_to_index = {id: i for i, id in enumerate(coco_ids)}\n return coco_id_to_index", "def postalcode_area_studies():\n dfpawnshop = pd.read_csv(pawnmtl.csv)\n cpdic = getPostalCodeDic()\n for ik in cpdic.keys():\n print ik, cpdic[ik]", "def id_index_map(self):\n result = {}\n for index, component_data in iteritems(self):\n result[id(component_data)] = index\n return result", "def get_mapu_kanala_ID_OPIS(self):\n out = {}\n for kanal in self.sviKanali:\n out[kanal] = self.get_datastore(kanal).koncentracija.opis\n return out", "def getTopPopulationRegion(self):\n\t\tdata = {}\n\t\tfor iProvince in range(con.iNumRegions):\n\t\t\tdata[iProvince] = 0\n\t\tfor iLoopPlayer in range(con.iBarbarian + 1):\n\t\t\tapCityList = PyPlayer(iLoopPlayer).getCityList()\n\t\t\tfor pCity in apCityList:\n\t\t\t\tdata[pCity.GetCy().plot().getRegionID()] += pCity.getPopulation()\n\t\tkey = -1\n\t\tfor key, value in sorted(data.iteritems(), key=lambda (k,v): (v,k)):\n\t\t\tpass\n\t\treturn key", "def showId(self):\n #Here I'm supposing that the name of the table, and the extent polygon gives a unique mapping.\n try:\n extent = self.biomeGeometry.extent\n name = \"tax\"\n res = self.biomeGeometry.area\n string = \"%s-%s:%s:%s\" %(name,self.gid,extent,res)\n return string\n except:\n logger.error(\"[biospatial.gbif.taxonomy.GriddedTaxonomy] \\n The total geometry area has not been defined. 
Try running mergeGeometries first\")\n raise Exception(\"Geometry Extent has not been instantiated\")\n return None", "def make_sector_map(self, data):\n\n def format_county_fips(cf):\n\n cf = str(cf)\n\n if len(cf)<=4:\n\n cf = '0'+cf\n\n return cf\n\n data['COUNTY_FIPS'] = data.COUNTY_FIPS.apply(\n lambda x: format_county_fips(x)\n )\n\n # match on geo_id\n map_data = self.cshp.set_index('GEOID').join(\n data.set_index('COUNTY_FIPS').MMBtu\n )\n\n ## Need to specify colors or will geopandas automatcially assign?", "def east_asia_pacific_countries():\r\n east_asia_pacific_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in east_asia_pacific:\r\n east_asia_pacific_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in east_asia_pacific_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def basic_crop(data):\n return data['crop'];", "def region_of_province(province_in: str) -> str:\n region = None\n for r in ITALY_MAP:\n for p in ITALY_MAP[r]:\n if province_in == p:\n region = r\n return region", "def cen_region_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cen_region_id\")", "def _computeCoaddExposureId(self, dataId, singleFilter):\n\n tract = int(dataId['tract'])\n if tract < 0 or tract >= 2**SuperBITMapper._nbit_tract:\n raise RuntimeError('tract not in range [0,%d)' % (2**SuperBITMapper._nbit_tract))\n patchX, patchY = [int(patch) for patch in dataId['patch'].split(',')]\n for p in (patchX, patchY):\n if p < 0 or p >= 2**SuperBITMapper._nbit_patch:\n raise RuntimeError('patch component not in range [0, %d)' % 2**SuperBITMapper._nbit_patch)\n oid = (((tract << SuperBITMapper._nbit_patch) + patchX) << SuperBITMapper._nbit_patch) + patchY\n if singleFilter:\n return (oid << SuperBITMapper._nbit_filter) + afwImage.Filter(dataId['filter']).getId()\n return oid", "def filter_plants_by_region_id(region_id, year, host='localhost', area=0.5):\n\n state_dict = {\n 'Alabama':'AL',\n 'Alaska':'AK',\n 'Arizona':'AZ',\n 'Arkansas':'AR',\n 'California':'CA',\n 'Colorado':'CO',\n 'Connecticut':'CT',\n 'Delaware':'DE',\n 'Florida':'FL',\n 'Georgia':'GA',\n 'Hawaii':'HI',\n 'Idaho':'ID',\n 'Illinois':'IL',\n 'Indiana':'IN',\n 'Iowa':'IA',\n 'Kansas':'KS',\n 'Kentucky':'KY',\n 'Louisiana':'LA',\n 'Maine':'ME',\n 'Maryland':'MD',\n 'Massachusetts':'MA',\n 'Michigan':'MI',\n 'Minnesota':'MN',\n 'Mississippi':'MS',\n 'Missouri':'MO',\n 'Montana':'MT',\n 'Nebraska':'NE',\n 'Nevada':'NV',\n 'New Hampshire':'NH',\n 'New Jersey':'NJ',\n 'New Mexico':'NM',\n 'New York':'NY',\n 'North Carolina':'NC',\n 'North Dakota':'ND',\n 'Ohio':'OH',\n 'Oklahoma':'OK',\n 'Oregon':'OR',\n 'Pennsylvania':'PA',\n 'Rhode Island':'RI',\n 'South Carolina':'SC',\n 'South Dakota':'SD',\n 'Tennessee':'TN',\n 'Texas':'TX',\n 'Utah':'UT',\n 'Vermont':'VT',\n 'Virginia':'VA',\n 'Washington':'WA',\n 'West Virginia':'WV',\n 'Wisconsin':'WI',\n 'Wyoming':'WY'\n }\n\n print \"Getting region name from database...\"\n query = \"SELECT regionabr FROM ventyx_nerc_reg_region WHERE gid={}\".format(\n region_id)\n region_name = connect_to_db_and_run_query(query=query,\n database='switch_gis', host=host)['regionabr'][0]\n counties_path = os.path.join('other_data', '{}_counties.tab'.format(region_name))\n \n if not os.path.exists(counties_path):\n # assign county if (area)% or more of its area 
falls in the region\n query = \"SELECT name, state\\\n FROM ventyx_nerc_reg_region regions CROSS JOIN us_counties cts\\\n JOIN (SELECT DISTINCT state, state_fips FROM us_states) sts \\\n ON (sts.state_fips=cts.statefp) \\\n WHERE regions.gid={} AND\\\n ST_Area(ST_Intersection(cts.the_geom, regions.the_geom))/\\\n ST_Area(cts.the_geom)>={}\".format(region_id, area)\n print \"\\nGetting counties and states for the region from database...\"\n region_counties = pd.DataFrame(connect_to_db_and_run_query(query=query,\n database='switch_gis', host=host)).rename(columns={'name':'County','state':'State'})\n region_counties.replace(state_dict, inplace=True)\n region_counties.to_csv(counties_path, sep='\\t', index=False)\n else:\n print \"Reading counties from .tab file...\"\n region_counties = pd.read_csv(counties_path, sep='\\t', index_col=None)\n\n generators = pd.read_csv(\n os.path.join('processed_data','generation_projects_{}.tab'.format(year)), sep='\\t')\n generators.loc[:,'County'] = generators['County'].map(lambda c: str(c).title())\n\n print \"\\nRead in data for {} generators, of which:\".format(len(generators))\n print \"--{} are existing\".format(len(generators[generators['Operational Status']=='Operable']))\n print \"--{} are proposed\".format(len(generators[generators['Operational Status']=='Proposed']))\n\n generators_with_assigned_region = generators.loc[generators['Nerc Region'] == region_name]\n generators = generators[generators['Nerc Region'].isnull()]\n generators_without_assigned_region = pd.merge(generators, region_counties, how='inner', on=['County','State'])\n generators = pd.concat([\n generators_with_assigned_region,\n generators_without_assigned_region],\n axis=0)\n generators.replace(\n to_replace={'Energy Source':coal_codes, 'Energy Source 2':coal_codes,\n 'Energy Source 3':coal_codes}, value='COAL', inplace=True)\n generators_columns = list(generators.columns)\n\n existing_gens = generators[generators['Operational Status']=='Operable']\n proposed_gens = generators[generators['Operational Status']=='Proposed']\n\n print \"=======\"\n print \"Filtered to {} projects in the {} region, of which:\".format(\n len(generators), region_name)\n print \"--{} are existing with {:.0f} GW of capacity\".format(\n len(existing_gens), existing_gens['Nameplate Capacity (MW)'].sum()/1000.0)\n print \"--{} are proposed with {:.0f} GW of capacity\".format(\n len(proposed_gens), proposed_gens['Nameplate Capacity (MW)'].sum()/1000.0)\n print \"=======\"\n\n return generators", "def idpac(self):\n return self._idpac", "def get_mapping():\n \n import pandas as pd\n data = pd.read_csv('/home/yuheng/Downloads/ADE20K_2016_07_26/objectInfo150.txt',sep='\\t',lineterminator='\\n') \n mapping = {}\n for i in range(150):\n line = data.loc[i]\n mapping[ int(line['Idx']) ] = line['Name']\n \n return mapping", "def derive_id(row):\n fips = row.get('fips')\n if len(fips) == 2:\n # if row has fips length 2, then it is a state, and the id is state_abbr\n fid = row['state_abbr']\n elif re.match(r'\\d{5}', fips):\n # if row belongs to a county, then id is fips\n fid = fips\n elif fips == \"\":\n # if no fips, then we make up an id\n fid = f'99999-{row[\"state_abbr\"]}-{row[\"county\"]}'\n else: # this shouldn't happen\n import pdb; pdb.set_trace(); raise\n return fid", "def country_id(self):\n return self._country_id", "def country_id(self):\n return self._country_id", "def covariate_to_index(self):\n covariate_df = self.dismod_file.covariate\n return dict(covariate_df[[\"covariate_name\", 
\"covariate_id\"]].to_records(index=False))", "def tileIDfromCelestialCoordinates(ra, dec, nside, units='degrees'):\n return pixelsForAng(lon=ra, lat=dec, nside=nside, unit=units)", "def get_region_id(region_name):\n region_id = None\n all_region = api_get('region')\n if all_region.get('status') == 200:\n region_data = all_region.get('result')\n for region in region_data:\n if region_data[region].get('name') == region_name:\n region_id = region_data[region].get('DCID')\n return region_id", "def load_country_code_data():\n name_conversion = {\n 'East Timor': 'Timor-Leste',\n 'Republic of the Congo': 'Congo (Kinshasa)',\n 'Ivory Coast': 'Cote d\\'Ivoire',\n 'Macedonia': 'North Macedonia',\n 'Myanmar': 'Burma',\n 'Republic of Serbia': 'Serbia',\n 'Taiwan': 'Taiwan*',\n 'The Bahamas': 'Bahamas',\n 'United Republic of Tanzania': 'Tanzania',\n 'United States of America': 'US'\n }\n\n shapefile = os.path.join('data', 'ne_110m_admin_0_countries.shp')\n\n gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]\n gdf.columns = ['country', 'country_code', 'geometry']\n\n gdf.loc[gdf['country'].isin(name_conversion.keys()), 'country'] = gdf['country'].map(name_conversion)\n\n return gdf", "def remap_ids(self, id_map: Dict[int, int]) -> None:", "def get_mapu_kanala_ID_JEDINICA(self):\n out = {}\n for kanal in self.sviKanali:\n out[kanal] = self.get_datastore(kanal).koncentracija.jedinica\n return out", "def map_category_id(category_map):\n category_id = {}\n id_category = {}\n counter = 0\n for category in category_map:\n category_id[category['name']] = counter\n id_category[counter] = category['name']\n counter += 1\n return category_id, id_category", "def geofind():\n return render_template('geo_find.html')" ]
[ "0.77784413", "0.776725", "0.5600029", "0.54075384", "0.5386751", "0.52513653", "0.521203", "0.5181957", "0.5152261", "0.5145004", "0.51417464", "0.5124304", "0.5023551", "0.5006278", "0.49717915", "0.4945458", "0.4932558", "0.49104485", "0.49027997", "0.48968828", "0.4886672", "0.4886672", "0.48576367", "0.48542482", "0.48492536", "0.4834171", "0.48288077", "0.48233607", "0.48066604", "0.4799764" ]
0.85902876
0
Query oVirt for hosts and place them in env.hosts
def query(oquery='', sure='no', ovirt=None): hosts = oVirtObjectType.all_types['host'].query(ovirt, oquery) env.hosts = [host.address for host in hosts] puts(yellow( "Got %d hosts: \n\t" % len(env.hosts) + '\n\t'.join(env.hosts) )) if sure != 'yes' and not env.parallel: if prompt('Is what you expected? y|n', default='y').lower() == 'n': abort('Ended by user request.') return hosts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_hosts(self):\n ...", "def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)", "def iter_hosts():\n environmentdef = _get_environmentdef()\n\n for host in environmentdef.hosts():\n # fabric needs the host if we're calling from main()\n with this_hostname(host.host):\n yield host", "def get_hosts(self):\n\n raise NotImplementedError", "def Hosts(self):\n if not self._hosts:\n hs = self._get_objects(vim.HostSystem)\n for h in hs:\n self._hosts[h.name] = h\n return self._hosts", "def host_list(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n ret = {}\n session = _get_session()\n hosts = session.xenapi.host.get_all()\n for host in hosts:\n host_record = session.xenapi.host.get_record(host)\n ret[host_record[\"name_label\"]] = host_record\n return ret", "def set_hosts(hostfile='allhosts'):\n\n remote_servers = []\n\n file = open(hostfile, 'r')\n for line in file.readlines():\n remote_servers.append(line.strip('\\r\\n'))\n\n env.hosts = remote_servers", "def get_all_hosts(self, view='summary'):\n return self._get(endpoint='{}/hosts'.format(self.api_version),\n params=dict(view=view)).json()", "def staging():\n env.hosts = ['staging.example.com']", "def getHosts(self):\n raise \"not implemented\"", "async def establish_hosts(self):\n scheme = self._config['scheme']\n hosts = self._config['hosts']\n port = self._config['port']\n for hostname in hosts:\n url = '{}://{}:{}/gremlin'.format(scheme, hostname, port)\n host = await driver.GremlinServer.open(\n url, self._loop, **dict(self._config))\n self._hosts.append(host)\n self._hostmap[hostname] = host", "def get_all_hosts(self, view='summary'):\n return self.api_client.get_all_hosts(view=view)['items']", "def hosts(self):\n\n return self._get_list_field(\"hosts\", lambda x: HostSettingContext(x))", "def hosts(self):\n\n return self._get_list_field(\"hosts\", lambda x: HostSettingContext(x))", "def inner():\n hoststrings = []\n if env.key_filename == None: env.key_filename = []\n for host in host_dicts:\n hostname = host.get('hostname', '')\n user = host.get('user', '')\n port = host.get('port', '')\n hoststring = '%s%s%s' % (user and user + '@',\n hostname,\n port and ':' + str(port),\n )\n hoststrings.append(hoststring)\n key_filename = host.get('key_filename')\n if key_filename:\n env.key_filename.append(key_filename)\n env.hosts = hoststrings", "def hosts(self, hosts):\n return self._set_list_field(\"hosts\", hosts)", "def index(self, req):\n LOG.info(\"List all the nova-compute hosts in the system\")\n ctxt = req.environ['nova.context']\n authorize(ctxt)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n services = dbapi.service_get_all_compute_sorted(ctxt)\n # services looks like (Service(object), Decimal('0'))\n # must convert from Decimal('0') to int() because no JSON repr\n hosts = [{'name':srv[0].host,\n 'instanceCount':int(srv[1])}\n for srv in services]\n return {'hosts': hosts}", "def set_hosts(self, hypervisor_per_cluster=False):\n\n self.conf['hosts'] = set()\n\n host_patterns, host_others = self._sift_patterns(\n self.conf.get('hosts_list')\n )\n datacenter_patterns = self.conf.get('datacenter', [])\n cluster_patterns = self.conf.get('cluster', [])\n\n if host_patterns:\n self.conf['host_pattern'] = host_patterns\n\n self.conf['hosts'] = self._get_hypervisors_from_api()\n # Filter all host specified with -H\n host_filtered = set()\n if host_others:\n host_filtered = set([\n (dc, cl, h, is_spm, is_up)\n for dc, cl, h, 
is_spm, is_up in self.conf['hosts']\n if h in host_others\n ])\n not_found = host_others - set(host[2] for host in host_filtered)\n if not_found != set():\n # try to resolve to ip specified hosts\n for fqdn in set(not_found):\n try:\n ipaddr = socket.gethostbyname(fqdn)\n logging.debug('%s --> %s' % (fqdn, ipaddr))\n for (dc, cl, h, is_spm, is_up) in self.conf['hosts']:\n if h == ipaddr:\n host_filtered.add((dc, cl, h, is_spm, is_up))\n not_found.remove(fqdn)\n except socket.error:\n logging.warning(\n _('Cannot resolve {host}').format(\n host=fqdn,\n )\n )\n if not_found != set():\n # try to resolve to ip known hypervisors\n for (dc, cl, h, is_spm, is_up) in self.conf['hosts']:\n try:\n ipaddr = socket.gethostbyname(h)\n logging.debug('%s --> %s' % (h, ipaddr))\n if ipaddr in host_others:\n host_filtered.add((dc, cl, h, is_spm, is_up))\n not_found.remove(ipaddr)\n except socket.error:\n logging.warning(\n _('Cannot resolve {host}').format(\n host=h,\n )\n )\n if not_found != set():\n logging.error(\n _(\n 'The following host are not listed as hypervisors: '\n '{not_listed}. Known hypervisors can be listed using '\n 'the list command'\n ).format(\n not_listed=','.join(not_found)\n )\n )\n sys.exit(ExitCodes.CRITICAL)\n\n orig_hosts = self.conf['hosts'].copy()\n\n if host_patterns:\n for pattern in host_patterns:\n host_filtered |= self._filter_hosts('host', pattern)\n if host_patterns or host_others:\n self.conf['hosts'] &= host_filtered\n\n # Intersect with hosts belonging to the data centers specified with -d\n if datacenter_patterns:\n datacenter_filtered = set()\n for pattern in datacenter_patterns:\n datacenter_filtered |= self._filter_hosts(\n 'datacenter', pattern\n )\n self.conf['hosts'] &= datacenter_filtered\n\n # Intersect with hosts belonging to the clusters specified with -c\n if cluster_patterns:\n # remove all hosts that don't match the patterns\n cluster_filtered = set()\n for pattern in cluster_patterns:\n cluster_filtered |= self._filter_hosts('cluster', pattern)\n self.conf['hosts'] &= cluster_filtered\n\n # If hypervisor_per_cluster is set, collect data only from a single\n # hypervisor per cluster; if the Spm found, collect data from it.\n if hypervisor_per_cluster:\n selected_hosts = dict()\n for dc, cluster, host, is_spm, is_up in self.conf['hosts']:\n # Always add the SPM\n if is_spm:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n # For the given cluster, if no host added yet, add it\n elif cluster.name not in selected_hosts:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n # If a host is up and the SPM isn't added yet, add this host\n elif is_up and not selected_hosts[cluster.name][3]:\n selected_hosts[cluster.name] = (dc, cluster, host, is_spm,\n is_up)\n self.conf['hosts'] &= set(selected_hosts.values())\n\n # warn users if they are going to collect logs from all hosts.\n if orig_hosts and self.conf['hosts'] == orig_hosts:\n logging.warning(\n _(\n 'This ovirt-log-collector call will collect logs from '\n 'all available hosts. 
This may take long time, '\n 'depending on the size of your deployment'\n )\n )\n\n return bool(self.conf.get('hosts'))", "def get_hosts(self):\n\n hosts = self.client.service.getHosts()\n return hosts", "def hosts(self) -> t.List[str]:\n if not self._hosts:\n self._hosts = self._get_db_hosts()\n return self._hosts", "def host_list(self):\n try:\n scode, hosts = Rest.get('Host')\n except Exception as e:\n Console.error(e.message)\n return\n if len(hosts) == 0:\n print(\"No hosts exist\")\n return\n\n n = 1\n e = {}\n for host in hosts:\n d = {}\n d['Ip'] = str(host['Ip'])\n d['Name'] = str(host['Name'])\n d['Port'] = str(host['Port'])\n d['Swarmmode'] = str(host['Swarmmode'])\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Name', 'Port', 'Swarmmode'])))", "def getHosts(**options):\n return search.HostSearch.byOptions(**options)", "def hosts(self):\n return self._hosts", "def hosts(self):\n return self._hosts", "def get_allhosts():\n connection, tablename = HomeNetwork.get_connection_info()\n query = 'SELECT hostname from {}'.format(tablename)\n output = pandas.read_sql_query(query, connection).to_json(orient='records')\n\n for host in json.loads(output):\n yield host[\"hostname\"]", "def add_hosts(self, hosts):\n for host in hosts:\n if host not in self.__hosts__:\n self.__hosts__.append(KnownHostsHost(host))", "def hosts(self, hosts):\n self._hosts = hosts", "def qhost():\n command = '%s -xml -q' % QHOST_PATH\n result_xml = subprocess.check_output([command], env=ENV, shell=True)\n hosts_element = xml.etree.ElementTree.fromstring(result_xml)\n hosts = []\n for host_element in hosts_element:\n if host_element.get('name') == 'global':\n continue\n host = {\n 'name': host_element.get('name')\n }\n queues = {}\n for host_value in host_element:\n if host_value.tag == 'hostvalue':\n host[host_value.get('name')] = host_value.text\n elif host_value.tag == 'queue':\n queue_name = host_value.get('name')\n queue = {}\n for queue_value in host_value:\n queue[queue_value.get('name')] = queue_value.text\n queues[queue_name] = queue\n host['queues'] = queues\n hosts.append(host)\n return hosts", "def get_all_hosts_puppetdb():\n\n puppetdb_api_url = config['puppetdb_api_url']\n puppetdb_certfile = config.get('puppetdb_certfile', None)\n puppetdb_keyfile = config.get('puppetdb_keyfile', None)\n puppetdb_cafile = config.get('puppetdb_cafile', None)\n\n # query to match only puppet hosts with Check_mk::Agent class\n query = {\n 'query': ['=', 'type', 'Check_mk::Agent'],\n }\n\n r = requests.post(puppetdb_api_url, json=query,\n cert=(puppetdb_certfile, puppetdb_keyfile), verify=puppetdb_cafile)\n\n hosts = {}\n for res in r.json():\n tags = res['tags']\n hostname = res['certname']\n host_environment = res['environment']\n for tag in res['tags']:\n if tag.startswith('roles::') or tag.startswith('role::'):\n host_role = tag.split('::')[1]\n hosts[hostname] = { 'puppet_environment': host_environment,\n 'puppet_role': host_role }\n\n logging.info('got %s hosts from puppetdb', len(hosts))\n\n return hosts", "def hosts(self) -> dict:\n return self._hosts" ]
[ "0.71519107", "0.68191886", "0.6781512", "0.6779192", "0.67747766", "0.6615469", "0.6611653", "0.65831035", "0.6563107", "0.6526407", "0.6524256", "0.6518464", "0.6516803", "0.6516803", "0.65129817", "0.6509997", "0.6502699", "0.6498704", "0.6484243", "0.6476118", "0.64731044", "0.64551276", "0.6431265", "0.6431265", "0.6423799", "0.6385826", "0.6368936", "0.63364065", "0.6291055", "0.62894595" ]
0.70534045
1
List walks, or create a new walk.
def walk_list_api(request): if request.method == 'GET': walks = Walk.objects.filter(user=request.user).order_by("-date") serializer = WalkSerializer(walks, many=True) return Response(serializer.data) elif request.method == 'POST': serializer = WalkSerializer(data=request.data) if serializer.is_valid(): serializer.save(user=request.user) return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def walk(self):\n pass", "def _generate_walks(self):\n return parallel_generate_walks(\n self.d_graph,\n self.walk_length,\n self.num_walks,\n 'Single process!',\n self.sampling_strategy,\n self.NUM_WALKS_KEY,\n self.WALK_LENGTH_KEY,\n self.NEIGHBORS_KEY,\n self.PROBABILITIES_KEY,\n self.FIRST_TRAVEL_KEY,\n self.quiet\n )", "def save_walks(self, f_name):\n # todo: delete this method\n w_file = open(f_name, 'w')\n if not self.walks:\n print(\"No walks exist.\\nYou must run perform_walks() first!\")\n return\n for walk in self.walks:\n w_len = len(walk)\n for i, node in enumerate(walk):\n w_file.write(\"%s\" % node)\n if i < (w_len - 1):\n w_file.write(\" \")\n w_file.write(\"%s\" % os.linesep)\n w_file.flush()\n w_file.close()", "def _generate_walks(self):\n\n flatten = lambda l: [item for sublist in l for item in sublist]\n\n # Split num_walks for each worker\n num_walks_lists = np.array_split(range(self.num_walks), self.workers)\n\n walk_results = Parallel(n_jobs=self.workers, temp_folder=self.temp_folder, require=self.require)(\n delayed(self.parallel_generate_walks)(self.d_graph,\n self.walk_length,\n len(num_walks),\n idx,\n self.sampling_strategy,\n self.NUM_WALKS_KEY,\n self.WALK_LENGTH_KEY,\n self.NEIGHBORS_KEY,\n self.PROBABILITIES_KEY,\n self.FIRST_TRAVEL_KEY,\n self.quiet) for\n idx, num_walks\n in enumerate(num_walks_lists, 1))\n\n walks = flatten(walk_results)\n\n return walks", "def create_list(self):\n\n if self.search_on == 'news':\n self.news()\n elif self.search_on == 'twitter':\n self.twitter()\n else:\n print(\"Invalid search method\")\n return", "def traverse(self, traverser, **kwargs):\n return traverser.list(self, **kwargs)", "def pickWalk(*args, direction: AnyStr=\"\", recurse: bool=True, type: AnyStr=\"\",\n **kwargs)->List[AnyStr]:\n pass", "def walk_single_walker(at, movement_args, Emax, KEmax):\n#DOC\n#DOC ``walk_single_walker``\n\n out = {}\n\n if movement_args['do_good_load_balance']:\n possible_moves = np.array( [ do_atom_walk,\n do_MC_cell_volume_step,\n do_MC_cell_shear_step,\n do_MC_cell_stretch_step,\n do_MC_swap_step,\n do_MC_semi_grand_step ] )\n nums = np.array( [ movement_args['n_atom_steps_n_calls'],\n movement_args['n_cell_volume_steps'],\n movement_args['n_cell_shear_steps'],\n movement_args['n_cell_stretch_steps'],\n movement_args['n_swap_steps'],\n movement_args['n_semi_grand_steps'] ] )\n costs = np.array( [ movement_args['atom_traj_len']*movement_args['atom_traj_len_cost_multiplier'],\n 1,\n 1,\n 1,\n 1,\n 1 ] )\n\n list = create_list(costs, nums, movement_args['n_model_calls'])\n for move_i in list:\n (t_n_model_calls, t_out) = possible_moves[move_i](at, movement_args, Emax, KEmax)\n accumulate_stats(out, t_out)\n\n else:\n #DOC \\item create list\n #DOC \\item do\\_atom\\_walk :math:`*` n\\_atom\\_step\\_n\\_calls\n possible_moves = ( [do_atom_walk] * movement_args['n_atom_steps_n_calls'] +\n #DOC \\item do\\_cell\\_volume\\_step :math:`*` n\\_cell\\_volume\\_steps\n [do_MC_cell_volume_step] * movement_args['n_cell_volume_steps'] + \n #DOC \\item do\\_cell\\_shear\\_step :math:`*` n\\_cell\\_shear\\_steps\n [do_MC_cell_shear_step] * movement_args['n_cell_shear_steps'] + \n #DOC \\item do\\_cell\\_stretch\\_step :math:`*` n\\_cell\\_stretch\\_steps\n [do_MC_cell_stretch_step] * movement_args['n_cell_stretch_steps'] + \n #DOC \\item do\\_swap\\_step :math:`*` n\\_swap\\_steps\n [do_MC_swap_step] * movement_args['n_swap_steps'] +\n #DOC \\item do\\_semi\\_grand\\_step :math:`*` n\\_semi\\_grand\\_steps\n [do_MC_semi_grand_step] * 
movement_args['n_semi_grand_steps'] )\n\n out = {}\n n_model_calls_used=0\n\n #DOC \\item loop while n\\_model\\_calls\\_used < n\\_model\\_calls\n while n_model_calls_used < movement_args['n_model_calls']:\n #DOC \\item pick random item from list\n move = possible_moves[rng.int_uniform(0,len(possible_moves))]\n #DOC \\item do move\n (t_n_model_calls, t_out) = move(at, movement_args, Emax, KEmax)\n n_model_calls_used += t_n_model_calls\n accumulate_stats(out, t_out)\n\n\n #DOC \\item perturb final energy by random\\_energy\\_perturbation\n # perturb final energy\n at.info['ns_energy'] = rand_perturb_energy(at.info['ns_energy'],ns_args['random_energy_perturbation'],Emax)\n\n #DEBUG print \"walk_single_walker end \", eval_energy(at, do_PE=False), eval_energy(at) #DEBUG\n\n return out", "def add_walk(self, points):\r\n for i in range(0, len(self.walk), 2):\r\n if i + 1 != len(self.walk):\r\n if self.walk[i+1] is None:\r\n points.append(None)\r\n points.append(self.walk[i])", "def walk(self, top, followlinks=False):\r\n try:\r\n names = self.listdir(top)\r\n except os.error:\r\n return\r\n\r\n items = []\r\n for name in names:\r\n items.append(name)\r\n\r\n yield top, items\r\n\r\n for name in items:\r\n new_path = os.path.join(top, name)\r\n if followlinks or not self.islink(new_path):\r\n for x in self.walk(new_path, followlinks):\r\n yield x", "def simulate_walks(self, edge_type, num_walks, walk_length, schema=None):\n walks = []\n nodes = list(range(0, self.graph[edge_type].num_nodes))\n\n for walk_iter in tqdm.tqdm(range(num_walks)):\n random.shuffle(nodes)\n for node in nodes:\n walk = self.graph[edge_type].random_walk(\n [node], max_depth=walk_length - 1)\n for i in range(len(walk)):\n walks.append(walk[i])\n\n return walks", "def generate_walks(self):\n all_walks = {}\n for e_type in self.edge_types:\n layer_walks = self.simulate_walks(\n edge_type=e_type,\n num_walks=self.config['num_walks'],\n walk_length=self.config['walk_length'])\n\n all_walks[e_type] = layer_walks\n\n return all_walks", "def list(\n self,\n name,\n ):\n pass", "def use_listwalker(self, listwalker):\n self.body.contents[:] = listwalker\n return", "def extract_walks(self, kg: KG, entity: Vertex) -> List[Walk]:\n if self.max_walks is None:\n fct_search = self._bfs\n else:\n fct_search = self._dfs\n if self.with_reverse:\n return [\n r_walk[:-1] + walk\n for walk in fct_search(kg, entity)\n for r_walk in fct_search(kg, entity, is_reverse=True)\n ]\n return [walk for walk in fct_search(kg, entity)]", "def _helpWalk(syn, synId, includeTypes, newpath=None):\n starting = syn.get(synId, downloadFile=False)\n # If the first file is not a container, return immediately\n if newpath is None and not is_container(starting):\n return\n elif newpath is None:\n dirpath = (starting[\"name\"], synId)\n else:\n dirpath = (newpath, synId)\n dirs = []\n nondirs = []\n results = syn.getChildren(synId, includeTypes)\n for i in results:\n if is_container(i):\n dirs.append((i[\"name\"], i[\"id\"]))\n else:\n nondirs.append((i[\"name\"], i[\"id\"]))\n yield dirpath, dirs, nondirs\n for name in dirs:\n # The directory path for each os.walk() result needs to be built up\n # This is why newpath is passed in\n newpath = os.path.join(dirpath[0], name[0])\n for x in _helpWalk(syn, name[1], includeTypes, newpath=newpath):\n yield x", "def new():\n list_new()", "def list(self, *args):\n return []", "def main_list(args):\n return list_commands(args.directory)", "def list():", "def list():", "def walk(self): # FileObj.walk\n yield self", "def 
get_word2vec_from_walks(\n walks: Walks,\n word2vec_parameters: Optional[Word2VecParameters] = None,\n) -> Word2Vec:\n if word2vec_parameters is None:\n word2vec_parameters = Word2VecParameters()\n\n # the docs lie, it actually needs this data structure\n walks = [list(walk) for walk in walks]\n\n return Word2Vec(\n sentences=walks,\n size=word2vec_parameters.size,\n window=word2vec_parameters.window,\n min_count=word2vec_parameters.min_count,\n sg=word2vec_parameters.sg,\n hs=word2vec_parameters.hs,\n workers=word2vec_parameters.workers,\n )", "def walk(self, members, callables):\n answer = [self._answer(members, callables)]\n for item in self.items:\n if isinstance(item, Pulse):\n answer.append(item._answer(members, callables))\n else:\n answer.append(item.walk(members, callables))\n\n return answer", "def walk(self, members, callables):\n answer = [self._answer(members, callables),\n self.context._answer(members, callables)]\n for item in self.items:\n if isinstance(item, Pulse):\n answer.append(item._answer(members, callables))\n else:\n answer.append(item.walk(members, callables))\n\n return answer", "def showMonsterWalkPath(data_list):\n\tpos = [0,0]\n\tfor i in range(len(data_list)):\n\t\tif i == 0:\n\t\t\tpos = copy.copy(data_list[i])\n\t\t\tgameDisplay.blit(monster_walk, pos)\n\t\telse:\n\t\t\tmonster_move = False\n\t\t\tnum_cal = [1,0,0]\n\t\t\tdx = (data_list[i][0] - pos[0])/50\n\t\t\tdy = (data_list[i][1] - pos[1])/50\n\t\t\tif dx < 0 or dy < 0:\n\t\t\t\tnum_cal[0] = -1\n\t\t\tif dx != 0:\n\t\t\t\tmonster_move = True\n\t\t\t\tnum_cal[1] = 1\n\t\t\telif dy != 0:\n\t\t\t\tmonster_move = True\n\t\t\t\tnum_cal[2] = 1\n\t\t\tif monster_move:\n\t\t\t\tfor t in range(abs(dx+dy)):\n\t\t\t\t\tpos[0] += num_cal[0]*num_cal[1]*50\n\t\t\t\t\tpos[1] += num_cal[0]*num_cal[2]*50\n\t\t\t\t\tgameDisplay.blit(monster_walk, pos)", "def get_random_walkers(self, count):\n available_walkers = [w for w in self.read_all()\n if w.get_availability()]\n\n if len(available_walkers) < count:\n raise NotEnoughWalkersException\n elif len(available_walkers) == count:\n return available_walkers\n\n random_walkers = list()\n for i in count:\n random_walkers.append(available_walkers[\n randint(0, len(available_walkers) - 1)])\n\n return random_walkers", "def cmd_list(args):", "def sim_walks(num_steps, num_trials, d_class):\n Homer = d_class()\n origin = Location(0, 0)\n distances = []\n for t in range(num_trials):\n f = Field()\n f.add_drunk(Homer, origin)\n distances.append(round(walk(f, Homer, num_trials), 1))\n return distances", "def get_walks(graph, num_walks, walk_length, matrix, p, q, use_multiprocessing: bool = True, ):\n\n nodes = list(graph.nodes())\n\n shuffled_nodes = random.sample(nodes*num_walks,len(nodes)*num_walks)\n partial_get_walk = partial(_get_walk, graph=graph, walk_length=walk_length, matrix=matrix, p=p, q=q)\n if use_multiprocessing:\n with Pool(cpu_count()) as p:\n logger.warning(f'Use multiprocessing on {cpu_count()} cores')\n chunksize=len(shuffled_nodes)//cpu_count()\n walks = p.map(partial_get_walk, shuffled_nodes,chunksize=chunksize)\n\n else:\n walks = []\n for node in nodes:\n walks.append(partial_get_walk(node))\n\n return walks" ]
[ "0.5646791", "0.5542716", "0.55225664", "0.5464238", "0.54215646", "0.528533", "0.52653676", "0.511849", "0.5058529", "0.5012877", "0.4977857", "0.49737927", "0.4950848", "0.4932711", "0.4876913", "0.48244464", "0.48208708", "0.4789596", "0.4759153", "0.47326207", "0.47326207", "0.4714558", "0.4707602", "0.47047994", "0.46795094", "0.46626455", "0.46483648", "0.4646683", "0.4642788", "0.46394542" ]
0.56819975
0
Python script to read information from a public API; returns employees and their completed tasks
def main(): number = sys.argv[1] url_user = "https://jsonplaceholder.typicode.com/users/{}".format(number) url_tasks = ("https://jsonplaceholder.typicode.com/users/{}/todos". format(number)) response = requests.get(url_tasks) tasks = response.json() user_info = requests.get(url_user).json() employee_name = user_info["name"] list_of_done_tasks = [x for x in tasks if x['completed']] number_of_done_tasks = len(list_of_done_tasks) total_task_number = len(tasks) print("Employee {} is done with tasks({}/{}):".format(employee_name, number_of_done_tasks, total_task_number)) for task in list_of_done_tasks: print("\t {}".format(task["title"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTasks(server, appId, maxNumberTasks, completedOnly, oper = 0, fileName = 'data/jsonTasksInfo.dat'):\n if oper == 0:\n if completedOnly == 1:\n JSONdata = urllib2.urlopen(url=server+\"/api/task?app_id=\"+ \\\n str(appId)+\"&state=completed&limit=\"+ \\\n str(maxNumberTasks)).read()\n else:\n JSONdata = urllib2.urlopen(url=server+\"/api/task?app_id=\"+ \\\n str(appId)+\"&limit=\"+str(maxNumberTasks)).read()\n data = json.loads(JSONdata)\n with open(fileName,'w') as outfile:\n json.dump(data, outfile)\n outfile.close()\n elif oper == 1:\n with open(fileName,'r') as outfile:\n data = json.load(outfile)\n outfile.close()\n numberTasks = len(data)\n tasksInfo = []\n for item in range(numberTasks):\n tasksInfo.append({'taskId':data[item]['id'], \\\n 'area':data[item]['info']['tile']['restrictedExtent']})\n print 'number of total completed tasks: ', len(tasksInfo)\n return tasksInfo", "def test_get(self):\n task_types = [1, 2]\n\n for task_type in task_types:\n self.john_gamer.tasks.start(task_type)\n\n self.client.force_login(self.john)\n resp = self.client.get(self.URL)\n\n self.assertListEqual(\n resp.json(),\n ['Type: 1, time left: 42s', 'Type: 2, time left: 42s'],\n \"Gamer can't get list of task via API!\"\n )", "def info():\n # -------- Task 1 -------------------------\n # Please complete the following information\n\n return {\"agent name\": \"?\", # COMPLETE HERE\n \"student name\": [\"?\"], # COMPLETE HERE\n \"student number\": [\"?\"]} # COMPLETE HERE", "def task_parse_results():\n pass", "def request_from_api(_id):\n # get employee name\n employee_name = requests.get(\n 'https://jsonplaceholder.typicode.com/users/' + _id).json()['username']\n\n # get employee info\n info = requests.get(\n 'https://jsonplaceholder.typicode.com/todos?userId=' + _id).json()\n # Write to csv\n j_output = {_id: [{\n \"task\": task.get(\"title\"),\n \"completed\": task.get(\"completed\"),\n \"username\": employee_name\n } for task in info]}\n # write json\n with open('{}.json'.format(_id), 'w') as json_file:\n json_file.write(json.dumps(j_output))", "def task1():\n logger.info(\"In API3 task1 function\")\n return \"task1 success!\"", "def test_get_task(self):\n resp = self.app.get('/api/2/inf/esrs',\n headers={'X-Auth': self.token})\n\n task_id = resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)", "def getResults(server, tasksInfo, maxNumberAnswers, oper = 0, fileName1 = 'data/jsonAnswersInfo.dat', fileName2 = 'data/jsonUsableInfo.dat'):\n #~ usableData = open('usableData.dat', 'w')\n answersApp = []\n usableTasks = []\n numberTasks = len(tasksInfo)\n if oper == 0:\n answerIdx = 0\n #~ for item, number in enumerate(tasksInfo):\n for item in range(numberTasks):\n JSONdata = urllib2.urlopen(url=server+\"/api/taskrun?task_id=\"+ \\\n str(tasksInfo[item]['taskId'])+\"&limit=\"+ \\\n str(maxNumberAnswers)).read()\n data = json.loads(JSONdata)\n lenData = len(data)\n #HARDCODE BEGINS - Testing the obtaining of an exact number of answers\n if (lenData < 0):\n # If there are less answers, we pop the item out!\n #~ trash = tasksInfo.pop(item)\n continue\n else:\n print \"Task \" + str(tasksInfo[item]['taskId']) + \" has \" + str(lenData) + \" answers. NICE! 
:-)\\n\"\n usableTasks.append(tasksInfo[item])\n #HARDCODE MIDDLE\n #~ usableData.write(str(tasksInfo[item]['taskId'])+\" \"+str(tasksInfo[item]['area'])+\"\\n\")\n answersApp.append([])\n for ans in range(lenData):\n answersApp[answerIdx].append({'taskId':data[ans]['task_id'], \\\n 'id':data[ans]['id'], 'answer':data[ans]['info']['besttile']})\n answerIdx = answerIdx + 1\n #HARDCODE END\n with open(fileName1,'w') as outfile:\n json.dump(answersApp, outfile)\n outfile.close()\n with open(fileName2,'w') as outfile:\n json.dump(usableTasks, outfile)\n outfile.close()\n elif oper == 1:\n with open(fileName1,'r') as outfile:\n answersApp = json.load(outfile)\n outfile.close()\n with open(fileName2,'r') as outfile:\n usableTasks = json.load(outfile)\n outfile.close()\n print 'number of tasks: ', len(tasksInfo)\n print 'number of usable tasks: ', len(usableTasks)\n print 'number of usable answers: ', len(answersApp)\n #~ usableData.close()\n #~ exit(1)\n return (usableTasks, answersApp)", "def get_task_info(self):\n\n print()\n employee_name = self.task.get_employee_name()\n task_name = self.task.get_task_name()\n mins = self.task.get_time_spent()\n notes = self.task.get_notes()\n date = self.task.get_date()\n\n task = {\n 'employee_name': employee_name,\n 'task_name': task_name,\n 'mins': mins,\n 'notes': notes,\n 'date': date\n }\n\n return task", "def main():\n url = 'https://jsonplaceholder.typicode.com'\n user = '{}/users/'.format(url)\n todos = '{}/todos/'.format(url)\n\n # GET info from URLs\n res = requests.get(user)\n info = res.json()\n tasks = requests.get(todos)\n todo = tasks.json()\n file_name = 'todo_all_employess.json'\n\n user_id = {}\n user_name = {}\n\n for user in info:\n id = user.get('id')\n user_id[id] = []\n user_name[id] = user.get('username')\n\n for task in todo:\n task_id = {}\n id = task.get('userId')\n task_id = {'username': user_name.get(id), 'task': task.get('title'),\n 'completed': task.get('completed')}\n user_id.get(id).append(task_id)\n\n with open(file_name, 'w') as filename:\n json.dump(user_id, filename)", "def get(self, dnzo_user, task_list):\n self.json_response(task_list=task_list.to_dict())", "def instructor_task_status(request):\r\n output = {}\r\n if 'task_id' in request.REQUEST:\r\n task_id = request.REQUEST['task_id']\r\n output = _get_instructor_task_status(task_id)\r\n elif 'task_ids[]' in request.REQUEST:\r\n tasks = request.REQUEST.getlist('task_ids[]')\r\n for task_id in tasks:\r\n task_output = _get_instructor_task_status(task_id)\r\n if task_output is not None:\r\n output[task_id] = task_output\r\n\r\n return HttpResponse(json.dumps(output, indent=4))", "def test_my_tasks(self):\n url, parsed = self.prepare_urls(\n 'v1:activity-my-tasks', subdomain=self.company.subdomain)\n \n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n \n content = json.loads(response.content)\n self.assertTrue(content.has_key('count'))\n self.assertTrue(content.has_key('next'))\n self.assertTrue(content.has_key('previous'))\n self.assertTrue(content.has_key('results'))", "def get_tasks():\n outbound_tasks = []\n outbound_tasks_with_due_dates = []\n creds = None\n current_path = os.path.dirname(os.path.abspath(__file__))\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the 
authorization flow completes for the first\n # time.\n picked_token_path = current_path + '/token.pickle'\n print(picked_token_path)\n if os.path.exists(picked_token_path):\n with open(picked_token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n current_path + '/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(picked_token_path, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('tasks', 'v1', credentials=creds,\n cache=DiscoveryCache()) # https://github.com/googleapis/google-api-python-client/issues/325\n\n # Call the Tasks API\n tasks = service.tasks().list(tasklist='@default').execute()\n\n for task in tasks['items']:\n reduced = task_reducer(task)\n if reduced is not None:\n if 'due' in reduced:\n outbound_tasks_with_due_dates.append(reduced)\n else:\n outbound_tasks.append(reduced)\n\n outbound_tasks_with_due_dates.sort(key=sort_by_due_date)\n outbound_tasks[:0] = outbound_tasks_with_due_dates\n\n return outbound_tasks", "def return_tasks(data, task_type):\n ongoing = \"*Ongoing:*\\n\"\n overdue = \"*Overdue:*\\n\"\n completed = \"*Completed:*\\n\"\n ongoing_counter = 0\n overdue_counter = 0\n completed_counter = 0\n\n \"\"\"\n task[4] - task\n task[3] - status\n task[2] - assignment\n task[1] - date\n \"\"\"\n for task in data: \n if task[3] == 0:\n ongoing += task[1] + \" | \" + task[4] + \" - \" + task[2] + \"\\n\"\n ongoing_counter = 1\n if task[3] == 1:\n completed += task[1] + \" | \" + task[4] + \" - \" + task[2] + \"\\n\"\n completed_counter = 1\n if task[3] == 2:\n overdue += task[1] + \" | \" + task[4] + \" - \" + task[2] + \"\\n\"\n overdue_counter = 1\n \n if ongoing_counter == 0:\n ongoing += \"You have no ongoing tasks.\\n\"\n if overdue_counter == 0:\n overdue += \"You have no overdue tasks.\\n\"\n if completed_counter == 0:\n completed += \"You have no completed tasks.\\n\"\n\n if task_type == 'completed':\n return completed\n elif task_type == 'ongoing':\n return ongoing\n elif task_type == 'overdue':\n return overdue", "def task2():\n logger.info(\"In API3 task2 function\")\n return \"task2 success!\"", "def on_get(self, req, resp):\n try:\n task_model_list = self.state_manager.get_tasks()\n task_list = [x.to_dict() for x in task_model_list]\n resp.text = json.dumps(task_list)\n resp.status = falcon.HTTP_200\n except Exception as ex:\n self.error(\n req.context,\n \"Unknown error: %s\\n%s\" % (str(ex), traceback.format_exc()))\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)", "def list(ctx, id, json):\n\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n if id != None:\n return ctx.invoke(show, id=id, json=json)\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.list()\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n click.echo(\"Fail: error response\")\n sys.exit(1)\n\n if json:\n print(jsn.dumps(dict_resp, sort_keys=True, indent=4))\n return\n try:\n task.print_list(dict_resp)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))", "def getTask():\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == \"null\":\n\t\treturn None\n\telse:\n\t\treturn 
json.loads(content)", "def get(self):\n user = getAuthData()\n question_list = list_questions()\n # user_question_list = list_questions_by_username(user['username'])\n # nonuser_question_list = list_questions_by_username(user['username'], invert=True)\n\n tasks = get_tasks().values()\n\n # filter out the SUCCESS/FAILURE tasks\n tasks = [t for t in tasks if not (t['state'] == 'SUCCESS' or t['state'] == 'FAILURE' or t['state'] == 'REVOKED')]\n\n # get question hashes\n question_tasks = {q.id:[] for q in question_list}\n for t in tasks:\n if not t['args']:\n continue\n match = re.match(r\"[\\[(]'(.*)',?[)\\]]\", t['args'])\n if not match:\n continue\n question_id = match.group(1)\n question_tasks[question_id].append(t)\n\n # split into answer and update tasks\n for t in tasks:\n t['type'] = 'answering' if t['name'] == 'manager.tasks.answer_question' else \\\n 'refreshing KG' if t['name'] == 'manager.tasks.update_kg' else \\\n 'something?'\n\n def augment_info(question):\n answerset_timestamps = [a.timestamp for a in question.answersets]\n if answerset_timestamps:\n latest_idx = answerset_timestamps.index(max(answerset_timestamps))\n latest_answerset_id = question.answersets[latest_idx].id\n latest_answerset_timestamp = question.answersets[latest_idx].timestamp\n else:\n latest_answerset_id = None\n latest_answerset_timestamp = None\n q = question.toJSON()\n q['user_email'] = question.user.email\n q.pop('user_id')\n q.pop('machine_question')\n return {'latest_answerset_id': latest_answerset_id,\n 'latest_answerset_timestamp': latest_answerset_timestamp.isoformat() if latest_answerset_timestamp else None,\n 'tasks': [t['type'] for t in question_tasks[question.id]],\n **q}\n\n return [augment_info(q) for q in question_list], 200", "def get_tasks(id):\n url = 'https://jsonplaceholder.typicode.com/'\n tasks = requests.get(url + 'todos', params={'userId': id}).json()\n return tasks", "def get_tasks(data: dict) -> dict:\n status_code = http.HTTPStatus.OK\n body = {\"filters\": data}\n try:\n tasks = actions.get_tasks(data)\n body[\"tasks\"] = [task.to_dict() for task in tasks]\n except tskexc.TaskHTTPException as e:\n body = {\"error\": e.message}\n status_code = e.http_status\n return {\"statusCode\": status_code, \"body\": json.dumps(body)}", "async def get_task_result(task_id: TaskId):", "def job_info(url):\n for job in requests.get(url).json():\n yield job", "def get_finished_task_dicts(tasks):\n finished_task_dicts = []\n with database.engine.begin() as connection:\n for task in tasks:\n try:\n download_path = url_for('data', path=task.result)\n task_dict = dict(id=task.id, name=task.name, description=task.description,\n complete=task.complete, result=task.result, download_path=download_path, status='finished', project_id=task.project_id)\n task_dict['meta'] = json.loads(\n task.meta) if task.meta is not None else {}\n finished_task_dicts.append(task_dict)\n project = connection.execute(select([sqlalchemy.text(\n '*')]).select_from(models.projects).where(models.projects.c.project_id == task.project_id)).first()\n if project:\n task_dict['project_name'] = project['name']\n except Exception as err:\n print('exception in api.get_finished_task_dicts')\n print(err)\n return finished_task_dicts", "def show_tasks_status(user, tasks):\n employee_name = user[0]['username']\n all_tasks = tasks\n completed = 0\n title_completed_tasks = ''\n for task in all_tasks:\n if task['completed'] is True:\n completed += 1\n title_completed_tasks += '\\t ' + task['title'] + '\\n'\n print('Employee {} is done with 
tasks({}/{}):'\n .format(employee_name, completed, len(all_tasks)))\n print(title_completed_tasks, end='')", "def test_get_task_output(self):\n pass", "def main():\n\n parser = argparse.ArgumentParser(description='Get university tuition fee')\n parser.add_argument(\n '--wolframalpha', \n dest='wa_app_id', \n default='None',\n help='Please provide the wolfram alpha app id here'\n )\n \n parser.add_argument(\n '--input', \n dest='input_file', \n default='data/input.csv',\n help='Please provide the path to university list'\n )\n\n parser.add_argument(\n '--output', \n dest='output_file', \n default='data/output.csv',\n help='Please provide the path to university list'\n )\n \n parser.add_argument(\n '--start', \n dest='start_name', \n default='None',\n help='Start from a certain university'\n )\n parser.add_argument(\n '--sleep', \n dest='sleep_seconds', \n default='10',\n help='Time to sleep before start the next university; This is added to prevent the ip from being banned.'\n )\n\n args = parser.parse_args()\n wa_app_id = args.wa_app_id\n input_file = args.input_file\n output_file = args.output_file\n start_name = args.start_name\n sleep_seconds = int(args.sleep_seconds)\n\n print('Using wolframalpha app_id: {}'.format(wa_app_id))\n if wa_app_id == 'None':\n raise Exception('Please provide a wolfram alpha app_id using the --wolframalpha option')\n else:\n logging.debug('Using wolframalpha app_id: {}'.format(wa_app_id) )\n\n if start_name == 'None':\n start_name = None\n\n try:\n client = wolframalpha.Client(wa_app_id)\n except Exception as ee:\n raise Exception('Could not initialize wolframalpha client: {}'.format(ee) )\n\n\n try: \n uni_list_to_be_queried = _extract_universities(\n input_file, start=start_name\n )\n except Exception as ee:\n raise Exception('Could not extract list of universities: {}'.format(ee) )\n\n with open(output_file, 'a+') as fp:\n for uni in uni_list_to_be_queried:\n the_tuition = _tuition_pipe(client, uni)\n fp.write(json.dumps(the_tuition) + '\\n' )\n logging.info( \"{} - {} - {}\".format(\n datetime.datetime.now() , uni, the_tuition) \n )\n time.sleep(sleep_seconds)", "def get(self, controller, data, *args, **kwargs): \n task_manager = controller.get_task_manager()\n res = task_manager.get_all_tasks(details=True)\n resp = {\n u'task-instances':res,\n u'count':len(res)\n } \n return resp", "def test_get_us_daily_data(self):\n dag = self.dagbag.get_dag(self.dag_id)\n extract_task = dag.get_task('extract')\n resp = self.extract.getDailyUSDataFromAPI()\n self.assertIsNotNone(resp)\n self.assertEqual(type(resp), list)" ]
[ "0.6383334", "0.6324897", "0.60697365", "0.60460687", "0.6041132", "0.6039913", "0.6039841", "0.6021158", "0.59782183", "0.594846", "0.5938907", "0.590392", "0.58827335", "0.58718795", "0.5845513", "0.58447444", "0.5832608", "0.5792506", "0.5783988", "0.57403696", "0.5719636", "0.5711573", "0.5709967", "0.5702433", "0.5698703", "0.56955624", "0.56759137", "0.5672151", "0.564529", "0.56312233" ]
0.68217677
0
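The document snippet in the truncated row above relies on a sort_by_due_date key function that is not shown. A minimal sketch of what it could look like, assuming (as the Google Tasks API does) that the 'due' field is an RFC 3339 timestamp string, which sorts correctly as plain text; the fallback value is illustrative only:

# Hypothetical helper assumed by the snippet above; not part of the original row.
def sort_by_due_date(task):
    # RFC 3339 timestamps in the same timezone sort lexicographically,
    # so the raw string is a usable sort key. Undated tasks sort last.
    return task.get('due', '9999-12-31T00:00:00.000Z')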
Pads episodes to all be the same length by repeating the last exp.
def pad(episodes):
    max_len = max(len(episode) for episode in episodes)
    mask = torch.zeros((len(episodes), max_len), dtype=torch.bool)
    padded_episodes = []
    for i, episode in enumerate(episodes):
        padded = episode + [episode[-1]] * (max_len - len(episode))
        padded_episodes.append(padded)
        mask[i, :len(episode)] = True
    return padded_episodes, mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad(seq, n):\n return", "def _add_target_n_step_q_to_episode(self, episode):\n horizon = len(episode)\n for t in range(horizon):\n end_idx = min(t + self.n, horizon)\n discount = self.gamma ** (end_idx - t)\n mask = episode[end_idx - 1][\"nonterminal\"]\n episode[t][\"n_step_q\"] = discount * mask * episode[end_idx - 1][\"next_q_pred\"]\n for j in range(t, end_idx):\n if j == t:\n mask = 1.0\n discount = self.gamma ** (j - t)\n episode[t][\"n_step_q\"] += discount * mask * episode[j][\"reward\"]\n mask = episode[j][\"nonterminal\"]", "def add(self, episodes: Union[List[\"_Episode\"], \"_Episode\"]):\n if isinstance(episodes, _Episode):\n episodes = [episodes]\n\n for eps in episodes:\n # Make sure we don't change what's coming in from the user.\n # TODO (sven): It'd probably be better to make sure in the EnvRunner to not\n # hold on to episodes (for metrics purposes only) that we are returning\n # back to the user from `EnvRunner.sample()`. Then we wouldn't have to\n # do any copying. Instead, either compile the metrics right away on the\n # EnvRunner OR compile metrics entirely on the Algorithm side (this is\n # actually preferred).\n eps = copy.deepcopy(eps)\n\n self._num_timesteps += len(eps)\n self._num_timesteps_added += len(eps)\n\n # Ongoing episode, concat to existing record.\n if eps.id_ in self.episode_id_to_index:\n eps_idx = self.episode_id_to_index[eps.id_]\n existing_eps = self.episodes[eps_idx - self._num_episodes_evicted]\n old_len = len(existing_eps)\n self._indices.extend([(eps_idx, old_len + i) for i in range(len(eps))])\n existing_eps.concat_episode(eps)\n # New episode. Add to end of our episodes deque.\n else:\n self.episodes.append(eps)\n eps_idx = len(self.episodes) - 1 + self._num_episodes_evicted\n self.episode_id_to_index[eps.id_] = eps_idx\n self._indices.extend([(eps_idx, i) for i in range(len(eps))])\n\n # Eject old records from front of deque (only if we have more than 1 episode\n # in the buffer).\n while self._num_timesteps > self.capacity and self.get_num_episodes() > 1:\n # Eject oldest episode.\n evicted_eps = self.episodes.popleft()\n evicted_eps_len = len(evicted_eps)\n # Correct our size.\n self._num_timesteps -= evicted_eps_len\n\n # Erase episode from all our indices:\n # 1) Main episode index.\n evicted_idx = self.episode_id_to_index[evicted_eps.id_]\n del self.episode_id_to_index[evicted_eps.id_]\n # 2) All timestep indices that this episode owned.\n new_indices = [] # New indices that will replace self._indices.\n idx_cursor = 0\n # Loop through all (eps_idx, ts_in_eps_idx)-tuples.\n for i, idx_tuple in enumerate(self._indices):\n # This tuple is part of the evicted episode -> Add everything\n # up until here to `new_indices` (excluding this very index, b/c\n # it's already part of the evicted episode).\n if idx_cursor is not None and idx_tuple[0] == evicted_idx:\n new_indices.extend(self._indices[idx_cursor:i])\n # Set to None to indicate we are in the eviction zone.\n idx_cursor = None\n # We are/have been in the eviction zone (i pointing/pointed to the\n # evicted episode) ..\n elif idx_cursor is None:\n # ... 
but are now not anymore (i is now an index into a\n # non-evicted episode) -> Set cursor to valid int again.\n if idx_tuple[0] != evicted_idx:\n idx_cursor = i\n # But early-out if evicted episode was only 1 single\n # timestep long.\n if evicted_eps_len == 1:\n break\n # Early-out: We reached the end of the to-be-evicted episode.\n # We can stop searching further here (all following tuples\n # will NOT be in the evicted episode).\n elif idx_tuple[1] == evicted_eps_len - 1:\n assert self._indices[i + 1][0] != idx_tuple[0]\n idx_cursor = i + 1\n break\n\n # Jump over (splice-out) the evicted episode if we are still in the\n # eviction zone.\n if idx_cursor is not None:\n new_indices.extend(self._indices[idx_cursor:])\n\n # Reset our `self._indices` to the newly compiled list.\n self._indices = new_indices\n\n # Increase episode evicted counter.\n self._num_episodes_evicted += 1", "def episode_step(self):\n self.nsteps += 1", "def add_episodes(self, followed):\n for show in followed:\n for _ in range(10):\n factories.WatchedEpisodeFactory(\n show=show\n )\n # after episodes are created for a show\n # reset episode sequence so future episodes start from 0\n factories.WatchedEpisodeFactory.reset_sequence()", "def pad_to_max_length(self, sequence):\n sequence = sequence[:self.max_seq_length]\n n = len(sequence)\n #return sequence + ['[PAD]'] * (self.max_seq_length - n)\n return sequence + [0] *(self.max_seq_length - n)", "def pad(iterable, value = None):\n return chain(iterable, repeat(value))", "def pad_to_length(word_embeddings, length, padding):\n\n for sentence in word_embeddings:\n num_to_append = length - len(sentence)\n assert num_to_append >= 0\n for _ in range(num_to_append):\n sentence.append(padding)", "def _pad_sequences(sequences, pad=PAD):\n lengths = [tf.shape(x)[0] for x in sequences]\n padded_size = tf.reduce_max(lengths)\n padded_sequences = tf.stack([\n tf.pad(x,\n paddings=[[0, padded_size - lengths[i]]],\n mode='CONSTANT',\n constant_values=pad) for i, x in enumerate(sequences)\n ])\n return padded_sequences, lengths", "def pad_sequence(sequence, max_length, pad):\n padN = max(max_length - len(sequence), 0)\n result = sequence[:max_length - padN] + [pad] * padN\n return result", "def pad_intervals(parts, duration=128):\n part_duration = duration / (parts + 1)\n return [int((i + 1) * part_duration) for i in range(parts)]", "def add_episode(self, ep):\n #make da season\n ses = self._add_season(ep)\n dvdses = self._add_season(ep, dvd=True) \n self._add_episode(ep, ses)\n self._add_episode(ep, dvdses, dvd=True)", "def vec_repeat_at_end(x, p):\n n = x.shape[0]\n indices = jnp.arange(p) % n\n padding = x[indices]\n return jnp.concatenate((x, padding))", "def repeat(self, count):\n x = HSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def pad_sentence_batch(sentence_batch):\n max_sentence = max([len(sentence) for sentence in sentence_batch])\n return [sentence + [CODES['<PAD>']] * (max_sentence - len(sentence))\n for sentence in sentence_batch]", "def _store_episode(self):\n # For each transition in the last episode,\n # create a set of artificial transitions\n for transition_idx, transition in enumerate(self.episode_transitions):\n\n obs_t, action, reward, obs_tp1, done, info = transition\n\n # Add to the replay buffer\n self.replay_buffer.add(obs_t, action, reward, obs_tp1, done)\n\n # We cannot sample a goal from the future in the last step of an episode\n if (transition_idx == len(self.episode_transitions) - 1 and\n self.goal_selection_strategy == 
GoalSelectionStrategy.FUTURE):\n break\n\n # Sampled n goals per transition, where n is `n_sampled_goal`\n # this is called k in the paper\n sampled_goals = self._sample_achieved_goals(self.episode_transitions, transition_idx)\n # For each sampled goals, store a new transition\n for goal in sampled_goals:\n # Copy transition to avoid modifying the original one\n obs, action, reward, next_obs, done, info = copy.deepcopy(transition)\n\n # Convert concatenated obs to dict, so we can update the goals\n obs_dict, next_obs_dict = map(self.env.convert_obs_to_dict, (obs, next_obs))\n\n # Update the desired goal in the transition\n obs_dict['desired_goal'] = goal\n next_obs_dict['desired_goal'] = goal\n\n # Update the reward according to the new desired goal\n reward = self.env.compute_reward(next_obs_dict['achieved_goal'], goal, info)\n # Can we use achieved_goal == desired_goal?\n done = False\n\n # Transform back to ndarrays\n obs, next_obs = map(self.env.convert_dict_to_obs, (obs_dict, next_obs_dict))\n\n # Add artificial transition to the replay buffer\n self.replay_buffer.add(obs, action, reward, next_obs, done)", "def end_episode(self):\n self.training_buffer.reset_local_buffers()\n for agent_id in self.episode_steps:\n self.episode_steps[agent_id] = 0\n for rewards in self.collected_rewards.values():\n for agent_id in rewards:\n rewards[agent_id] = 0", "def repeat(self, count):\n x = _OSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def _update_episode(self):\n if self.episode_num > 0:\n self._publish_reward_topic(\n self.accumulated_episode_reward,\n self.episode_steps,\n self.episode_num\n )\n\n self.episode_num += 1\n self.accumulated_episode_reward = 0\n self.episode_steps = 0", "def episode(self, n):\n rewards = []\n for i in range(n):\n self.episode_n += 1\n self.state = self.env.reset()\n self.agent.new_episode(self.state)\n self.total_reward = 0.0\n t = 0\n for t in range(self.max_episode_len):\n terminate = self.step()\n if terminate:\n break\n logging.warning(\"Episode %d finished after %d steps, total reward=%f\", self.episode_n, t+1,\n self.total_reward)\n summary = tf.Summary()\n summary.value.add(tag=\"episode_total_reward\", simple_value=self.total_reward)\n summary.value.add(tag=\"epsisode_success\", simple_value=self.flag_success)\n self.summary_writer.add_summary(summary, self.episode_n)\n rewards.append(self.total_reward)\n if self.savedir is not None:\n self.steps_saver.end_save()\n return rewards", "def pad_sequence(seq):\n seq_split = seq.strip().split(\"1\")\n last = seq_split[0]\n new_seq = last + \"1\"\n inc_added = 0\n out_added = 0\n for i in range(1, len(seq_split)-1):\n current = seq_split[i]\n\n # break up the intial sequences that leak information by adding padding\n if current == last:\n if last == \"-\":\n new_seq += \"+1\"\n inc_added += 1\n last = \"+\"\n else:\n new_seq += \"-1\"\n out_added += 1\n last = \"-\"\n else:\n new_seq += current + \"1\"\n last = current\n\n # 30% chance to inject randomness\n coin = random.randint(1, 101)\n if coin <= 30:\n if coin % 2 == 0:\n new_seq += \"+1\"\n else:\n new_seq += \"-1\"\n \n # return padded sequence, original number of cells, \n # number of incoming padding cells, and number of outgoing padding cells\n return new_seq, len(seq_split), inc_added, out_added", "def concat_episode(self, episode_chunk: \"_Episode\"):\n assert episode_chunk.id_ == self.id_\n assert not self.is_done\n # Make sure the timesteps match.\n assert self.t == episode_chunk.t_started\n\n episode_chunk.validate()\n\n # 
Make sure, end matches other episode chunk's beginning.\n assert np.all(episode_chunk.observations[0] == self.observations[-1])\n # Make sure the timesteps match (our last t should be the same as their first).\n assert self.t == episode_chunk.t_started\n # Pop out our end.\n self.observations.pop()\n\n # Extend ourselves. In case, episode_chunk is already terminated (and numpyfied)\n # we need to convert to lists (as we are ourselves still filling up lists).\n self.observations.extend(list(episode_chunk.observations))\n self.actions.extend(list(episode_chunk.actions))\n self.rewards.extend(list(episode_chunk.rewards))\n self.t = episode_chunk.t\n self.states = episode_chunk.states\n\n if episode_chunk.is_terminated:\n self.is_terminated = True\n elif episode_chunk.is_truncated:\n self.is_truncated = True\n # Validate.\n self.validate()", "def add_padding(*data, value, maxlen=250, padding=\"post\"):\n return [keras.preprocessing.sequence.pad_sequences(\n d, value=value, padding=padding,\n maxlen=maxlen) for d in data]", "def __pad__(sequence, max_l):\n if max_l - len(sequence) < 0:\n sequence = sequence[:max_l]\n else: \n sequence = np.pad(sequence, (0, max_l - (len(sequence))), 'constant', constant_values=(0))\n return sequence", "def paddingSequence(X_train, X_test, maxLen=30):\r\n #######equalize list of seq\r\n X_train = pad_sequences(X_train, maxLen, padding='post', truncating='post')\r\n X_test = pad_sequences(X_test, maxLen, padding='post', truncating='post')\r\n return X_train, X_test", "def _pad_shorter(sequence: str) -> str:\n return sequence.ljust(3, \"X\")", "def pad_sentence_batch(sentence_batch):\r\n max_sentence = max([len(sentence) for sentence in sentence_batch])\r\n return [sentence + [vocab_to_int['<PAD>']] * (max_sentence - len(sentence)) for sentence in sentence_batch]", "def generate_episode(env, policy, max_steps=500):\n episode = []\n curr_state = env.reset() # reset the environment and place the agent in the start square\n ############################\n # YOUR IMPLEMENTATION HERE #\n \n finished = False\n steps = 0\n \n while not finished and steps < max_steps: # Continue generating episode until finished or max_steps\n # while steps < max_steps:\n action, reward, new_state, finished = take_one_step(env, policy, curr_state) # take a new action in the current state, get new state\n episode.append((curr_state, action, reward))\n curr_state = new_state\n steps += 1\n ############################\n \n return episode", "def run_episode(env, gamma = 1.0, render = False):\n actions = 4\n obs = env.reset()\n total_reward = 0\n step_idx = 0\n\n while True:\n if render:\n env.render()\n obs, reward, done , _ = env.step(random.randint(0, actions - 1))\n x1, x2, x3, x4, x5, x6, x7, x8 = obs\n x1s.append(x1)\n x2s.append(x2)\n x3s.append(x3)\n x4s.append(x4)\n x5s.append(x5)\n x6s.append(x6)\n x7s.append(x7)\n x8s.append(x8)\n total_reward += (gamma ** step_idx * reward)\n step_idx += 1\n if done:\n break\n return total_reward, step_idx", "def pad_sequence(xs, length=None, padding=0):\n return PadSequence(length, padding).apply((xs))[0]" ]
[ "0.62974507", "0.58488435", "0.58399844", "0.56710404", "0.56123376", "0.555278", "0.55409425", "0.55328083", "0.54572415", "0.5408101", "0.5379053", "0.53634495", "0.5328769", "0.5322512", "0.53201896", "0.5319856", "0.5299281", "0.52867496", "0.52844036", "0.5275552", "0.52655447", "0.5264107", "0.52590024", "0.52367413", "0.5203194", "0.5194228", "0.5185443", "0.5172888", "0.51625836", "0.51616335" ]
0.6246594
1
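A minimal usage sketch of the pad helper in the row above, with toy episodes; it assumes PyTorch is importable (pad calls torch.zeros) and that an 'experience' can be any per-step object:

import torch  # pad() above builds the mask with torch.zeros

episodes = [["s0", "s1", "s2"], ["t0"]]   # unequal-length toy episodes
padded, mask = pad(episodes)
# padded -> [['s0', 's1', 's2'], ['t0', 't0', 't0']]  (last experience repeated)
# mask   -> tensor([[ True,  True,  True], [ True, False, False]])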
Returns the environment class specified by the type.
def get_env_class(environment_type):
    if environment_type == "vanilla":
        return city.CityGridEnv
    elif environment_type == "distraction":
        return city.DistractionGridEnv
    elif environment_type == "map":
        return city.MapGridEnv
    elif environment_type == "cooking":
        return cooking.CookingGridEnv
    elif environment_type == "miniworld_sign":
        # Dependencies on OpenGL, so only load if absolutely necessary
        from envs.miniworld import sign
        return sign.MiniWorldSign
    else:
        raise ValueError(
            "Unsupported environment type: {}".format(environment_type))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_environment_class_by_name(environment_type):\n for cls in util.iter_subclasses(Environment):\n if cls.tool_name == environment_type:\n return cls\n raise EnvironmentUnavailable(\n f\"Unknown environment type '{environment_type}'\")", "def get_environment_class(conf, python):\n if python == 'same':\n return ExistingEnvironment\n\n # Try the subclasses in reverse order so custom plugins come first\n classes = list(util.iter_subclasses(Environment))[::-1]\n\n if conf.environment_type:\n cls = get_environment_class_by_name(conf.environment_type)\n classes.remove(cls)\n classes.insert(0, cls)\n\n for cls in classes:\n if cls.matches_python_fallback and cls.matches(python):\n return cls\n raise EnvironmentUnavailable(\n f\"No way to create environment for python='{python}'\")", "def get_class(self):\n return devices.get_class(self.type)", "def _get_environment(cls):\n return cls.__name__.lower()", "def get_env_type ( base_name ) :\n return base_name.split( '-', 1 )[ 0 ]", "def get_system_type_class(cls, system_type):\n if system_type not in cls.system_type_classes:\n raise ValueError(f\"No coordinate system of type {system_type} registered!\")\n\n return cls.system_type_classes[system_type]", "def get_cls_for(obj_type):\n return {\n \"workflow\": Workflow\n }[obj_type]", "def get_class(self, name):\n return self.host.get_class(name)", "def get_class(self):\n\t\treturn self.CLASS", "def get_event_class_by_type(type):\n event_module = importlib.import_module('.'.join(type.split('.')[:-1]))\n return getattr(event_module, type.split('.')[-1])", "def type(cls):\n return cls.__name__", "def get_class(klass, kind):\n return getattr(sys.modules['model'], kind, None)", "def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)", "def _class(self):\n return self.__class", "def find_class(self):\n stack = inspect.stack()\n frame = stack[1][0]\n return frame.f_locals.get('self', None)", "def find_class(self, class_name: str) -> Type:\n pass", "def runtime_class(self) -> Optional[pulumi.Input['RuntimeClassStrategyOptionsArgs']]:\n return pulumi.get(self, \"runtime_class\")", "def getClassName(self):\n n = type(self).__name__\n return n", "def get_class(self):\n return self.meta_model.get_class()", "def get_class(cls):\n return '{}.{}'.format(cls.__module__, cls.__name__)", "def specific_class(self):\n\n specific_type = ContentType.objects.get_for_id(self.specific_type_id)\n return specific_type.model_class()", "def _get_class():\n return ASParameters", "def process_type(process_dict):\n if 'class' not in process_dict:\n exit_perm_fail(\"No class attribute in process\")\n if process_dict['class'] not in ['Workflow', 'CommandLineTool']:\n exit_perm_fail('Invalid class {} in process'.format(process_dict['class']))\n return process_dict['class']", "def get_obj_class(self, obj_type: str) -> Type[TgnObject]:\n pass", "def GetEquipmentClass(typename):\n p_match = EQUIPMENT_CLASS_REGEX.match(typename)\n if p_match:\n return p_match.group(2)\n return None", "def type(self) -> Type[ClassType]:\n return self._type", "def get_message_class_by_type(msgtype):\n\n try:\n module = importlib.import_module('platypush.message.' 
+ msgtype)\n except ImportError as e:\n logging.warning('Unsupported message type {}'.format(msgtype))\n raise RuntimeError(e)\n\n cls_name = msgtype[0].upper() + msgtype[1:]\n\n try:\n msgclass = getattr(module, cls_name)\n except AttributeError as e:\n logging.warning('No such class in {}: {}'.format(\n module.__name__, cls_name))\n raise RuntimeError(e)\n\n return msgclass", "def runtime_class(self) -> Optional[pulumi.Input['RuntimeClassStrategyOptionsPatchArgs']]:\n return pulumi.get(self, \"runtime_class\")", "def _class(self, *args):\r\n\r\n if hasattr(args[0], '__mro__'):\r\n #this is a class\r\n return args[0]\r\n else:\r\n #this is an instance\r\n return type(args[0])", "def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)" ]
[ "0.82389814", "0.7236929", "0.67771494", "0.6746536", "0.65948284", "0.6437139", "0.6344269", "0.63292193", "0.6256534", "0.6174284", "0.61565256", "0.60773396", "0.5924187", "0.5919927", "0.58619225", "0.57948107", "0.57791364", "0.5763649", "0.57397777", "0.5733595", "0.5733537", "0.56835765", "0.56819504", "0.5678712", "0.5668897", "0.5662707", "0.5625533", "0.56182337", "0.56138176", "0.56100357" ]
0.8176247
1
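A sketch of how a dispatcher like get_env_class is typically driven from a config string; the zero-argument construction is an illustrative assumption, since the constructors of the city/cooking/MiniWorld classes are not shown here:

# Illustrative only: real constructor arguments live in the original codebase.
env_cls = get_env_class("cooking")      # -> cooking.CookingGridEnv
env = env_cls()                         # construct however that class expects
# Unknown strings fail fast, which keeps config typos easy to spot:
# get_env_class("typo") raises ValueError: Unsupported environment type: typo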
Two sketches can be merged only if their gamma and min_values are equal.
def mergeable(self, other):
    return self.gamma == other.gamma and self.min_value == other.min_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def canBeMergedWith(self, other):", "def _merge_sanity_check(self, other):\n if self._fields is not None and (\n set(self.query.values_select) != set(other.query.values_select)\n or set(self.query.extra_select) != set(other.query.extra_select)\n or set(self.query.annotation_select) != set(other.query.annotation_select)\n ):\n raise TypeError(\n \"Merging '%s' classes must involve the same values in each case.\"\n % self.__class__.__name__\n )", "def _check_compatible_fill_values(self, other: \"FlattenedStorage\"):\n for k in set(self._fill_values).intersection(other._fill_values):\n if np.isnan(self._fill_values[k]) and np.isnan(other._fill_values[k]):\n continue\n else:\n if self._fill_values[k] != other._fill_values[k]:\n raise ValueError(\n \"Fill values for arrays in storages don't match, can't perform requested operation\"\n )", "def over(input_a, input_b):\n\n comp = input_b.duplicate()\n input_a.premult()\n ImageBufAlgo.over(comp, input_a, input_b)\n\n if comp.has_error:\n print \"Error merging over:\", comp.geterror()\n\n return comp", "def isMergeable(int1,int2):\n if set(int1.span)&set(int2.span) or int1.maxval+1==int2.minval:\n return True\n else:\n return False", "def merge_allowed(merged, buckets, min_waste, max_waste, min_aggr):\n if not len(merged):\n return False\n\n total_freq = sum([f for (_, l2f) in buckets for _, f in l2f.items()])\n curr_aggr = sum([f for _, f in merged[1].items()]) * 1.0 / total_freq\n curr_waste = waste_frac(merged)\n\n return curr_waste < min_waste or curr_waste < max_waste and curr_aggr < min_aggr", "def test_adv_merging(free_alg):\n\n dr = free_alg\n m, n, a, b, c = symbols('m n a b c')\n orig = m * a * b + n * a * c\n factored = (m * b + n * c) * a\n tensor = dr.sum(orig).expand()\n assert tensor.n_terms == 2\n\n res = tensor.merge()\n assert res.n_terms == 1\n amp = res.local_terms[0].amp\n assert amp == orig\n\n res = tensor.merge(consts=(m, n))\n assert res.n_terms == 2\n\n res = tensor.merge(consts=(m, n, b, c))\n assert res.n_terms == 1\n amp = res.local_terms[0].amp\n assert amp == factored\n assert amp != orig\n\n res = tensor.merge(gens=(a, b))\n assert res.n_terms == 2\n\n res = tensor.merge(gens=(a,))\n assert res.n_terms == 1\n amp = res.local_terms[0].amp\n assert amp == factored\n assert amp != orig", "def test_merge_two_two_same():\n run_merge([1, 3], [1, 3], [1, 1, 3, 3])", "def test_merge_only(self):\r\n x, y, z = tensor.vectors('x', 'y', 'z')\r\n t = x * y\r\n self.check([\r\n (x, t, (({}, False), ({t: x}, True))),\r\n (t * 2, x * 2, (({}, False), ({t: x}, True), )),\r\n (x * x, x * y, (({}, False), ({y: x}, True), )),\r\n (x * x, x * y, (({}, False), ({y: x}, True), )),\r\n (x * x + z, x * y + t, (({}, False),\r\n ({y: x}, False),\r\n ({y: x, t: z}, True))),\r\n ],\r\n debug=False)", "def merge(self,best1,best2):\n\t\treturn self.cu_for_merge(best1,best2,False)", "def merge_both_tables():\n old = Table.read('data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')\n wanted = Table.read('data/scocen_candidates_300k_only_spatial_cut.fits')\n additional = Table.read('data/scocen_candidates_300k_only_spatial_cut_200k_to_determine_bg_ols.fits')\n\n d_old = dict(zip(old['source_id'], old['background_log_overlap']))\n d_add = dict(zip(additional['source_id'], additional['background_log_overlap']))\n d_old.update(d_add)\n dct = d_old\n\n ln_bg_ols = [dct[source_id] for source_id in wanted['source_id']]\n print\n len(ln_bg_ols), len(wanted)\n\n wanted['background_log_overlap'] = ln_bg_ols\n print\n 
wanted\n\n wanted.write('data/scocen_candidates_300k_only_spatial_cut.fits', overwrite=True, format='fits')", "def merge_overwrap(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n for j in range(Ly):\n cff = z_u_w[j,N] - z_u_w[j,0]\n if self.hbls[j] + self.hbbl[j] > cff:\n self.hbls[j] = cff\n self.hbbl[j] = cff", "def mergable(self, op):\n if int(self.__size) != int(op.__size):\n return False\n if self.__value != op.__value:\n return False\n return True", "def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False", "def union(self, other):\n if self.capacity != other.capacity or self.error_rate != other.error_rate:\n raise ValueError(\"Unioning filters requires both filters to have \\\nboth the same capacity and error rate\")\n raise NotImplementedError(\"RedisLocalBloomFilter not support union\")", "def test_merge_min(self):\n ars = self.ar[2009][11]['general']\n ars2 = awstats_reader.AwstatsReader(test_file_dir,\n 'joshuakugler.com')[2009][11]['general']\n self.assertEqual(ars.merge(ars2, 'FirstTime', 'first_time'),\n awstats_reader.AwstatsDateTime(2009, 11, 1, 0, 2, 37))", "def merge(): #Status: WIP\r\n pass", "def intersection(self, other):\n if self.capacity != other.capacity or self.error_rate != other.error_rate:\n raise ValueError(\"Intersecting filters requires both filters to \\\nhave equal capacity and error rate\")\n raise NotImplementedError(\"RedisLocalBloomFilter not support intersection\")", "def _do_merge(ext, exts_other):\n for ext_other in exts_other:\n if not ext.is_duplicate(ext_other):\n return False\n return True", "def merge(a, b):\n if isinstance(a, CONFIG_VALID) \\\n and isinstance(b, CONFIG_VALID):\n # dict update\n if isinstance(a, dict) and isinstance(b, dict):\n a.update(b)\n return a\n # list update\n _a = list(a)\n for x in list(b):\n if x not in _a:\n _a.append(x)\n return _a\n if a and b:\n raise Exception(\"Cannot merge\")\n raise NotImplementedError", "def _merge_boundaries(self):\n optical = self._amalgamated_optical\n if bool(optical):\n optical[\"catagory\"] = OPTICAL * tf.ones_like(\n optical[\"xp\"],\n dtype=tf.int64\n )\n self._optical_count = tf.shape(\n optical[\"xp\"],\n out_type=tf.int64\n )[0]\n else:\n self._optical_count = 0\n \n stop = self._amalgamated_stop\n if bool(stop):\n stop[\"catagory\"] = STOP * tf.ones_like(\n stop[\"xp\"],\n dtype=tf.int64\n )\n self._stop_count = tf.shape(\n stop[\"xp\"],\n out_type=tf.int64\n )[0]\n else:\n self._stop_count = 0\n \n target = self._amalgamated_target\n if bool(target):\n target[\"catagory\"] = TARGET * tf.ones_like(\n target[\"xp\"],\n dtype=tf.int64\n )\n self._target_count = tf.shape(\n target[\"xp\"],\n out_type=tf.int64\n )[0]\n else:\n self._target_count = 0\n \n self._merged = amalgamate(\n [optical, stop, target], \n TRIANGLE_GEO_SIG | {\"catagory\"}\n )", "def mergeWith(self, others):", "def fits(x, y):\n return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk", "def remerge_subset():\n import wbia\n\n ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n ibs2 = wbia.opendb('PZ_Master1')\n\n gids1, gids2 = ibs1.images(), ibs2.images()\n idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids)\n isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2)\n\n assert all(\n set.issubset(set(a1), set(a2))\n for a1, a2 in zip(isect_gids1.annot_uuids, isect_gids2.annot_uuids)\n )\n\n annot_uuids = ut.flatten(isect_gids1.annot_uuids)\n # aids1 = 
ibs1.annots(ibs1.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n # aids2 = ibs2.annots(ibs2.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n aids1 = ibs1.annots(uuids=annot_uuids, asarray=True)\n aids2 = ibs2.annots(uuids=annot_uuids, asarray=True)\n import numpy as np\n\n to_aids2 = dict(zip(aids1, aids2))\n # to_aids1 = dict(zip(aids2, aids1))\n\n # Step 1) Update individual annot properties\n # These annots need updates\n # np.where(aids1.visual_uuids != aids2.visual_uuids)\n # np.where(aids1.semantic_uuids != aids2.semantic_uuids)\n\n annot_unary_props = [\n # 'yaws', 'bboxes', 'thetas', 'qual', 'species', 'unary_tags']\n 'yaws',\n 'bboxes',\n 'thetas',\n 'qual',\n 'species',\n 'case_tags',\n 'multiple',\n 'age_months_est_max',\n 'age_months_est_min', # 'sex_texts'\n ]\n to_change = {}\n for key in annot_unary_props:\n prop1 = getattr(aids1, key)\n prop2 = getattr(aids2, key)\n diff_idxs = set(np.where(prop1 != prop2)[0])\n if diff_idxs:\n diff_prop1 = ut.take(prop1, diff_idxs)\n diff_prop2 = ut.take(prop2, diff_idxs)\n logger.info('key = {!r}'.format(key))\n logger.info('diff_prop1 = {!r}'.format(diff_prop1))\n logger.info('diff_prop2 = {!r}'.format(diff_prop2))\n to_change[key] = diff_idxs\n if to_change:\n changed_idxs = ut.unique(ut.flatten(to_change.values()))\n logger.info('Found %d annots that need updated properties' % len(changed_idxs))\n logger.info('changing unary attributes: {!r}'.format(to_change))\n if False and ut.are_you_sure('apply change'):\n for key, idxs in to_change.items():\n subaids1 = aids1.take(idxs)\n subaids2 = aids2.take(idxs)\n prop1 = getattr(subaids1, key)\n # prop2 = getattr(subaids2, key)\n setattr(subaids2, key, prop1)\n else:\n logger.info('Annot properties are in sync. Nothing to change')\n\n # Step 2) Update annotmatch - pairwise relationships\n infr1 = wbia.AnnotInference(aids=aids1.aids, ibs=ibs1, verbose=3, autoinit=False)\n\n # infr2 = wbia.AnnotInference(aids=ibs2.annots().aids, ibs=ibs2, verbose=3)\n aids2 = ibs2.get_valid_aids(is_known=True)\n infr2 = wbia.AnnotInference(aids=aids2, ibs=ibs2, verbose=3)\n infr2.reset_feedback('annotmatch', apply=True)\n\n # map feedback from ibs1 onto ibs2 using ibs2 aids.\n fb1 = infr1.read_wbia_annotmatch_feedback()\n fb1_t = {(to_aids2[u], to_aids2[v]): val for (u, v), val in fb1.items()}\n fb1_df_t = infr2._pandas_feedback_format(fb1_t).drop('am_rowid', axis=1)\n\n # Add transformed feedback into ibs2\n infr2.add_feedback_from(fb1_df_t)\n\n # Now ensure that dummy connectivity exists to preserve origninal names\n # from wbia.algo.graph import nx_utils\n # for (u, v) in infr2.find_mst_edges('name_label'):\n # infr2.draw_aids((u, v))\n # cc1 = infr2.pos_graph.connected_to(u)\n # cc2 = infr2.pos_graph.connected_to(v)\n # logger.info(nx_utils.edges_cross(infr2.graph, cc1, cc2))\n # infr2.neg_redundancy(cc1, cc2)\n # infr2.pos_redundancy(cc2)\n\n infr2.relabel_using_reviews(rectify=True)\n infr2.apply_nondynamic_update()\n\n if False:\n infr2.wbia_delta_info()\n infr2.wbia_name_group_delta_info()\n\n if len(list(infr2.inconsistent_components())) > 0:\n raise NotImplementedError('need to fix inconsistencies first')\n # Make it so it just loops until inconsistencies are resolved\n infr2.prioritize()\n infr2.qt_review_loop()\n else:\n infr2.write_wbia_staging_feedback()\n infr2.write_wbia_annotmatch_feedback()\n infr2.write_wbia_name_assignment()\n\n # if False:\n # # Fix any inconsistency\n # infr2.start_qt_interface(loop=False)\n # test_nodes = [5344, 5430, 5349, 5334, 5383, 2280, 2265, 2234, 
5399,\n # 5338, 2654]\n # import networkx as nx\n # nx.is_connected(infr2.graph.subgraph(test_nodes))\n # # infr = wbia.AnnotInference(aids=test_nodes, ibs=ibs2, verbose=5)\n\n # # randomly sample some new labels to verify\n # import wbia.guitool as gt\n # from wbia.gui import inspect_gui\n # gt.ensure_qapp()\n # ut.qtensure()\n # old_groups = ut.group_items(name_delta.index.tolist(), name_delta['old_name'])\n # del old_groups['____']\n\n # new_groups = ut.group_items(name_delta.index.tolist(), name_delta['new_name'])\n\n # from wbia.algo.hots import simulate\n # c = simulate.compare_groups(\n # list(new_groups.values()),\n # list(old_groups.values()),\n # )\n # ut.map_vals(len, c)\n # for aids in c['pred_splits']:\n # old_nids = ibs2.get_annot_nids(aids)\n # new_nids = ut.take_column(infr2.gen_node_attrs('name_label', aids), 1)\n # split_aids = ut.take_column(ut.group_items(aids, new_nids).values(), 0)\n # aid1, aid2 = split_aids[0:2]\n\n # if False:\n # inspect_gui.show_vsone_tuner(ibs2, aid1, aid2)\n # infr2.start_qt_interface(loop=False)\n\n # if False:\n # # import wbia\n # ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n # infr1 = wbia.AnnotInference(aids='all', ibs=ibs1, verbose=3)\n # infr1.initialize_graph()\n # # infr1.reset_feedback('staging')\n # infr1.reset_feedback('annotmatch')\n # infr1.apply_feedback_edges()\n # infr1.relabel_using_reviews()\n # infr1.apply_review_inference()\n # infr1.start_qt_interface(loop=False)\n # delta = infr2.match_state_delta()\n # logger.info('delta = %r' % (delta,))\n\n # infr2.ensure_mst()\n # infr2.relabel_using_reviews()\n # infr2.apply_review_inference()\n\n # mst_edges = infr2.find_mst_edges()\n # set(infr2.graph.edges()).intersection(mst_edges)\n\n return\n \"\"\"\n TODO:\n Task 2:\n Build AnnotInfr for ibs2 then add all decision from\n ibs1 to the internal feedback dict.\n\n Ensure that all other (esp old name-id related) edges are correctly\n placed, then overrite with new vals (\n make sure implicit vals do not cuase conflicts with new\n explicit vals, but old explicit vals should cause a conflict).\n Then just commit to staging and then commit to annotmatch and\n re-infer the names.\n \"\"\"\n\n # Print some info about the delta\n # def _to_tup(x):\n # return tuple(x) if isinstance(x, list) else x\n # changetype_list = list(zip(\n # delta['old_decision'], delta['new_decision'],\n # map(_to_tup, delta['old_tags']),\n # map(_to_tup, delta['new_tags'])))\n # changetype_hist = ut.dict_hist(changetype_list, ordered=True)\n # logger.info(ut.align(ut.repr4(changetype_hist), ':'))\n\n # import pandas as pd\n # pd.options.display.max_rows = 20\n # pd.options.display.max_columns = 40\n # pd.options.display.width = 160\n # pd.options.display.float_format = lambda x: '%.4f' % (x,)\n\n # a, b = 86, 6265\n # c, d = to_aids1[a], to_aids1[b]\n # inspect_gui.show_vsone_tuner(ibs2, a, b)\n # inspect_gui.show_vsone_tuner(ibs1, to_aids1[a], to_aids1[b])\n # am1 = ibs1.get_annotmatch_rowids_between([to_aids1[a]],\n # [to_aids1[b]])\n # am2 = ibs2.get_annotmatch_rowids_between([a], [b])\n # logger.info(ibs1.db.get_table_csv('annotmatch', rowids=am1))\n # logger.info(ibs2.db.get_table_csv('annotmatch', rowids=am2))\n\n # inspect_gui.show_vsone_tuner(ibs2, 8, 242)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 103)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 6265)", "def min():\n valid=result_alpha.F>0\n src_data.F[valid]=np.minimum( src_data.F[valid],result_data.F[valid] )", "def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n 
self.min_row == other.min_row and\n self.max_row == other.max_row)", "def gamma(x1, x2):\r\n gamma1 = math.exp(a / (1 + a * x1/(b * x2)) ** 2.0) \r\n gamma2 = math.exp(b / (1 + b * x2/(a * x1)) ** 2.0)\t\t\r\n return gamma1, gamma2", "def _compare_attributes_of_interpolate4(self, first: Node, second: Node) -> bool:\n # If some of attributes 'mode', 'coordinate_transformation_mode', 'nearest_mode', 'antialias', 'cube_coeff'\n # are different, then attributes of first and second are not identical.\n for attr in self.default_values_for_opset4.keys():\n default_value = self.default_values_for_opset4[attr]\n if first.soft_get(attr, default=default_value) != second.soft_get(attr, default=default_value):\n return False\n\n # If attributes 'pads_begin' or 'pads_end' of nodes first and second are different, then attributes\n # of first and second are not identical.\n for attr in ['pads_begin', 'pads_end']:\n if not np.array_equal(first.soft_get(attr, default=self.default_pads),\n second.soft_get(attr, default=self.default_pads)):\n return False\n return True", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def isMergableWith(self, op):\n if not is_glsl_block_function(op):\n return False\n if (self.getName() != op.getName()) or (self.getType() != op.getType()):\n return False\n return True" ]
[ "0.561305", "0.5554772", "0.55113935", "0.5468042", "0.54112643", "0.5404954", "0.5348093", "0.53448844", "0.5326112", "0.53066695", "0.5288724", "0.5269989", "0.51783633", "0.51139086", "0.5101292", "0.5095481", "0.507951", "0.50213623", "0.5012159", "0.4999083", "0.4995614", "0.49938977", "0.4976035", "0.49639282", "0.4961714", "0.4944489", "0.49401405", "0.49385703", "0.4936486", "0.49224693" ]
0.7373218
0
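A small illustration of the mergeable guard in the row above; the Sketch class is a hypothetical stand-in exposing only the two attributes the check reads (in DDSketch-style quantile sketches, gamma fixes the bucket boundaries, so sketches built with different gamma cannot be merged bucket-for-bucket):

# Hypothetical minimal stand-in; only gamma and min_value matter to mergeable().
class Sketch:
    def __init__(self, gamma, min_value):
        self.gamma = gamma
        self.min_value = min_value

    def mergeable(self, other):
        return self.gamma == other.gamma and self.min_value == other.min_value

a = Sketch(gamma=1.02, min_value=1e-9)
b = Sketch(gamma=1.02, min_value=1e-9)
c = Sketch(gamma=1.05, min_value=1e-9)
assert a.mergeable(b)        # identical parameters: buckets line up, merge is safe
assert not a.mergeable(c)    # different gamma: bucket boundaries differ, refuse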
Takes a list of revision dicts and extracts globals, includes, and pages. Expects the revision dicts to be sorted already. Returns a 3-tuple.
def extract_data(data, rev=0):
    globs = {'_pages': {}}
    includes = []
    pages = []
    pages_list = []
    for datum in data:
        globs.update(datum.get('globals', {}))
        includes += datum.get('includes', [])
        datum_pages = datum.get('pages', [])
        for page in datum_pages:
            if rev and datum.get('revision', None) == rev:
                page['_new'] = 1
            globs['_pages'][page['_id']] = page
            pages.append(page)
            if page.get('datetime'):
                pages_list.append(page)
    globs['_pages_list'] = pages_list
    return globs, includes, pages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_revision_pages(url_text):\n\trevision_links = []\n\tgrammar_indices = [m.start() for m in re.finditer(\"grammar\", url_text.lower())]\n\t# print(\"Grammar indices:\",grammar_indices)\n\n\tfor i in range(len(grammar_indices)):\n\t\tgrammar_index = grammar_indices[i] \n\t\tprev_index = url_text[:grammar_index].rfind('prev')\n\t\thref_index = url_text[:prev_index].rfind('href')\n\t\turl_start_index = url_text[href_index:].find(\"\\\"\")+href_index\n\t\turl_end_index = url_text[url_start_index+1:].find(\"\\\"\")+url_start_index+1\n\t\turl2 = WIKI_URL+url_text[url_start_index+1:url_end_index]\n\t\trevision_links+=[url2]\n\n\treturn list(set(revision_links))", "def content(tmp_loc, ref_names_dict, order):\n \n fl = '[Content_Types].xml'\n inp_path = '/'.join([tmp_loc, fl])\n out_path = '/'.join([output_path, fl])\n \n cnt_lst = []\n asset_lst = []\n def_att = []\n d = dict()\n \n root1,tree1 = gen_tree(inp_path)\n root2,tree2 = gen_tree(out_path)\n \n # get all the extensions belongs to \"Default\" tag\n for relation in root2:\n if 'Default' in relation.tag:\n def_att.append(relation.attrib['Extension'])\n else:\n break\n \n for relation in root1:\n if 'Override' in relation.tag:\n attrib = relation.attrib['PartName'][1:]\n try:\n cnt = attrib.split('ppt/')[-1]\n ini = '/ppt/'\n except:\n cnt = attrib\n ini = '/'\n if cnt in ref_names_dict.keys():\n relation.attrib['PartName'] = f'{ini}{ref_names_dict[cnt]}'\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['PartName'])\n else:\n cnt_lst.append(relation)\n if relation.attrib['PartName'] not in asset_lst:\n asset_lst.append(relation.attrib['PartName'])\n else:\n attrib = relation.attrib['Extension']\n if attrib not in def_att:\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['Extension'])\n # deal with the assest_lst\n # print(\"AA: \", asset_lst)\n cnt_lst = natsort.natsorted(cnt_lst)\n for ele in cnt_lst:\n prev = tree2.find(ele.tag)\n prev.addnext(ele)\n \n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n \n unq_attr = []\n for relation in root2:\n if 'Override' in relation.tag:\n if relation.attrib['PartName'] not in unq_attr:\n unq_attr.append(relation.attrib['PartName'])\n else:\n root2.remove(relation)\n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)", "def _compare(self, previous, variant, all_revisions):\n entries = get_entries('article-pyrus' + variant,\n allrevisions=all_revisions)\n result = [entry.__dict__ for entry in entries]\n if previous:\n self.assertEqual(previous, result)\n return result", "def includes(self):\n r = {\n k: sorted(list(copy.deepcopy(v).values()), key=lambda x: x.get(\"order\", 0))\n for k, v in list(self.get_config(\"includes\").items())\n }\n if self.version is not None:\n for k, v in list(r.items()):\n for j in v:\n j[\"path\"] = self.versioned_url(j[\"path\"])\n return r", "def get_pages(epObject, fileDict):\r\n homePage = DOMAIN + epObject.ViewLink\r\n soup = make_soup(homePage)\r\n fileDict['pageUrls'].append(homePage)\r\n fileDict['pageFileNames'].append('index.html')\r\n fileDict['pageIds'].append(str(epObject.ObjectId))\r\n for a in soup.find_all('a', {'href': 'javascript://'}):\r\n if a['onclick'].find('GotoPage') > 0:\r\n pageId = get_page_id(str(a['onclick']), str(epObject.ObjectId))\r\n if pageId not in fileDict['pageIds']:\r\n address = homePage + \"&pageId={0}\".format(pageId)\r\n fileName = a.string.replace(' ', '').lower() + \".html\"\r\n 
fileDict['pageUrls'].append(address)\r\n fileDict['pageFileNames'].append(fileName)\r\n fileDict['pageIds'].append(pageId)\r\n return fileDict", "def test_three_paginated_list_pages():\n\n TEST_WEBSITE.wipe()\n _clean_up_test_items_md()\n _generate_test_items_md(10)\n\n Webpage.write_list_pages_from_directory(TEST_WEBSITE, TEST_WEBSITE.config.value('source_path'))\n\n # There should be an index.html and exactly 2 blog-n.html files\n assert path.isfile(TEST_WEBSITE.config.value('output_path') + 'index.html')\n assert not path.isfile(TEST_WEBSITE.config.value('output_path') + 'blog-1.html')\n assert path.isfile(TEST_WEBSITE.config.value('output_path') + 'blog-2.html')\n assert path.isfile(TEST_WEBSITE.config.value('output_path') + 'blog-3.html')\n assert not path.isfile(TEST_WEBSITE.config.value('output_path') + 'blog-4.html')\n\n with open(TEST_WEBSITE.config.value('output_path') + 'index.html', 'r') as myfile:\n blog_1_content = myfile.read()\n\n with open(TEST_WEBSITE.config.value('output_path') + 'blog-2.html', 'r') as myfile:\n blog_2_content = myfile.read()\n\n with open(TEST_WEBSITE.config.value('output_path') + 'blog-3.html', 'r') as myfile:\n blog_3_content = myfile.read()\n\n assert blog_1_content.count('<article>') == 4\n assert 'Article 10.' in blog_1_content\n assert 'Article 9.' in blog_1_content\n assert 'Article 8.' in blog_1_content\n assert 'Article 7.' in blog_1_content\n assert '<p>Listing page template</p>' in blog_1_content\n\n\n assert blog_2_content.count('<article>') == 4\n assert 'Article 6.' in blog_2_content\n assert 'Article 5.' in blog_2_content\n assert 'Article 4.' in blog_2_content\n assert 'Article 3.' in blog_2_content\n assert '<p>Listing page template</p>' in blog_2_content\n\n assert blog_3_content.count('<article>') == 2\n assert 'Article 2.' in blog_3_content\n assert 'Article 1.' in blog_3_content\n assert '<p>Listing page template</p>' in blog_3_content\n\n\n # Page title = \"Website Name - Page n\"\n assert 'Test website name - test tag & line' in blog_1_content\n assert '<title>Test website name - Page 2</title>' in blog_2_content\n assert '<title>Test website name - Page 3</title>' in blog_3_content\n\n # First page should have link to older posts but not newer\n assert '<a href=\"blog-2.html\" class=\"magnetizer-next\">Older posts</a>' in blog_1_content\n assert 'class=\"magnetizer-previous\"' not in blog_1_content\n\n # Middle page should have link to older posts (i.e. 
homepage) and newer\n assert '<a href=\"blog-3.html\" class=\"magnetizer-next\">Older posts</a>' in blog_2_content\n assert '<a href=\"/\" class=\"magnetizer-previous\">Newer posts</a>' in blog_2_content\n\n # Last page should have link to newer posts but not older\n assert 'class=\"magnetizer-next\"' not in blog_3_content\n assert '<a href=\"blog-2.html\" class=\"magnetizer-previous\">Newer posts</a>' in blog_3_content\n\n # Pages should have meta description from config\n assert '<meta name=\"description\" content=\"Meta \\\\\"description\\\\\" from cfg\">' in blog_1_content\n assert '<meta name=\"description\" content=\"Meta \\\\\"description\\\\\" from cfg\">' in blog_2_content\n assert '<meta name=\"description\" content=\"Meta \\\\\"description\\\\\" from cfg\">' in blog_3_content\n\n # index.html and the blog-n pages should be present in the sitemap\n assert 'https://example.com/' in TEST_WEBSITE.sitemap.pages\n assert not 'https://example.com/blog-1.html' in TEST_WEBSITE.sitemap.pages\n assert 'https://example.com/blog-2.html' in TEST_WEBSITE.sitemap.pages\n assert 'https://example.com/blog-3.html' in TEST_WEBSITE.sitemap.pages", "def get_pages(book, site):\n if 'manga24.ru' in site:\n name =book.split('/')[-1]\n if(not name):\n name = book.split('/')[-2];\n try:\n page = urllib2.urlopen(book)\n html = page.read()\n try:\n parsedhtml = BeautifulSoup(html)\n js = parsedhtml.findAll('script')\n lines = js[2].text.split('\\n')\n dir = lines[5].split(' ')[-1]\n files = ' '.join(lines[7].strip().split()[1:])\n dir = dir.replace('\\/', '/')[1:-2]\n files = files[2:-2].split('], [')\n res_files = []\n for file in files:\n match = pattern_manga24.match(file)\n res_files.append(dir + match.group(1))\n return (res_files, name)\n except Exception as e:\n print e.message\n sys.exit()\n except Exception:\n print \"Problem with internet connection, or something\"\n sys.exit()\n elif 'adultmanga.ru' in site:\n name =book.split('/')[-1]\n name = book.split('/')[-2]+ \"_\" +name;\n pos = name.find('?')\n if pos >= 0:\n name = name[:pos]\n print name\n try:\n print book\n page = urllib2.urlopen(book)\n html = page.read()\n try:\n parsedhtml = BeautifulSoup(html)\n js = parsedhtml.findAll('script')\n lines = js[12].text.split('var')\n #dir = lines[3].split(' ')[-1]\n files = lines[2]\n pos = files.find('=')\n if pos >= 0:\n files = files[pos+2:]\n files = files.strip()[:-1]\n files = files[2:-2].split(\"},{\")\n res_files = []\n for file in files:\n match = pattern_adultmanga.match(file)\n res_files.append(match.group(1))\n return (res_files, name)\n except Exception as e:\n print e.message\n sys.exit()\n except Exception:\n print \"Problem with internet connection, or something\"\n sys.exit()\n else:\n print \"Can't work with this site\"\n sys.exit()", "def test_docdict_order():\n from mne.utils.docs import docdict\n\n # read the file as text, and get entries via regex\n docs_path = Path(__file__).parent.parent / \"utils\" / \"docs.py\"\n assert docs_path.is_file(), docs_path\n with open(docs_path, \"r\", encoding=\"UTF-8\") as fid:\n docs = fid.read()\n entries = re.findall(r'docdict\\[(?:\\n )?[\"\\'](.+)[\"\\']\\n?\\] = ', docs)\n # test length & uniqueness\n assert len(docdict) == len(entries)\n # test order\n assert sorted(entries) == entries", "def inspect(filename):\n bfile = open(filename, 'rb')\n bdata = bfile.read()\n bfile.close()\n doc = loads(bdata)\n file_seq = []\n second = None\n for ver, snapshot in enumerate(doc.index):\n nb_obj = len(snapshot)\n cache = nb_obj * [None]\n 
mini_index = nb_obj * [None]\n for i in range(1, len(snapshot)):\n mini_index[i] = (snapshot[i]['o_gen'], snapshot[i]['o_ver'])\n if type(snapshot[0]) == list:\n second = snapshot[0].pop()\n snapshot[0] = snapshot[0][0]\n memoize_obj_in_cache([snapshot], doc.bdata, i, cache)\n snapshot[0]['content'] = cache[0]\n snapshot[0]['mini_index'] = mini_index\n if 'xref_stream' not in snapshot[0]:\n file_seq.append(snapshot[0])\n snapshot[0] = second\n for i in range(len(snapshot)):\n if snapshot[i]['o_num'] == 0 and 'xref_stream' in snapshot[i]:\n snapshot[i]['ignore'] = True\n continue\n memoize_obj_in_cache([snapshot], doc.bdata, i, cache)\n snapshot[i]['content'] = cache[i]\n snapshot[i]['mini_index'] = mini_index\n if i == 0: print(snapshot[i])\n file_seq.extend(snapshot)\n file_seq = [x for x in file_seq if x is not None and 'ignore' not in x]\n pos_index = {}\n\n STARTXREF = b'startxref'\n startxref_pos = 0\n while True:\n startxref_pos = bdata.find(STARTXREF, startxref_pos)\n if startxref_pos == -1:\n break\n i, j, _ = next_token(bdata, startxref_pos + len(STARTXREF))\n xref_pos = int(bdata[i:j])\n file_seq.append({'abs_pos':startxref_pos, 'o_num':-1, 'o_gen':-1, 'o_ver':startxref_pos,\n 'mini_index':None, 'content':xref_pos})\n startxref_pos += len(STARTXREF)\n\n EOF = b'%%EOF'\n eof_pos = 0\n while True:\n eof_pos = bdata.find(EOF, eof_pos)\n if eof_pos == -1:\n break\n file_seq.append({'abs_pos':eof_pos, 'o_num':-2, 'o_gen':-2, 'o_ver':eof_pos,\n 'mini_index':None, 'content':None})\n eof_pos += len(EOF)\n \n for obj in file_seq:\n if 'abs_pos' in obj and obj['o_num'] != -2:\n pos_index[obj['abs_pos']] = f\"{obj['o_num']}.{obj['o_gen']}.{obj['o_ver']}\"\n file_seq.sort(key=lambda x: x.get('abs_pos') or x.get('a_')) \n print(build_html(file_seq, pos_index, filename))", "def GetRevisionsSample():\n client = CreateClient()\n for entry in client.GetResources(limit=55).entry:\n revisions = client.GetRevisions(entry)\n for revision in revisions.entry:\n print revision.publish, revision.GetPublishLink()", "def scan_path(path,ext_lst=['md','markdown']):\n if not os.path.exists(path):\n logger.debug('scan_path: invalid path : %s' %(path,files))\n return -1\n pages={}\n files=[]\n for item in glob.glob(os.path.join(path,'*.*')):\n if string.split(os.path.basename(item),'.')[-1] in ext_lst:\n files.append(item)\n logger.debug('scan_path: %s : %s' %(path,files))\n res = None\n pagelist=[]\n articledict={}\n for item in files:\n res=os.path.basename(item).split()\n res=string.split(os.path.basename(item),'.')\n #print 'scan_path: res : %s' %res \n if res[0] not in pagelist:\n logger.debug('scan_path: add page : %s' %res[0]) \n pagelist.append(res[0])\n articledict[res[0]]=[] #or add itself?\n articledict[res[0]].append(item) \n pages['pagelist'] = pagelist\n if len(pagelist)>0:\n for page in pagelist: \n logger.debug('scan_path: pages= %s' %page)\n logger.debug('scan_path: articles= %s' %articledict[page]) \n pages[page]=articledict[page] \n #look for templates/layout... 
To ENHANCE...\n pages['template']=glob.glob(os.path.join(path,'*.tpl')) \n if os.path.isfile(os.path.join(path,'layout.tpl')):\n logger.info('scan_path: found general layout file : %s' %os.path.join(path,'layout.tpl'))\n pages['layout']=os.path.join(path,'layout.tpl')\n return pages", "def main():\n lines_list = []\n with open(bookmark_file, 'r') as f:\n lines_list = f.readlines()\n entries_list = []\n for idx, line in enumerate(lines_list):\n entry = {}\n if re.match(r'^<DT>', line):\n entry['url'] = re.match(r'^.*HREF=\\\"([^\\\"]+)\\\"', line).group(1)\n entry['add_date'] = re.match(r'^.*ADD_DATE=\\\"([^\\\"]+)\\\"', line).group(1)\n entry['private'] = re.match(r'^.*PRIVATE=\\\"([^\\\"]*)\\\"', line).group(1)\n entry['tags'] = re.match(r'^.*TAGS=\\\"([^\\\"]*)\\\"', line).group(1).split(',')\n entry['title'] = re.match(r'^.*<A [^>]+>(.*)</A>', line).group(1)\n if re.match(r'^<DD>', lines_list[idx + 1]):\n dd_tmp = []\n increment = 1\n try:\n while True:\n if re.match(r'^<DT>', lines_list[idx + increment]):\n break\n dd_tmp.append(re.match(r'^(<DD>)?(.*)$', lines_list[idx + increment]).group(2))\n increment += 1\n except:\n pass\n entry['description'] = '\\n'.join(dd_tmp)\n entries_list.append(entry)\n return entries_list", "async def org_info_above_14(orgs_urls14):\n org_info_14 = []\n project_urls_from14 = []\n for url in orgs_urls14:\n try:\n soup = await get_page(url)\n org_name = basename(url)\n org_info = soup.find_all('p')\n web_page = org_info[1].text.splitlines()[-1].strip()\n mailing_list = org_info[2].text.split(\":\")[-1].strip()\n description = soup.find('div', {'class': 'main mdl-cell mdl-cell--8-col\\\n mdl-card mdl-shadow--4dp'})\n detail = description.find_all('p')[2].nextSibling\n org_info_14.append({'name': org_name, 'page': web_page,\n 'about': detail, 'mail': mailing_list,\n 'link': url})\n project_urls_from14.extend(grab_project_links(soup))\n except IndexError:\n print(url)\n\n return org_info_14, get_project_info(project_urls_from14)", "def main():\n\t\tn = 0 \n\t\tfor page in range(pages):\n\t\t\t\tpageNumber = str(page + 1)\n\t\t\t\tprint \"Processing page number \" + pageNumber\n\t\t\t\tpageUrl = 'https://api.github.com/users/' + USER + '/gists?page=' + pageNumber + '&per_page=' + str(int(perpage))\n\t\t\t\tu = urlopen (pageUrl)\n\t\t\t\tgists = json.load(u)\n\t\t\t\t\t\t \n\t\t\t\tfor gist in gists:\n\t\t\t\t\t\tn += 1\n\t\t\t\t\t\tprint \"==== %d ====\" % n\n\t\t\t\t\t\t# print gist.keys()\n\t\t\t\t\t\tgistd = gist['id']\n\t\t\t\t\t\tgisturl = gist['html_url']\n\t\t\t\t\t\tgistdesc = gist['description'] or gistd\n\t\t\t\t\t\tgistfiles = gist['files']\n\t\t\t\t\t\tprint \"gistd: \", gistd\n\t\t\t\t\t\tprint \"gisturl: \", gisturl\n\t\t\t\t\t\tprint \"gistdesc: \", gistdesc\n\t\t\t\t\t\tprint \"gistfiles: \", len(gistfiles)\n\t\t\t\t\t\tfor f in gistfiles:\n\t\t\t\t\t\t\t\tfileurl = gistfiles[f]['raw_url']\n\t\t\t\t\t\t\t\t_filetype = gistfiles[f]['language']\n\t\t\t\t\t\t\t\tif _filetype in ALLOWED_FILE_TYPES:\n\t\t\t\t\t\t\t\t\t\tfiletype = _filetype\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tfiletype = \"None\"\n\t\t\t\t\t\t\t\tprint \"fileurl: \", fileurl \n\t\t\t\t\t\t\t\tprint \"filetype: \", filetype, \"(found='%s')\" % _filetype \n\t\t\t\t\t \n\t\t\t\t\t\t\t\tif TESTING:\n\t\t\t\t\t\t\t\t\t\t# testing\n\t\t\t\t\t\t\t\t\t\treq = urlopen(fileurl)\n\t\t\t\t\t\t\t\t\t\tbodytext = req.read()\n\t\t\t\t\t\t\t\t\t\tencoding=req.headers['content-type'].split('charset=')[-1]\n\t\t\t\t\t\t\t\t\t\tucontent = unicode(bodytext, 
encoding)\n\t\t\t\t\t\t\t\t\t\tbodytext = \"# \" + gisturl + \"\\n\\n\" + ucontent\n\t\t\t\t\t\t\t\t\t\t# bodytext = ucontent\n\t\t\t\t\t\t\t\t\t\timport_dash(gistdesc, bodytext, filetype)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\treq = urlopen(fileurl)\n\t\t\t\t\t\t\t\t\t\t\t\tbodytext = req.read()\n\t\t\t\t\t\t\t\t\t\t\t\tencoding=req.headers['content-type'].split('charset=')[-1]\n\t\t\t\t\t\t\t\t\t\t\t\tucontent = unicode(bodytext, encoding)\n\t\t\t\t\t\t\t\t\t\t\t\tbodytext = \"# \" + gisturl + \"\\n\\n\" + ucontent\n\t\t\t\t\t\t\t\t\t\t\t\t# bodytext = ucontent\n\t\t\t\t\t\t\t\t\t\t\t\timport_dash(gistdesc, bodytext, filetype)\n\t\t\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\t\t\t\tprint e\n\t\t\t\t\t\t\t\t\t\t\t\tprint \"*** ERROR WRITING TO sqlite3 ***\"\n\t\t\t\t\t\t\t\t\t\t\t\tpass\n\n\t\t\t\tif TESTING:\n\t\t\t\t\t\t# so to avoid calling github API too much...\n\t\t\t\t\t\tbreak", "def loadVersionMap():\n\tlines = readVersionList(cfg.FILE_VERSION)\n\tver_map = {}\n\tval = []\n\tflag = False\n\n\tfor line in lines:\n\t\tline = line.strip()\n\t\tif line.startswith(cfg.FLAG_TIPS):\n\t\t\tline_list = line.split()\n\t\t\tlen_row = len(line_list)\n\t\t\ta_DmnNum = {}\n\t\t\tDOMAIN = cfg.FLAG_NULL\n\n\t\t\tfor i in range(0,len_row):\n\t\t\t\tDOMAIN = line_list[i]\n\t\t\t\ta_DmnNum[DOMAIN] = i\n\t\t\tval = line_list\n\t\telif line.startswith(cfg.OPEN_BRACKET):\n\t\t\tleft = line.find(cfg.OPEN_BRACKET)\n\t\t\tright = line.find(cfg.CLOSE_BRACKET)\n\t\t\tName = line[left+1:right].strip()\n\t\t\tver_map[Name] = []\n\t\t\tver_map[Name].append(val[1:])\n\t\telif not line:\n\t\t\tcontinue\n\t\telse:\n\t\t\tline_list = line.split()\n\t\t\tver_map[Name].append(line_list)\n\tsortVersion(ver_map)\n\treturn ver_map", "async def org_info_below_13(org_urls13):\n org_info_till13 = []\n project_urls_till13 = []\n for url in org_urls13:\n # General information about the org\n try:\n soup = await get_page(url)\n org_name = basename(url)\n org_info = soup.find_all('p')\n web_page = org_info[0].text.splitlines()[-1].strip()\n mailing_list = org_info[1].text.split(\":\")[-1].strip()\n detail = org_info[2].text\n org_info_till13.append({'name': org_name, 'about': detail,\n 'page': web_page, 'mail': mailing_list,\n 'link': url})\n project_urls_till13.extend(grab_project_links(soup))\n\n except IndexError:\n print(url)\n\n return org_info_till13, get_project_info(project_urls_till13)", "def svn_rev_info(path): # pragma: no cover\n if not os.path.isdir(os.path.join(path, '.svn')):\n path = os.path.join(path, '..')\n\n _program_dir = path\n filename = os.path.join(_program_dir, '.svn/entries')\n if os.path.isfile(filename):\n with open(filename) as entries:\n version = entries.readline().strip()\n if version != '12':\n for _ in range(3):\n entries.readline()\n tag = entries.readline().strip()\n t = tag.split('://', 1)\n t[1] = t[1].replace('svn.wikimedia.org/svnroot/pywikipedia/',\n '')\n tag = '[{}] {}'.format(*t)\n for _ in range(4):\n entries.readline()\n date = time.strptime(entries.readline()[:19],\n '%Y-%m-%dT%H:%M:%S')\n rev = entries.readline()[:-1]\n return tag, rev, date\n\n # We haven't found the information in entries file.\n # Use sqlite table for new entries format\n from sqlite3 import dbapi2 as sqlite\n with closing(\n sqlite.connect(os.path.join(_program_dir, '.svn/wc.db'))) as con:\n cur = con.cursor()\n cur.execute(\"\"\"select\nlocal_relpath, repos_path, revision, changed_date, checksum from nodes\norder by revision desc, 
changed_date desc\"\"\")\n _name, tag, rev, date, _checksum = cur.fetchone()\n cur.execute('select root from repository')\n tag, = cur.fetchone()\n\n tag = os.path.split(tag)[1]\n date = time.gmtime(date / 1_000_000)\n return tag, rev, date", "def get_pages() -> [(str, str, int)]:\n\ttext = requests.get(url_pages).text\n\ttable = re.search(pat_program_table, text).group(1)\n\tpages = re.findall(pat_program_entry, table)[2:] # First 2 - table headers\n\treturn [get_page(x) for x in pages]", "def read_pages(self, repo, extension, exception_list):\n for file in os.listdir(self.repo_path):\n if file.endswith('.'.join(['', extension])):\n if file not in exception_list:\n file_handler = FileHandler(self.repo_path, file)\n content = file_handler.read_file()\n head_data, body_content = (\n file_handler.read_wrapped_content(content, '---'))\n head_dict = YAMLHandler().read_yaml(head_data)\n # will have to intialize full_dict as the content of dict\n # never changes.\n full_dict = {}\n full_dict = dict(copy.deepcopy(head_dict))\n full_dict['content'] = body_content\n full_dict['repo'] = repo\n PageDbIO().save_db_instance(full_dict)", "def extract_all_references(dig_parent_dir, readfile):\n dig_parent_path_obj = Path(dig_parent_dir)\n extracted = {\"refs\": {}, \"hrefsToRefs\": {}}\n for split_page_num in [282, 283, 284]:\n split_page_dir = dig_parent_path_obj / \"dig/html/split\"\n refs_html = readfile(\n \"report\" + str(split_page_num) + \"b.html\", split_page_dir\n )\n data = extract_references_page(refs_html)\n extracted['refs'].update(data['refs'])\n extracted['hrefsToRefs'].update(data['hrefsToRefs'])\n\n return extracted", "def __get_relevant_pages__():\n try:\n assert type(relevant_pages) is set\n assert len(relevant_pages) > 0\n\n return relevant_pages\n except (NameError, AssertionError):\n return None", "def _knownrevs(repo, nodes):\n torev = repo.changelog.nodemap.get\n for n in nodes:\n rev = torev(n)\n if rev is not None:\n yield rev", "def load_revisions(self) -> Dict[str, DBRevision]:\n ret_revisions: Dict[str, DBRevision] = {}\n for revision_path in self.migrations.rglob(\"*.y?ml\"):\n revision_dict: Dict = yaml.load(revision_path.open(), Loader=yaml.SafeLoader)\n revision: DBRevision = DBRevision.from_yaml(revision_dict)\n if revision.active:\n ret_revisions[revision.revision_name] = revision\n\n return ret_revisions", "def populate_file_dict(epObject, uc, fileDict):\r\n fileDict = get_pages(epObject, fileDict)\r\n for url in fileDict['pageUrls']:\r\n soup = make_soup(url)\r\n fileDict = get_embedded_object(soup, fileDict, uc)\r\n fileDict = get_css(soup, fileDict)\r\n fileDict = get_img(soup, fileDict, uc)\r\n return fileDict", "def get_page_args():\n pages = {}\n for arg in request.args:\n re_match = re.findall(\"page_(.*)\", arg)\n if re_match:\n pages[re_match[0]] = int(request.args.get(arg))\n return pages", "def common_template_data(request, revision=None, mime_type=None):\n\n cfg = request.cfg\n\n # Initialize data dictionary members (sorted alphanumerically)\n data = TemplateData(\n {\n \"annotate_href\": None,\n \"cfg\": cfg,\n \"docroot\": (\n cfg.options.docroot is None\n and request.script_name + \"/\" + docroot_magic_path\n or cfg.options.docroot\n ),\n \"download_href\": None,\n \"download_text_href\": None,\n \"graph_href\": None,\n \"home_href\": request.script_name or \"/\",\n \"kv\": request.kv,\n \"lockinfo\": None,\n \"log_href\": None,\n \"nav_path\": nav_path(request),\n \"pathtype\": None,\n \"prefer_markup\": ezt.boolean(0),\n \"queryform_href\": None,\n 
\"rev\": None,\n \"revision_href\": None,\n \"rootname\": (request.rootname and request.server.escape(request.rootname) or None),\n \"rootpath\": request.rootpath,\n \"roots_href\": None,\n \"roottype\": request.roottype,\n \"rss_href\": None,\n \"tarball_href\": None,\n \"up_href\": None,\n \"username\": request.username,\n \"view\": _view_codes[request.view_func],\n \"view_href\": None,\n \"vsn\": __version__,\n \"where\": request.server.escape(request.where),\n }\n )\n\n rev = revision\n if not rev:\n rev = request.query_dict.get(\"annotate\")\n if not rev:\n rev = request.query_dict.get(\"revision\")\n if not rev and request.roottype == \"svn\":\n rev = request.query_dict.get(\"pathrev\")\n try:\n data[\"rev\"] = hasattr(request.repos, \"_getrev\") and request.repos._getrev(rev) or rev\n except vclib.InvalidRevision:\n raise ViewVCException(\"Invalid revision\", \"404 Not Found\")\n\n if request.pathtype == vclib.DIR:\n data[\"pathtype\"] = \"dir\"\n elif request.pathtype == vclib.FILE:\n data[\"pathtype\"] = \"file\"\n\n if request.path_parts:\n dir = _path_join(request.path_parts[:-1])\n data[\"up_href\"] = request.get_url(\n view_func=view_directory, where=dir, pathtype=vclib.DIR, params={}, escape=1\n )\n\n if \"roots\" in cfg.options.allowed_views:\n data[\"roots_href\"] = request.get_url(view_func=view_roots, escape=1, params={})\n\n if request.pathtype == vclib.FILE:\n fvi = get_file_view_info(request, request.where, data[\"rev\"], mime_type)\n data[\"view_href\"] = fvi.view_href\n data[\"download_href\"] = fvi.download_href\n data[\"download_text_href\"] = fvi.download_text_href\n data[\"annotate_href\"] = fvi.annotate_href\n data[\"revision_href\"] = fvi.revision_href\n data[\"prefer_markup\"] = fvi.prefer_markup\n data[\"log_href\"] = request.get_url(view_func=view_log, params={}, escape=1)\n if request.roottype == \"cvs\" and cfg.options.use_cvsgraph:\n data[\"graph_href\"] = request.get_url(view_func=view_cvsgraph, params={}, escape=1)\n file_data = request.repos.listdir(request.path_parts[:-1], request.pathrev, {})\n entries = [item for item in file_data if item.name == request.path_parts[-1]]\n if len(entries) == 1:\n request.repos.dirlogs(request.path_parts[:-1], request.pathrev, entries, {})\n data[\"lockinfo\"] = entries[0].lockinfo\n elif request.pathtype == vclib.DIR:\n data[\"view_href\"] = request.get_url(view_func=view_directory, params={}, escape=1)\n if \"tar\" in cfg.options.allowed_views:\n data[\"tarball_href\"] = request.get_url(view_func=download_tarball, params={}, escape=1)\n if request.roottype == \"svn\":\n data[\"revision_href\"] = request.get_url(\n view_func=view_revision, params={\"revision\": data[\"rev\"]}, escape=1\n )\n\n data[\"log_href\"] = request.get_url(view_func=view_log, params={}, escape=1)\n\n if is_querydb_nonempty_for_root(request):\n if request.pathtype == vclib.DIR:\n params = {}\n if request.roottype == \"cvs\" and request.pathrev:\n params[\"branch\"] = request.pathrev\n data[\"queryform_href\"] = request.get_url(\n view_func=view_queryform, params=params, escape=1\n )\n data[\"rss_href\"] = request.get_url(\n view_func=view_query, params={\"date\": \"month\", \"format\": \"rss\"}, escape=1\n )\n elif request.pathtype == vclib.FILE:\n parts = _path_parts(request.where)\n where = _path_join(parts[:-1])\n data[\"rss_href\"] = request.get_url(\n view_func=view_query,\n where=where,\n pathtype=request.pathtype,\n params={\"date\": \"month\", \"format\": \"rss\", \"file\": parts[-1], \"file_match\": \"exact\"},\n escape=1,\n )\n return 
data", "def __revision_list_and_max__(self, path_i):\n # | - __revision_list_and_max__\n if self.folders_exist:\n\n # dirs = os.listdir(os.path.join(self.working_dir, path_i))\n dirs = os.listdir(path_i)\n\n revision_dirs = [dir for dir in dirs if dir[0] == \"_\" and\n dir[-1].isdigit() and \" \" not in dir]\n\n # dir[1].isdigit() and \" \" not in dir]\n\n revision_dirs.sort()\n\n if len(revision_dirs) == 0:\n highest_rev = None\n else:\n highest_rev = max(\n [int(i.split(\"_\")[-1]) for i in revision_dirs],\n )\n\n return(revision_dirs, highest_rev)\n else:\n dummy_return = (\n [\"_1\"],\n 1,\n )\n\n return(dummy_return)\n # __|", "def walkthrough(software_map):\n\n for i in software_map:\n\n if not i[\"is_file\"]:\n\n # for each directory: make a index.md\n dname = \"./docs/\" + i[\"name\"]\n index = \"./docs/\" + i[\"name\"] + \"/index.md\"\n print(index)\n os.mkdir(dname)\n\n with open(index, \"w+\") as f:\n\n children = i[\"children\"]\n\n # list files\n f.write(\"Files:\\n\\n\")\n for i in children:\n if i[\"is_file\"]:\n\n fname = i[\"name\"]\n fext = fname.split(\".\")\n if len(fext) == 2:\n fext = fext[1]\n else:\n fext = \"none\"\n # for each file, note name and extension\n f.write(fname + \" : \" + fext + \"\\n\")\n\n # list subdirectories\n f.write(\"\\nSubdirectories:\\n\\n\")\n for i in children:\n if not i[\"is_file\"]:\n\n dirname = i[\"name\"]\n\n # note the number of files and subdirs in it\n num_files, num_dirs = 0, 0\n for child in i[\"children\"]:\n if child[\"is_file\"]:\n num_files += 1\n elif not child[\"is_file\"]:\n num_dirs += 1\n\n # note down name and numbers for each dir\n f.write(dirname + \" : \" + str(num_files) + \" files, \" +\n str(num_dirs) + \" directories\\n\")\n\n # goto subdir\n if len(i[\"children\"]) > 0:\n walkthrough(i[\"children\"])", "def extract_modified_file_repo(self, hash_list):\n\n print(\"Extract modified files\")\n return_dict = {}\n num_hash_list = len(hash_list)\n for idx, commit_hash in enumerate(hash_list):\n if idx%1000==0:\n print(\"{0}/{1}\".format(idx, num_hash_list))\n return_dict[commit_hash] = git_reader.get_all_modified_files(self.repo_dir, commit_hash)\n\n return return_dict", "def _get_pages():\n pages = {}\n\n # Create the root pages.\n for path in _get_paths():\n pages[path] = {}\n\n # Create the intl pages.\n for locale in locales:\n for path in _get_paths():\n pages[('/intl/' + locale + path)] = {}\n\n return pages" ]
[ "0.58713603", "0.5255621", "0.5184779", "0.5133159", "0.5087633", "0.50743926", "0.5060516", "0.50166947", "0.49424127", "0.49343994", "0.49080712", "0.48932", "0.48899907", "0.4886464", "0.48854086", "0.48630324", "0.48628467", "0.48229107", "0.48138183", "0.4796315", "0.4788132", "0.4779379", "0.47779608", "0.47694692", "0.47613135", "0.47581577", "0.47355118", "0.4720913", "0.4712491", "0.47073334" ]
0.7212066
0
r"""Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for linear models with L1 + L2 regularization. As global optimization objective is stronglyconvex, the optimizer optimizes the dual objective at each step. The optimizer applies each update one example at a time. Examples are sampled uniformly, and the optimizer is learning rate free and enjoys linear convergence rate. Proximal Stochastic Dual Coordinate Ascent, ShalevShwartz, Shai; Zhang, Tong.
def _sdca_optimizer(sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data, loss_type, l1, l2, num_loss_partitions, num_inner_iterations, adaptative=None, name=None): result = _op_def_lib.apply_op("SdcaOptimizer", sparse_example_indices=sparse_example_indices, sparse_feature_indices=sparse_feature_indices, sparse_feature_values=sparse_feature_values, dense_features=dense_features, example_weights=example_weights, example_labels=example_labels, sparse_indices=sparse_indices, sparse_weights=sparse_weights, dense_weights=dense_weights, example_state_data=example_state_data, loss_type=loss_type, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations, adaptative=adaptative, name=name) return _SdcaOptimizerOutput._make(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimize_sgd(beta, X, y, num_iterations, step_size):\n \n N = X.shape[0]\n P = X.shape[1]\n costs = []\n #variable step size\n if step_size == 'rm': #Robbins–Monro rule\n t0 = 2\n C = 1\n alpha = 0.5\n for i in range(num_iterations): \n j = random.randint(0,N-1) #Randomly sample a datapoint with replacement\n \n # Here I only pick a slice of X and an entry of y\n # To reuse our codes for standard GD, \n dbeta, cost = propagate(beta, X[j,:].reshape(1,P), y[j,:].reshape(1,1)) \n \n beta -= dbeta * C * ((num_iterations + t0)**(-alpha)) \n \n if i%1000 == 0:\n _, cost = propagate(beta, X, y) \n costs.append(cost.flatten())\n \n else: # constant step size\n step_size = float(step_size)\n for i in range(num_iterations):\n j = random.randint(0,N-1) #Randomly sample a datapoint with replacement\n \n # Here I only pick a slice of X and an entry of y\n # To reuse our codes for standard GD, \n dbeta, cost = propagate(beta, X[j,:].reshape(1,P), y[j,:].reshape(1,1)) \n \n beta -= dbeta * step_size\n \n if i%1000 == 0:\n _, cost = propagate(beta, X, y) \n costs.append(cost.flatten())\n \n \n return beta, costs", "def _make_spsa_optimizer(self):\n def optimize(maxiter: int = 1000,\n tol = None,\n save_steps: int = 1,\n c0: float = 0.62,\n c1: float = 0.1,\n c2: float = 0.602,\n c3: float = 0.101,\n c4: float = 0):\n \"\"\"\n This method is heavily based on qiskits optimizers.spsa method, \n adapted here to worth with on quibs tn's without exact gradients \n\n Parameters\n ----------\n maxiter: Maximum number of iterations to perform.\n tol : None or float stops optim if tol is reached (default none - completes all steps)\n save_steps: Save intermediate info every save_steps step. It has a min. value of 1.\n last_avg: Averaged parameters over the last_avg iterations.\n If last_avg = 1, only the last iteration is considered. It has a min. value of 1.\n c0: The initial a. Step size to update parameters.\n c1: The initial c. The step size used to approximate gradient.\n c2: The alpha in the paper, and it is used to adjust a (c0) at each iteration.\n c3: The gamma in the paper, and it is used to adjust c (c1) at each iteration.\n c4: The parameter used to control a as well.\n \n Returns\n -------\n TYPE : updated object? 
(same return as TNOptimize)\n \"\"\"\n _spsa_vars = [c0, c1, c2, c3, c4]\n theta = self.vectorizer.vector\n nb_params = len(theta)\n use_exact_grads = 'grads' in self._method\n \n if save_steps:\n theta_vec = [theta]\n cost_vec = [self.vectorized_value_and_grad(theta)[0]]\n \n \n pbar = tqdm(total=maxiter, disable=not self.progbar)\n def callback(_):\n pbar.clear()\n pbar.update()\n val = round(self.loss, 5)\n pbar.set_description(str(val))\n\n if self.loss_target is not None:\n if self.loss < self.loss_target:\n # returning True doesn't terminate optimization\n pbar.close()\n raise KeyboardInterrupt\n \n for ii in range(maxiter):\n \n a_spsa = float(_spsa_vars[0]) / ((ii + 1 + _spsa_vars[4])**_spsa_vars[2])\n c_spsa = float(_spsa_vars[1]) / ((ii + 1)**_spsa_vars[3])\n delta = 2 * randint(0, 2, size=nb_params) - 1\n # plus and minus directions\n \n if use_exact_grads:\n raise NotImplementedError('Will use grad calc to project on to SP-direction')\n else:\n theta_plus = theta + c_spsa * delta\n theta_minus = theta - c_spsa * delta\n\n cost_plus = self.vectorized_value_and_grad(theta_plus)[0]\n cost_minus = self.vectorized_value_and_grad(theta_minus)[0]\n # derivative estimate\n g_spsa = (cost_plus - cost_minus) * delta / (2.0 * c_spsa)\n # updated theta\n theta = theta - a_spsa * g_spsa\n \n callback(ii)\n \n if tol is not None:\n if (cost_plus + cost_minus)/2 < tol:\n pbar.close()\n break\n \n if save_steps:\n theta_vec.append(theta)\n cost_vec.append(cost_plus/2+cost_minus/2)\n \n \n result_dict = {'hyper_parameters':_spsa_vars,\n 'maxiter':maxiter,\n 'theta_opt':theta,\n 'cost_opt':self.vectorized_value_and_grad(theta)[0],\n 'grad_opt':self.vectorized_value_and_grad(theta)[1]}\n if save_steps:\n result_dict['theta_history'] = theta_vec\n result_dict['cost_history'] = cost_vec\n self.result_dict = result_dict\n pbar.close()\n\n return self.inject_res_vector_and_return_tn()\n return optimize", "def optimizer(self):\n return 'sgd'", "def sgd(cost, params, lr=1.0, alpha=0.1):\n grads = T.grad(cost=cost, wrt=params)\n updates = []\n for p, g in zip(params, grads):\n v = shared(p.get_value() * 0.)\n v_new = v * (1.0 - alpha) - alpha * lr * g\n updates.append((v, v_new))\n updates.append((p, p + v_new )) #+ T.sqrt(lr) / 600.0 * srnd.normal(v.shape, dtype=theano.config.floatX)\n \n return updates, norm_gs(params, grads)", "def DistributedOptimizer(optimizer, name=None,\n device_dense='', device_sparse='',\n compression=Compression.none,\n sparse_as_dense=False):\n return _impl.create_distributed_optimizer(keras, optimizer, name,\n device_dense, device_sparse, compression,\n sparse_as_dense)", "def dist_optimizer(config, optimizer):\n build_strategy, exec_strategy = create_strategy(config)\n\n dist_strategy = DistributedStrategy()\n dist_strategy.execution_strategy = exec_strategy\n dist_strategy.build_strategy = build_strategy\n\n dist_strategy.nccl_comm_num = 1\n dist_strategy.fuse_all_reduce_ops = True\n dist_strategy.fuse_grad_size_in_MB = 16\n optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)\n\n return optimizer", "def deep_slda_core50(override_args=None):\n args = create_default_args({'cuda': 0, 'feature_size': 512, 'batch_size': 512,\n 'shrinkage': 1e-4, 'plastic_cov': True, 'seed': None}, override_args)\n set_seed(args.seed)\n device = torch.device(f\"cuda:{args.cuda}\"\n if torch.cuda.is_available() and\n args.cuda >= 0 else \"cpu\")\n\n _mu = [0.485, 0.456, 0.406] # imagenet normalization\n _std = [0.229, 0.224, 0.225]\n transform = transforms.Compose([\n 
transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=_mu,\n std=_std)\n ])\n\n benchmark = avl.benchmarks.CORe50(scenario='nc', train_transform=transform, eval_transform=transform)\n\n eval_plugin = avl.training.plugins.EvaluationPlugin(\n loss_metrics(epoch=True, experience=True, stream=True),\n accuracy_metrics(epoch=True, experience=True, stream=True),\n forgetting_metrics(experience=True, stream=True),\n loggers=[InteractiveLogger()]\n )\n\n criterion = torch.nn.CrossEntropyLoss()\n model = avl.models.SLDAResNetModel(device=device, arch='resnet18',\n imagenet_pretrained=True)\n\n cl_strategy = avl.training.StreamingLDA(model, criterion,\n args.feature_size, num_classes=50,\n eval_mb_size=args.batch_size,\n train_mb_size=args.batch_size,\n train_epochs=1,\n shrinkage_param=args.shrinkage,\n streaming_update_sigma=args.plastic_cov,\n device=device, evaluator=eval_plugin)\n\n warnings.warn(\n \"The Deep SLDA example is not perfectly aligned with \"\n \"the paper implementation since it does not use a base \"\n \"initialization phase and instead starts streming from \"\n \"pre-trained weights. Performance should still match.\")\n\n res = None\n for i, exp in enumerate(benchmark.train_stream):\n cl_strategy.train(exp)\n res = cl_strategy.eval(benchmark.test_stream)\n\n return res", "def AdaGrad(score_list, closure, batch_size, D, labels,\n max_epoch=100, init_step_size=None, linesearch_option=0, \n adaptive_termination=0, threshold_at=0.5,\n c=0.5, beta=0.7,\n x0=None, verbose=True, D_test=None, labels_test=None):\n n = D.shape[0]\n d = D.shape[1]\n \n m = int(n/batch_size)\n\n if x0 is None:\n x = np.zeros(d)\n x0 = np.zeros(d)\n elif isinstance(x0, np.ndarray) and x0.shape == (d,):\n x = x0.copy()\n else:\n raise ValueError('x0 must be a numpy array of size (d, )')\n\n num_grad_evals = 0\n Gk2 = 0\n \n if linesearch_option in [0]:\n step_size = init_step_size\n if linesearch_option in [1]:\n step_size = init_step_size / 2**(batch_size/n) \n \n condition_checked = False\n \n if adaptive_termination == 1:\n Gk2_list = np.zeros(max_epoch * m)\n iteration_counter = 0\n warmup_time = int(n / (batch_size))\n ratio_max = 0\n print(threshold_at, warmup_time)\n\n \n\n for k in range(max_epoch): \n # if num_grad_evals >= 2 * n * max_epoch:\n # # exceeds the number of standard SVRG gradient evaluations (only for batch-size = 1)\n # print('End of budget for gradient evaluations')\n # break\n t_start = time.time()\n\n\n\n loss, full_grad = closure(x, D, labels)\n \n if verbose:\n output = 'Epoch.: %d, Grad. norm: %.2e' % \\\n (k, np.linalg.norm(full_grad))\n output += ', Func. 
value: %e' % loss\n output += ', Step size: %e' % step_size\n output += ', Num gradient evaluations/n: %f' % (num_grad_evals / n) \n print(output) \n\n \n\n score_dict = {\"epoch\": k}\n score_dict[\"optimizer\"] = 0\n score_dict[\"n_grad_evals\"] = num_grad_evals\n score_dict[\"n_grad_evals_normalized\"] = num_grad_evals / n\n score_dict[\"train_loss\"] = loss\n score_dict[\"grad_norm\"] = np.linalg.norm(full_grad)\n score_dict[\"train_accuracy\"] = accuracy(x, D, labels)\n score_dict[\"train_loss_log\"] = np.log(loss)\n score_dict[\"grad_norm_log\"] = np.log(score_dict[\"grad_norm\"])\n # score_dict[\"train_accuracy_log\"] = np.log(score_dict[\"train_accuracy\"])\n if D_test is not None:\n test_loss = closure(x, D_test, labels_test, backwards=False)\n score_dict[\"test_loss\"] = test_loss\n score_dict[\"test_accuracy\"] = accuracy(x, D_test, labels_test)\n score_dict[\"test_loss_log\"] = np.log(test_loss)\n # score_dict[\"test_accuracy_log\"] = np.log(score_dict[\"test_accuracy\"])\n\n score_list += [score_dict]\n \n if np.linalg.norm(full_grad) <= 1e-10:\n break\n if np.linalg.norm(full_grad) >= 1e10:\n break\n if np.isnan(full_grad).any():\n break\n \n # Create Minibatches:\n minibatches = make_minibatches(n, m, batch_size)\n for i in range(m):\n # get the minibatch for this iteration\n indices = minibatches[i]\n Di, labels_i = D[indices, :], labels[indices]\n\n # compute the loss, gradients\n loss, x_grad = closure(x, Di, labels_i) \n gk = x_grad\n num_grad_evals = num_grad_evals + batch_size\n \n Gk2 = Gk2 + (np.linalg.norm(gk) ** 2)\n \n if linesearch_option == 0:\n step_size = init_step_size\n\n \n elif linesearch_option == 1:\n step_size, armijo_iter = armijo_ls(closure, Di, labels_i, x, loss,\n x_grad, x_grad, 2**(batch_size/n) * step_size, c=c, beta=beta,\n precond = 1)\n num_grad_evals += batch_size * armijo_iter\n \n if \"armijo_iter\" in score_list[len(score_list) - 1].keys(): \n score_list[len(score_list) - 1][\"armijo_iter\"] += armijo_iter\n else:\n score_list[len(score_list) - 1][\"armijo_iter\"] = armijo_iter\n \n if adaptive_termination == 1: \n if iteration_counter >= warmup_time:\n Gk2_list[iteration_counter] = Gk2\n if iteration_counter % 2 == 0: \n if iteration_counter/2 >= warmup_time:\n Gk2_last = Gk2_list[int(iteration_counter/2)] \n ratio = (Gk2 - Gk2_last) / Gk2_last\n #print(ratio)\n if ratio > ratio_max:\n ratio_max = ratio\n if ratio > threshold_at:\n x -= (step_size / np.sqrt(Gk2)) * gk\n print('Breaking out of inner loop at iteration', iteration_counter)\n condition_checked = True\n break\n iteration_counter += 1\n \n \n \n x -= (step_size / np.sqrt(Gk2)) * gk\n \n \n\n \n \n t_end = time.time()\n time_epoch = t_end - t_start\n score_list[len(score_list) - 1][\"time\"] = time_epoch \n if condition_checked:\n break\n \n return score_list, x, num_grad_evals, k", "def fit(zs, ys, L, lam_1, lam_2, rho=10, maxiter=100, verbose=True, warm_start=None,\n eps_abs = 1e-5, eps_rel = 1e-5):\n K = int(zs.max() + 1)\n N, n = ys.shape\n Ys, cts = [], []\n for i in range(K):\n idx = zs == i\n cts.append(idx.sum()) #N_i, number of samples per z\n ys_i = ys[idx]\n Ys.append(ys_i.T @ ys_i)\n \n if verbose:\n print (\"Fitting covariance stratified model.\")\n print (\"%d stratification values, %d data points, %d dimensions\" % (K, N, n))\n print (\"%d\" % (K * n * n), \"optimization variables\")\n print (\"lam_1 = %3.3e, lam_2 = %3.3e, rho = %3.3e, maxiter=%d\" % (lam_1, lam_2, rho, maxiter))\n print (\"count per stratification value:\", cts)\n print (Ys[0].shape)\n\n shape = 
(K, n, n)\n if warm_start is None:\n warm_start = []\n for _ in range(5):\n warm_start.append(np.zeros(shape))\n inv_covs_loss, inv_covs_reg, inv_covs_lapl, U_1, U_2 = warm_start\n \n solve = factorized(L.tocsc() + rho * sparse.eye(K, format='csc'))\n \n for _ in range(maxiter):\n # inv_covs_loss\n for i in range(K):\n if cts[i] == 0:\n inv_covs_loss[i] = (inv_covs_lapl[i] - U_1[i])\n continue\n w, v = np.linalg.eigh((rho/cts[i]) * (inv_covs_lapl[i] - U_1[i]) - Ys[i]/cts[i])\n w_new = (w*cts[i]/rho + np.sqrt((w*cts[i]/rho)**2 + 4*cts[i]/rho))/2\n inv_covs_loss[i] = v @ np.diag(w_new) @ v.T \n \n # inv_covs_reg\n for i in range(K):\n inv_covs_reg[i][np.arange(n), np.arange(n)] = np.diag(inv_covs_lapl[i] - U_2[i] - lam_1/rho) #diagonal elements\n \n st2 = soft_threshold(inv_covs_lapl[i] - U_2[i], lam_2 / rho)\n od_idx = np.where(~np.eye(n,dtype=bool)) #gets off_diags\n inv_covs_reg[i][od_idx] = st2[od_idx] \n \n # inv_covs_lapl\n rhs = (inv_covs_loss + inv_covs_reg) / 2 + (U_1 + U_2) / 2\n rhs *= rho\n inv_covs_lapl_new = solve(rhs.reshape(K, n*n)).reshape(shape)\n S = rho * np.repeat(inv_covs_lapl_new - inv_covs_lapl, 2, axis=0)\n inv_covs_lapl = inv_covs_lapl_new.copy()\n\n # U_1\n R_1 = inv_covs_loss - inv_covs_lapl\n U_1 += R_1\n \n # U_2\n R_2 = inv_covs_reg - inv_covs_lapl\n U_2 += R_2\n \n R = np.concatenate([R_1, R_2], axis=0)\n \n # stopping criterion\n eps_pri = np.sqrt(2 * K * n * n) * eps_abs + eps_rel * max(np.linalg.norm(np.concatenate([inv_covs_loss, inv_covs_reg], axis=0)),\n np.linalg.norm(np.repeat(inv_covs_lapl, 2, axis=0)))\n eps_dual = np.sqrt(K * n * n) * eps_abs + eps_rel * np.linalg.norm(np.concatenate([U_1, U_2], axis=0))\n if verbose:\n print (np.linalg.norm(R), np.linalg.norm(S), eps_pri, eps_dual)\n \n return inv_covs_loss, inv_covs_reg, inv_covs_lapl", "def ADMM_SGL(S, lambda1, Omega_0, Theta_0=np.array([]), X_0=np.array([]),\n rho=1., max_iter=1000, tol=1e-7, rtol=1e-4, stopping_criterion='boyd',\\\n update_rho=True, verbose=False, measure=False, latent=False, mu1=None):\n assert Omega_0.shape == S.shape\n assert S.shape[0] == S.shape[1]\n assert lambda1 > 0\n\n assert stopping_criterion in [\"boyd\", \"kkt\"]\n\n if latent:\n assert mu1 is not None\n assert mu1 > 0\n\n (p, p) = S.shape\n\n assert rho > 0, \"ADMM penalization parameter must be positive.\"\n\n # initialize\n Omega_t = Omega_0.copy()\n\n if len(Theta_0) == 0:\n Theta_0 = Omega_0.copy()\n if len(X_0) == 0:\n X_0 = np.zeros((p, p))\n\n Theta_t = Theta_0.copy()\n L_t = np.zeros((p, p))\n X_t = X_0.copy()\n\n runtime = np.zeros(max_iter)\n residual = np.zeros(max_iter)\n status = ''\n\n\n if verbose:\n print(\"------------ADMM Algorithm for Single Graphical Lasso----------------\")\n\n if stopping_criterion == 'boyd':\n hdr_fmt = \"%4s\\t%10s\\t%10s\\t%10s\\t%10s\"\n out_fmt = \"%4d\\t%10.4g\\t%10.4g\\t%10.4g\\t%10.4g\"\n print(hdr_fmt % (\"iter\", \"r_t\", \"s_t\", \"eps_pri\", \"eps_dual\"))\n elif stopping_criterion == 'kkt':\n hdr_fmt = \"%4s\\t%10s\"\n out_fmt = \"%4d\\t%10.4g\"\n print(hdr_fmt % (\"iter\", \"kkt residual\"))\n\n ##################################################################\n ### MAIN LOOP STARTS\n ##################################################################\n for iter_t in np.arange(max_iter):\n if measure:\n start = time.time()\n\n\n # Omega Update\n W_t = Theta_t - L_t - X_t - (1 / rho) * S\n eigD, eigQ = np.linalg.eigh(W_t)\n Omega_t_1 = Omega_t.copy()\n Omega_t = phiplus(beta=1 / rho, D=eigD, Q=eigQ)\n\n # Theta Update\n Theta_t = prox_od_1norm(Omega_t + L_t + 
X_t, (1 / rho) * lambda1)\n\n # L Update\n if latent:\n C_t = Theta_t - X_t - Omega_t\n # C_t = (C_t.T + C_t)/2\n eigD1, eigQ1 = np.linalg.eigh(C_t)\n L_t = prox_rank_norm(C_t, mu1/rho, D=eigD1, Q=eigQ1)\n\n # X Update\n X_t = X_t + Omega_t - Theta_t + L_t\n\n \n \n if measure:\n end = time.time()\n runtime[iter_t] = end - start\n\n # Stopping criterion\n if stopping_criterion == 'boyd':\n r_t,s_t,e_pri,e_dual = ADMM_stopping_criterion(Omega_t, Omega_t_1, Theta_t, L_t, X_t,\\\n S, rho, tol, rtol, latent)\n \n # update rho\n if update_rho:\n if r_t >= 10*s_t:\n rho_new = 2*rho\n elif s_t >= 10*r_t:\n rho_new = 0.5*rho\n else:\n rho_new = 1.*rho\n \n # rescale dual variables\n X_t = (rho/rho_new)*X_t\n rho = rho_new\n \n \n residual[iter_t] = max(r_t,s_t)\n\n if verbose:\n print(out_fmt % (iter_t,r_t,s_t,e_pri,e_dual))\n if (r_t <= e_pri) and (s_t <= e_dual):\n status = 'optimal'\n break\n\n elif stopping_criterion == 'kkt':\n eta_A = kkt_stopping_criterion(Omega_t, Theta_t, L_t, rho * X_t, S, lambda1, latent, mu1)\n residual[iter_t] = eta_A\n\n if verbose:\n print(out_fmt % (iter_t,eta_A))\n if eta_A <= tol:\n status = 'optimal'\n break\n\n\n ##################################################################\n ### MAIN LOOP FINISHED\n ##################################################################\n\n # retrieve status (partially optimal or max iter)\n if status != 'optimal':\n if stopping_criterion == 'boyd':\n if (r_t <= e_pri):\n status = 'primal optimal'\n elif (s_t <= e_dual):\n status = 'dual optimal'\n else:\n status = 'max iterations reached'\n else:\n status = 'max iterations reached'\n\n print(f\"ADMM terminated after {iter_t+1} iterations with status: {status}.\")\n\n ### CHECK FOR SYMMETRY\n if abs((Omega_t).T - Omega_t).max() > 1e-5:\n warnings.warn(f\"Omega variable is not symmetric, largest deviation is {abs((Omega_t).T - Omega_t).max()}.\")\n \n if abs((Theta_t).T - Theta_t).max() > 1e-5:\n warnings.warn(f\"Theta variable is not symmetric, largest deviation is {abs((Theta_t).T - Theta_t).max()}.\")\n \n if abs((L_t).T - L_t).max() > 1e-5:\n warnings.warn(f\"L variable is not symmetric, largest deviation is {abs((L_t).T - L_t).max()}.\")\n\n ### CHECK FOR POSDEF\n D = np.linalg.eigvalsh(Theta_t - L_t)\n if D.min() <= 0:\n print(\n f\"WARNING: Theta (Theta - L resp.) is not positive definite. Solve to higher accuracy! (min EV is {D.min()})\")\n\n if latent:\n D = np.linalg.eigvalsh(L_t)\n if D.min() < -1e-8:\n print(f\"WARNING: L is not positive semidefinite. Solve to higher accuracy! 
(min EV is {D.min()})\")\n\n if latent:\n sol = {'Omega': Omega_t, 'Theta': Theta_t, 'L': L_t, 'X': X_t}\n else:\n sol = {'Omega': Omega_t, 'Theta': Theta_t, 'X': X_t}\n\n if measure:\n info = {'status': status, 'runtime': runtime[:iter_t+1], 'residual': residual[:iter_t+1]}\n else:\n info = {'status': status}\n\n return sol, info", "def optimizer(grad, method, init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N):\r\n\r\n\t\r\n\tif grad == 'NO':\r\n\t\tif method == 'Powell' :\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'Nelder-Mead':\r\n\t\t\tres = opt.minimize(Ulike,init_par, method = method,\r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t options = {'ftol': 0.0001})\r\n\t\t\treturn res.x, res.nit\r\n\t\telif method == 'default':\r\n\t\t\tres = opt.minimize(Ulike,init_par, \r\n\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N))\r\n\t\t\treturn res.x, res.nit\r\n\r\n\telif grad == 'YES':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, \r\n \t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t options={'disp': True, 'maxiter': 4000, 'xtol': 1e-4})\r\n\t\treturn res.x, res.nit \r\n\t\t\t\r\n\t\t\r\n\telif grad == 'HESS':\r\n\t\tres = opt.minimize(Ulike, init_par, method = method, jac = stella_grad_full, hess = stella_hessian,\r\n\t\t\t\t\t args = (alpha, delta, plx_obs, mualpha_obs,mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoeff, N),\r\n\t\t\t\t\t options = {'disp': True, 'maxiter': 4000, 'xtol': 1.e-06}) \r\n\t\treturn res.x, res.nit", "def __init__(\n self,\n params,\n lr=required,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n scale=1,\n clip_norm=0,\n ):\n if lr is not required and lr < 0.:\n raise ValueError('Invalid learning rate: {}'.format(lr))\n if momentum < 0.:\n raise ValueError('Invalid momentum value: {}'.format(momentum))\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n nesterov=nesterov, weight_decay=weight_decay,\n scale=scale, clip_norm=clip_norm)\n if nesterov and (momentum <= 0. 
or dampening != 0.):\n raise ValueError('Nesterov momentum requires a momentum and zero dampening.')\n super(SGD, self).__init__(params, defaults)\n self._op_type = ('Nesterov' if nesterov else 'SGD') + 'Update'\n self._hyper = {\n 'lr': ('lr', collections.defaultdict(str)),\n 'momentum': ('momentum', collections.defaultdict(str)),\n 'weight_decay': ('weight_decay', collections.defaultdict(str)),\n 'scale': ('scale', collections.defaultdict(str)),\n 'clip_norm': ('clip_norm', collections.defaultdict(str)),\n }", "def calc_dense2(self):\n # Local copies of instance attributes for faster access\n b = self.b\n qs = self.q_sim\n qo = self.q_obs\n maxlead = self.max_lead\n maxlag = self.max_lag\n measure = self.measure\n time = self.time\n n = len(self.q_obs)\n\n if self.calc_rays:\n warnings.warn('Not possible to calculate connecting rays when using HMA.calc_dense2')\n if self.keep_internals:\n warnings.warn('Not possible to keep internals when using HMA.calc_dense2')\n\n for i_o in range(n):\n # Cycle cw arrays\n if i_o > 0:\n cw0_prev = cw0_cur\n cw1_prev = cw1_cur\n cw0_cur = np.full(n, np.inf, dtype=np.float64)\n cw1_cur = np.full(n, np.inf, dtype=np.float64)\n\n iss = max(0, i_o - maxlead) # sim index start\n ise = min(n, i_o + maxlag + 1) # sim index end\n isv = np.arange(iss, ise) # sim index as vector\n if time is not None:\n dt = time[isv] - time[i_o]\n # Heavily penalize sim-obs combinations outside allowed window\n # so that they are not selected\n dt[(dt < -maxlag) | (dt > maxlead)] = np.inf\n else:\n dt = isv - i_o\n if measure in ('nse', 'square'):\n e = (qs[iss:ise] - qo[i_o]) ** 2 + b ** 2 * dt ** 2\n elif measure in ('mae', 'abs'):\n e = np.abs(qs[iss:ise] - qo[i_o]) + b * np.abs(dt)\n\n if i_o == 0:\n # Only populate first column of cw0 and move to i_o = 1\n cw0_cur[iss:ise] = e\n continue\n\n # Repeat the same simulation point\n d = cw0_prev[iss:ise]\n cw1_cur[iss:ise] = e + d\n\n # Find the 'cheapest' available preceding sim point\n points = np.full((len(isv), 4), np.inf)\n # Use the previous simulation point\n st = max(0, iss-1)\n end = min(n + 1, ise - 1)\n l = end - st\n points[-l:, 0] = cw0_prev[st:end]\n points[-l:, 1] = cw1_prev[st:end]\n # Skip a simulation point\n st = max(0, iss - 2)\n end = min(n + 1, ise - 2)\n l = end - st\n points[-l:, 2] = cw0_prev[st:end]\n points[-l:, 3] = cw1_prev[st:end]\n cw0_cur[iss:ise] = (e + np.min(points, axis=1))\n\n # Find the cheapest point in the last column, i.e. end of cheapest path, i.e. 
optimum score\n self.opt_score = min(np.min(cw0_cur), np.min(cw1_cur))\n self.of = 1 - self.opt_score / self.fbench\n\n return self.of", "def sgdmgc(cost, params, lr=1.0, alpha=0.1, max_magnitude=5.0, infDecay=0.1):\n grads = T.grad(cost=cost, wrt=params)\n updates = []\n\n norm = norm_gs(params, grads)\n sqrtnorm = T.sqrt(norm)\n not_finite = T.or_(T.isnan(sqrtnorm), T.isinf(sqrtnorm))\n adj_norm_gs = T.switch(T.ge(sqrtnorm, max_magnitude), max_magnitude / sqrtnorm, 1.)\n\n for p, g in zip(params, grads):\n v = shared(p.get_value() * 0.)\n g = T.switch(not_finite, infDecay * p, g * adj_norm_gs)\n v_new = v * (1.0 - alpha) - alpha * lr * g\n updates.append((v, v_new))\n updates.append((p, p + v_new ))\n \n return updates, norm", "def optimize(maxiter: int = 1000,\n tol = None,\n save_steps: int = 1,\n c0: float = 0.62,\n c1: float = 0.1,\n c2: float = 0.602,\n c3: float = 0.101,\n c4: float = 0):\n _spsa_vars = [c0, c1, c2, c3, c4]\n theta = self.vectorizer.vector\n nb_params = len(theta)\n use_exact_grads = 'grads' in self._method\n \n if save_steps:\n theta_vec = [theta]\n cost_vec = [self.vectorized_value_and_grad(theta)[0]]\n \n \n pbar = tqdm(total=maxiter, disable=not self.progbar)\n def callback(_):\n pbar.clear()\n pbar.update()\n val = round(self.loss, 5)\n pbar.set_description(str(val))\n\n if self.loss_target is not None:\n if self.loss < self.loss_target:\n # returning True doesn't terminate optimization\n pbar.close()\n raise KeyboardInterrupt\n \n for ii in range(maxiter):\n \n a_spsa = float(_spsa_vars[0]) / ((ii + 1 + _spsa_vars[4])**_spsa_vars[2])\n c_spsa = float(_spsa_vars[1]) / ((ii + 1)**_spsa_vars[3])\n delta = 2 * randint(0, 2, size=nb_params) - 1\n # plus and minus directions\n \n if use_exact_grads:\n raise NotImplementedError('Will use grad calc to project on to SP-direction')\n else:\n theta_plus = theta + c_spsa * delta\n theta_minus = theta - c_spsa * delta\n\n cost_plus = self.vectorized_value_and_grad(theta_plus)[0]\n cost_minus = self.vectorized_value_and_grad(theta_minus)[0]\n # derivative estimate\n g_spsa = (cost_plus - cost_minus) * delta / (2.0 * c_spsa)\n # updated theta\n theta = theta - a_spsa * g_spsa\n \n callback(ii)\n \n if tol is not None:\n if (cost_plus + cost_minus)/2 < tol:\n pbar.close()\n break\n \n if save_steps:\n theta_vec.append(theta)\n cost_vec.append(cost_plus/2+cost_minus/2)\n \n \n result_dict = {'hyper_parameters':_spsa_vars,\n 'maxiter':maxiter,\n 'theta_opt':theta,\n 'cost_opt':self.vectorized_value_and_grad(theta)[0],\n 'grad_opt':self.vectorized_value_and_grad(theta)[1]}\n if save_steps:\n result_dict['theta_history'] = theta_vec\n result_dict['cost_history'] = cost_vec\n self.result_dict = result_dict\n pbar.close()\n\n return self.inject_res_vector_and_return_tn()", "def solve_SVM_dual_SMO(x_train, y_train, x_test, C=1):\n n, d = x_train.shape[0], x_train.shape[1]\n alpha = np.zeros((n))\n count = 0\n while True:\n count += 1\n alpha_prev = np.copy(alpha)\n for j in range(0, n):\n # Getting random int i!=j\n i = j\n cnt=0\n while i == j and cnt<1000:\n i = rnd.randint(0,n-1)\n cnt=cnt+1\n x_i, x_j, y_i, y_j = x_train[i,:], x_train[j,:], y_train[i], y_train[j]\n k_ij = (np.dot(x_i, x_i.T)) + (np.dot(x_j, x_j.T) ) - (2 * np.dot(x_i, x_j.T))\n if k_ij <= 0:\n continue\n alpha_prime_j, alpha_prime_i = alpha[j], alpha[i]\n if(y_i != y_j):\n (L,H) = (max(0, alpha_prime_j - alpha_prime_i), min(C, C - alpha_prime_i + alpha_prime_j))\n else:\n (L,H) = (max(0, alpha_prime_i + alpha_prime_j - C), min(C, alpha_prime_i + 
alpha_prime_j))\n if(L==H):\n continue\n # Computing model parameters\n w = np.dot(x_train.T, np.multiply(alpha,y_train))\n b = np.mean(y_train - np.dot(w.T, x_train.T))\n E_i = np.sign(np.dot(w.T, x_i.T) + b).astype(int) - y_i\n E_j = np.sign(np.dot(w.T, x_j.T) + b).astype(int) - y_j\n # Setting new alpha values(Lagrange multipliers)\n alpha[j] = alpha_prime_j + float(y_j * (E_i - E_j))/k_ij\n alpha[j] = max(alpha[j], L)\n alpha[j] = min(alpha[j], H)\n alpha[i] = alpha_prime_i + y_i*y_j * (alpha_prime_j - alpha[j])\n # Checking for convergence\n diff = np.linalg.norm(alpha - alpha_prev)\n if diff < 0.000000001:\n break\n # Computing weights and bias\n b = np.mean(y_train-np.dot(w.T,x_train.T))\n w = np.dot(x_train.T, np.multiply(alpha,y_train))\n y_pred_test = (np.sign(np.dot(w.T, x_test.T) + b).astype(int))\n return (y_pred_test,alpha)", "def solve(self):\n \n # Check if cost is available for both estimators\n if not self.est0.cost_avail or not self.est1.cost_avail:\n self.comp_cost = False\n \n # Initial estimate from the input node\n if self.comp_cost:\n z0, zvar0, cost0 = self.est0.est_init(return_cost=True)\n else:\n z0, zvar0 = self.est0.est_init(return_cost=False)\n cost0 = 0\n self.z0 = z0\n self.zvar0 = zvar0\n self.cost0 = cost0\n \n # Initialize other variables\n self.var_cost0 = 0\n self.var_cost1 = 0\n self.cost = 0\n self.s = np.zeros(self.shape1)\n \n for it in range(self.nit):\n \n # Forward transform to est1\n t0 = time.time()\n rvar1_new = self.A.var_dot(self.zvar0)\n rvar1_rep = common.repeat_axes(rvar1_new,self.shape1,\\\n self.var_axes1,rep=False)\n z1_mult = self.A.dot(self.z0)\n r1_new = z1_mult - rvar1_rep*self.s\n \n # Damping\n if it > 0: \n self.r1 = (1-self.step)*self.r1 + self.step*r1_new\n self.rvar1 = (1-self.step)*self.rvar1 + self.step*rvar1_new\n else:\n self.r1 = r1_new\n self.rvar1 = rvar1_new\n\n # Estimator 1 \n if self.comp_cost: \n z1, zvar1, cost1 = self.est1.est(self.r1, self.rvar1, return_cost=True) \n if not self.map_est:\n cost1 -= self.cost_adjust(self.r1,z1,self.rvar1,zvar1,\\\n self.shape1,self.var_axes1)\n else:\n z1, zvar1 = self.est1.est(self.r1, self.rvar1, return_cost=False) \n cost1 = 0\n self.z1 = z1\n self.zvar1 = zvar1\n self.cost1 = cost1 \n con_new = np.mean(np.abs(z1-z1_mult)**2) \n \n # Reverse nonlinear transform to est 0\n self.s = (self.z1-self.r1)/rvar1_rep\n self.sprec = 1/self.rvar1*(1-self.zvar1/self.rvar1)\n t1 = time.time()\n self.time_est1 = t1-t0\n \n # Reverse linear transform to est 0 \n rvar0_new = 1/self.A.var_dotH(self.sprec)\n rvar0_rep = common.repeat_axes(rvar0_new,self.shape0,\\\n self.var_axes0,rep=False)\n r0_new = self.z0 + rvar0_rep*self.A.dotH(self.s)\n \n # Damping\n if it > 0:\n self.r0 = (1-self.step)*self.r0 + self.step*r0_new\n self.rvar0 = (1-self.step)*self.rvar0 + self.step*rvar0_new\n else:\n self.r0 = r0_new\n self.rvar0 = rvar0_new\n \n \n # Estimator 0\n if self.comp_cost:\n z0, zvar0, cost0 = self.est0.est(self.r0, self.rvar0, return_cost=True)\n if not self.map_est:\n cost0 -= self.cost_adjust(self.r0,z0,self.rvar0,zvar0,\\\n self.shape0,self.var_axes0)\n \n else:\n z0, zvar0 = self.est0.est(self.r0, self.rvar0, return_cost=False)\n cost0 = 0\n self.z0 = z0\n self.zvar0 = zvar0\n self.cost0 = cost0 \n\n \n # Compute total cost and constraint \n cost_new = self.cost0 + self.cost1 \n if not self.map_est:\n cost_new += self.cost_gauss()\n \n # Step size adaptation\n if (self.step_adapt) and (it > 0):\n if (con_new < self.con):\n self.step = np.minimum(1,self.step_inc*self.step)\n else:\n 
self.step = np.maximum(self.step_min, self.step_dec*self.step)\n self.cost=cost_new\n self.con=con_new\n \n t2 = time.time()\n self.time_est0 = t2-t1\n self.time_iter = t2-t0\n \n # Print progress\n if self.prt_period > 0:\n if (it % self.prt_period == 0):\n if self.comp_cost:\n print(\"it={0:4d} cost={1:12.4e} con={2:12.4e} step={3:12.4e}\".format(\\\n it, self.cost, self.con, self.step))\n else:\n print(\"it={0:4d} con={1:12.4e}\".format(\\\n it, self.con))\n \n # Save history\n self.save_hist()", "def loss_sparse_dist(model, inputs_a, inputs_b, inputs_pairinfo, **kwargs):\n device = next(model.parameters()).device\n batch_size = len(inputs_a)\n\n scores = model.run(inputs_a, inputs_b)\n\n r_size = model.r_size\n\n # Generate sparse distribution for input batch\n sparse = torch.zeros(batch_size, r_size, device=device).detach()\n # FIXME: assertion that score is 0~5 range\n MAX_SCORE = 5\n for i, score in enumerate(inputs_pairinfo):\n score = score['sts']\n if score == MAX_SCORE:\n sparse[i, -1] = 1\n else:\n floor = int(score / MAX_SCORE * (r_size-1)) \n ceil = floor + 1\n weight = score / MAX_SCORE * (r_size-1) - floor\n if weight < 1e-4:\n # On the grid\n sparse[i, floor] = 1\n else:\n # Between the grid\n sparse[i, ceil] = weight\n sparse[i, floor] = 1 - weight\n\n # Calculate Kullback-Liebler Divergence loss for sparce distribution\n # Note that KLDivLoss does not support batch-first shaping.\n loss = nn.KLDivLoss(reduction='batchmean')(scores, sparse)\n\n return loss", "def optimization_step(self):\n \n if \"CSS\" in self.algorithm:\n \n input_dict = {self.x: self.train_inputs[self.minibatch_set,:]}\n \n var_list = [self.x_tilda, self.minibatch_set]\n \n if (self.num_samples > 0) and (not self.mixture):\n \n if ((self.mf_steps > 0) and self.alpha >0) or\\\n self.gibbs_steps > 0: \n \n var_list.append(self.sampler_theta)\n \n elif \"CD\" in self.algorithm:\n \n input_dict = {self.x : self.train_inputs[self.minibatch_set,:]} \n \n var_list = [self.minibatch_set]\n \n var_list.append(self.learning_rate)\n \n if self.use_momentum:\n \n var_list.append(self.momentum)\n \n output_vars = [self.pseudo_cost]\n \n if self.report_p_tilda:\n \n output_vars.append(self.p_tilda)\n \n else:\n \n output_vars.append(theano.shared(0))\n \n opt_step = theano.function(inputs = var_list,\n outputs = output_vars,\n updates = self.updates,\n givens = input_dict,\n on_unused_input='warn')\n \n return opt_step", "def test_determinism_2():\n\n def run_sgd(mode):\n # Must be seeded the same both times run_sgd is called\n disturb_mem.disturb_mem()\n rng = np.random.RandomState([2012, 11, 27])\n\n batch_size = 5\n train_batches = 3\n valid_batches = 4\n num_features = 2\n\n # Synthesize dataset with a linear decision boundary\n w = rng.randn(num_features)\n\n def make_dataset(num_batches):\n disturb_mem.disturb_mem()\n m = num_batches*batch_size\n X = rng.randn(m, num_features)\n y = np.zeros((m, 1))\n y[:, 0] = np.dot(X, w) > 0.\n\n rval = DenseDesignMatrix(X=X, y=y)\n\n rval.yaml_src = \"\" # suppress no yaml_src warning\n\n X = rval.get_batch_design(batch_size)\n assert X.shape == (batch_size, num_features)\n\n return rval\n\n train = make_dataset(train_batches)\n valid = make_dataset(valid_batches)\n\n num_chunks = 10\n chunk_width = 2\n\n class ManyParamsModel(Model):\n \"\"\"\n Make a model with lots of parameters, so that there are many\n opportunities for their updates to get accidentally re-ordered\n non-deterministically. 
This makes non-determinism bugs manifest\n more frequently.\n \"\"\"\n\n def __init__(self):\n super(ManyParamsModel, self).__init__()\n self.W1 = [sharedX(rng.randn(num_features, chunk_width)) for i\n in xrange(num_chunks)]\n disturb_mem.disturb_mem()\n self.W2 = [sharedX(rng.randn(chunk_width))\n for i in xrange(num_chunks)]\n self._params = safe_union(self.W1, self.W2)\n self.input_space = VectorSpace(num_features)\n self.output_space = VectorSpace(1)\n\n disturb_mem.disturb_mem()\n model = ManyParamsModel()\n disturb_mem.disturb_mem()\n\n class LotsOfSummingCost(Cost):\n \"\"\"\n Make a cost whose gradient on the parameters involves summing many\n terms together, so that T.grad is more likely to sum things in a\n random order.\n \"\"\"\n\n supervised = True\n\n def expr(self, model, data, **kwargs):\n self.get_data_specs(model)[0].validate(data)\n X, Y = data\n disturb_mem.disturb_mem()\n\n def mlp_pred(non_linearity):\n Z = [T.dot(X, W) for W in model.W1]\n H = [non_linearity(z) for z in Z]\n Z = [T.dot(h, W) for h, W in safe_izip(H, model.W2)]\n pred = sum(Z)\n return pred\n\n nonlinearity_predictions = map(mlp_pred,\n [T.nnet.sigmoid,\n T.nnet.softplus,\n T.sqr,\n T.sin])\n pred = sum(nonlinearity_predictions)\n disturb_mem.disturb_mem()\n\n return abs(pred-Y[:, 0]).sum()\n\n def get_data_specs(self, model):\n data = CompositeSpace((model.get_input_space(),\n model.get_output_space()))\n source = (model.get_input_source(), model.get_target_source())\n return (data, source)\n\n cost = LotsOfSummingCost()\n\n disturb_mem.disturb_mem()\n\n algorithm = SGD(cost=cost,\n batch_size=batch_size,\n learning_rule=Momentum(.5),\n learning_rate=1e-3,\n monitoring_dataset={'train': train, 'valid': valid},\n update_callbacks=[ExponentialDecay(decay_factor=2.,\n min_lr=.0001)],\n termination_criterion=EpochCounter(max_epochs=5))\n\n disturb_mem.disturb_mem()\n\n train_object = Train(dataset=train,\n model=model,\n algorithm=algorithm,\n extensions=[PolyakAveraging(start=0),\n MomentumAdjustor(final_momentum=.9,\n start=1,\n saturate=5), ],\n save_freq=0)\n\n disturb_mem.disturb_mem()\n\n train_object.main_loop()\n\n output = cStringIO()\n record = Record(file_object=output, replay=False)\n record_mode = RecordMode(record)\n\n run_sgd(record_mode)\n\n output = cStringIO(output.getvalue())\n playback = Record(file_object=output, replay=True)\n playback_mode = RecordMode(playback)\n\n run_sgd(playback_mode)", "def sgd_optim(config = None, global_step = None):\n learning_rate = config[\"learning_rate\"]\n \n train_step = tf.train.GradientDescentOptimizer(learning_rate)\n #train_step = tf.train.GradientDescentOptimizer(learning_rate)\n return train_step", "def td_sarsa(env, iterations=1000, gamma=0.9, alpha=0.1):\n\n nS = env.nS # number of states\n nA = env.nA # number of actions\n Q_value = np.zeros((nS, nA))\n policy = np.ones((env.nS,env.nA))/env.nA\n epsilon = 1\n s_t1 = env.reset() # reset the environment and place the agent in the start square\n a_t1 = sample_action(policy, s_t1)\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: Don't forget to decay epsilon according to GLIE\n \n curr_state = s_t1\n curr_action = a_t1\n \n start = time.time() # to time how long convergence takes\n print(\"---TD SARSA---\\nTraining Started.\")\n \n for k in range (1, iterations):\n # if (k%10000) == 0:\n # print(\"Now playing iteration: \", k)\n epsilon = 1/k\n curr_action, reward, new_state, done = take_one_step(env, policy, curr_state)\n new_action = sample_action(policy, new_state)\n 
Q_value[curr_state, curr_action] = Q_value[curr_state, curr_action] + alpha * (reward + gamma * Q_value[new_state, new_action] - Q_value[curr_state, curr_action])\n \n # epsilon-greedy policy update\n Q_list = np.argwhere(Q_value[curr_state] == np.amax(Q_value[curr_state])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. Picking each index is equally likely.\n for a in range (nA):\n if a == max_Q:\n policy[curr_state][a] = epsilon/nA + (1 - epsilon) # for the chosen maximal index of Q, set the policy to epsilon/m + 1 - epsilon\n else:\n policy[curr_state][a] = epsilon/nA # epsilon / 3\n \n # print(\"Q_value = {0}\".format(Q_value))\n # print(\"policy = {0}\".format(policy))\n \n if done:\n curr_state = env.reset() # reset the environment and place the agent in the start square\n curr_action = sample_action(policy, curr_state)\n else:\n curr_state = new_state\n curr_action = new_action\n \n stop = time.time()\n print(\"Training Completed.\")\n print(\"It took: {0} iterations and {1} minutes\".format(k,(stop-start)/60))\n \n ############################\n det_policy = np.argmax(Q_value, axis=1)\n return Q_value, det_policy", "def sgd_optimization(dataset, learning_rate, n_epochs, batch_size):\n datasets = load_data(dataset)\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n #number of minibatches\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\n\n #build the model\n print \"... building the model\"\n\n index = T.lscalar()\n x = T.matrix('x') #data for the rasterized images\n y = T.ivector('y') # labels (int)\n\n # logistic regression Class\n classifierLR = LogisticRegression(input=x, n_in=28*28, n_out=10)\n cost = classifierLR.negative_log_likelihood(y)\n\n # test model (no updates)\n test_model = theano.function(\n inputs=[index],\n outputs=classifierLR.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #validate model (no updates)\n validate_model = theano.function(\n inputs=[index],\n outputs=classifierLR.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n #compute the gradient of cost wrt W, b\n g_W = T.grad(cost=cost, wrt=classifierLR.W)\n g_b = T.grad(cost=cost, wrt=classifierLR.b)\n\n #updating expression\n updates = [(classifierLR.W, classifierLR.W - learning_rate * g_W),\n (classifierLR.b, classifierLR.b - learning_rate * g_b)]\n\n # Train model (theano function); updates\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n\n }\n )\n\n # Training model (early stopping with validation examples)\n print \"... 
training the model\"\n patience = 5000\n patience_inc = 2 # wait this much\n improved_threshold = 0.995 # relative improvement (significant)\n validation_frequency = min(n_train_batches, patience / 2)\n best_validation_loss = numpy.inf\n test_score = 0.\n start_time = timeit.default_timer()\n\n done_looping = False\n epoch = 0\n while (epoch < n_epochs) and (not done_looping):\n epoch += 1\n for minibatch_index in xrange(n_train_batches):\n minibatch_avg_cost = train_model(minibatch_index)\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n # compute loss on validation set\n validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n\n print(\n \"Epoch: %i, minibatch: %i/%i, validation_error: %f %%\" %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n )\n\n if this_validation_loss < best_validation_loss:\n #improve patience if good improvement\n if this_validation_loss < best_validation_loss * improved_threshold:\n patience = max(patience, iter * patience_inc)\n\n best_validation_loss = this_validation_loss\n\n #testing on test_set\n test_losses = [test_model(i) for i in xrange(n_test_batches)]\n test_score = numpy.mean(test_losses)\n\n print(\n (\n \"Epoch : %i, minibatch %i/%i,\"\n \" test error of best model %f %%\"\n ) % (\n epoch,\n minibatch_index,\n n_train_batches,\n test_score * 100.\n )\n )\n\n #save the best model\n print \"New best model found; saving ...\"\n with open('best_model.pkl', \"w\") as f:\n cPickle.dump(classifierLR, f)\n\n if patience <= iter:\n done_looping = True\n break\n\n\n end_time = timeit.default_timer()\n print(\n (\n \"Optimization Complete: best validation score : %f %%,\"\n \" test performance : %f %%\"\n )\n % (best_validation_loss * 100., test_score * 100.)\n )\n print \"The code run for %d epochs, with %f epochs/sec\" %(epoch, 1. 
* epoch / (end_time - start_time))\n print >> sys.stderr, (\"The code for file \" + os.path.split(__file__)[1] + \" ran for %.1fs\" % ((end_time - start_time)))", "def cs_grad_estimator(self, test_batch_size=None):\n\n # Get a session.\n sess = self.sess\n # Initialize variables.\n try:\n tf.global_variables_initializer().run()\n except:\n tf.initialize_all_variables().run()\n\n # Set CS example batch size.\n if test_batch_size is not None:\n self.cs_bsize = test_batch_size\n else:\n if not self.cs_m_and_orig:\n self.cs_bsize = FLAGS.cs_batch_size\n else:\n self.cs_bsize = FLAGS.cs_batch_size + FLAGS.batch_size # When original images are used.\n\n # Create the generator.\n if self.contrastive_learning:\n self.cs_input_labels = tf.placeholder(tf.int32, shape=(self.cs_bsize), name='cs_input_labels')\n\n # The z latent variable.\n self.z_batch = tf.Variable(tf.random_normal([self.cs_bsize * FLAGS.cs_num_random_restarts, self.z_dim]),\n name='z_batch')\n\n # Regularizer.\n self.zp_loss_batch = tf.reduce_mean(self.z_batch ** 2, 1)\n\n # Placeholder for initializing z.\n self.z_init_pl = tf.placeholder(tf.float32, [self.cs_bsize * FLAGS.cs_num_random_restarts, self.z_dim])\n self.z_init_op = tf.assign(self.z_batch, self.z_init_pl)\n\n cs_batch_size = self.cs_bsize * FLAGS.cs_num_random_restarts\n self.cs_batch_size = cs_batch_size\n\n # CS loss (The same function is used for encoder input).\n self.cs_loss(self.z_batch, cs_batch_size=cs_batch_size)\n\n # Set up gradient descent optimizer.\n global_step = tf.Variable(0, trainable=False)\n self.cs_learning_rate = get_cs_learning_rate(global_step)\n with tf.variable_scope(tf.get_variable_scope(), reuse=False):\n opt = tf.train.AdamOptimizer(self.cs_learning_rate)\n self.cs_update_op = opt.minimize(self.cs_total_loss, var_list=[self.z_batch], global_step=global_step,\n name='update_op')\n\n # Get optimizer variables to re-initialize before each estimator call.\n uninitialized_vars = []\n for var in tf.global_variables():\n try:\n sess.run(var)\n except tf.errors.FailedPreconditionError:\n print '[WARNING] UNINIT {}'.format(var.op.name)\n uninitialized_vars.append(var)\n self.cs_initialize_op = tf.variables_initializer(uninitialized_vars)", "def getCsnnOptimizerOp(self, global_step):\n # Define loss and optimizer\n csnn_optimizer, _ = self.__csnn.applySomLearning(global_step, self.__num_gpus)\n return csnn_optimizer", "def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin 
= w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history", "def __init__(self, optimizer):\n super(ShardedOptimizer, self).__init__(optimizer, name=\"ShardedOptimizer\")", "def __init__(self,\n weight_decay,\n global_step,\n max_matrix_size=768,\n gbar_decay=0.0,\n gbar_weight=1.0,\n mat_gbar_decay=1.0,\n mat_gbar_weight=1.0,\n learning_rate=1.0,\n svd_interval=1,\n precond_update_interval=1,\n epsilon=1e-4,\n alpha=0.5,\n use_iterative_root=False,\n use_locking=False,\n name=\"ShampooW\"):\n super(ShampooWOptimizer, self).__init__(\n weight_decay,\n global_step=global_step,\n max_matrix_size=max_matrix_size,\n gbar_decay=gbar_decay,\n gbar_weight=gbar_weight,\n mat_gbar_decay=mat_gbar_weight,\n learning_rate=learning_rate,\n svd_interval=svd_interval,\n precond_update_interval=precond_update_interval,\n epsilon=epsilon,\n alpha=alpha,\n use_iterative_root=use_iterative_root,\n use_locking=use_locking,\n name=name)", "def _batch_descent_on_samples(self, positivity_state_samples_all,\n derivative_state_samples_all, optimizer,\n positivity_state_repeatition,\n derivative_state_repeatition,\n options: AdversarialTrainingOptions):\n derivative_state_samples_next_all =\\\n self.lyapunov_hybrid_system.system.step_forward(\n derivative_state_samples_all)\n positivity_sample_initial_loss, derivative_sample_initial_loss = \\\n self.sample_loss(\n positivity_state_samples_all,\n derivative_state_samples_all,\n derivative_state_samples_next_all,\n self.lyapunov_positivity_sample_cost_weight,\n self.lyapunov_derivative_sample_cost_weight,\n positivity_state_repeatition,\n derivative_state_repeatition)\n best_loss = positivity_sample_initial_loss +\\\n derivative_sample_initial_loss\n best_training_params = self._get_current_training_params()\n if self.output_flag:\n print(\"Before training, positivity_sample_loss \" +\n f\"{positivity_sample_initial_loss.item()}, \" +\n \"derivative_sample_loss \" +\n f\"{derivative_sample_initial_loss.item()}\")\n positivity_dataset = torch.utils.data.TensorDataset(\n positivity_state_samples_all, positivity_state_repeatition)\n derivative_dataset = torch.utils.data.TensorDataset(\n derivative_state_samples_all, derivative_state_repeatition)\n # TODO(hongkai.dai): currently by using 
batch_size, I don't guarantee\n # to get options.num_batches batches in the dataset. Write a customized\n # loader later.\n positivity_loader = torch.utils.data.DataLoader(\n positivity_dataset,\n batch_size=int(\n np.ceil(len(positivity_dataset) / options.num_batches)),\n shuffle=True)\n derivative_loader = torch.utils.data.DataLoader(\n derivative_dataset,\n batch_size=int(\n np.ceil(len(derivative_dataset) / options.num_batches)),\n shuffle=True)\n for epoch in range(options.num_epochs_per_mip):\n it_positivity_samples = iter(positivity_loader)\n it_derivative_samples = iter(derivative_loader)\n for i in range(\n np.min((len(positivity_loader), len(derivative_loader)))):\n optimizer.zero_grad()\n positivity_state_batch, positivity_state_repeatition_batch =\\\n next(it_positivity_samples)\n derivative_state_batch, derivative_state_repeatition_batch =\\\n next(it_derivative_samples)\n derivative_state_next_batch = \\\n self.lyapunov_hybrid_system.system.step_forward(\n derivative_state_batch)\n positivity_sample_loss, derivative_sample_loss = \\\n self.sample_loss(\n positivity_state_batch, derivative_state_batch,\n derivative_state_next_batch,\n self.lyapunov_positivity_sample_cost_weight,\n self.lyapunov_derivative_sample_cost_weight,\n positivity_state_repeatition_batch,\n derivative_state_repeatition_batch)\n batch_loss = positivity_sample_loss +\\\n derivative_sample_loss\n batch_loss.backward()\n optimizer.step()\n\n derivative_state_samples_next_all =\\\n self.lyapunov_hybrid_system.system.step_forward(\n derivative_state_samples_all)\n positivity_sample_epoch_loss, derivative_sample_epoch_loss = \\\n self.sample_loss(\n positivity_state_samples_all,\n derivative_state_samples_all,\n derivative_state_samples_next_all,\n self.lyapunov_positivity_sample_cost_weight,\n self.lyapunov_derivative_sample_cost_weight,\n positivity_state_repeatition,\n derivative_state_repeatition)\n if self.output_flag:\n print(f\"epoch {epoch}, positivity_sample_loss \" +\n f\"{positivity_sample_epoch_loss.item()}, \" +\n \"derivative_sample_loss \" +\n f\"{derivative_sample_epoch_loss.item()}\")\n if positivity_sample_epoch_loss == 0. and\\\n derivative_sample_epoch_loss == 0.:\n return\n if positivity_sample_epoch_loss + derivative_sample_epoch_loss <\\\n best_loss:\n best_training_params = self._get_current_training_params()\n best_loss = positivity_sample_epoch_loss +\\\n derivative_sample_epoch_loss\n # End of training, set the training parameters to the one\n # corresponding to the best loss\n self._set_training_params(best_training_params)\n pass", "def testSecondDerivatives(self):\n problem = problems.simple()\n optimizer = meta.MetaOptimizer(net=dict(\n net=\"CoordinateWiseDeepLSTM\",\n net_options={\"layers\": ()}))\n minimize_ops = optimizer.meta_minimize(problem, 3,\n second_derivatives=True)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n train(sess, minimize_ops, 1, 2)" ]
[ "0.58430123", "0.57700384", "0.559415", "0.55526096", "0.5507653", "0.5500585", "0.54425055", "0.5425867", "0.5423508", "0.5412528", "0.54118615", "0.5398304", "0.53416395", "0.534044", "0.53132707", "0.52848023", "0.5283411", "0.5282129", "0.5275689", "0.5273703", "0.52306956", "0.5222955", "0.51928484", "0.5191712", "0.5181114", "0.51801795", "0.5152932", "0.5151803", "0.5147249", "0.51404446" ]
0.68696374
0
r"""Applies L1 regularization shrink step on the parameters.
def _sdca_shrink_l1(weights, l1, l2, name=None):
  result = _op_def_lib.apply_op("SdcaShrinkL1", weights=weights, l1=l1, l2=l2, name=name)
  return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l1_regularizer(scale):\n if isinstance(scale, numbers.Integral):\n raise ValueError('scale cannot be an integer: %s' % scale)\n if isinstance(scale, numbers.Real):\n if scale < 0.:\n raise ValueError('Setting a scale less than 0 on a regularizer: %g' %\n scale)\n if scale >= 1.:\n raise ValueError('Setting a scale greater than 1 on a regularizer: %g' %\n scale)\n if scale == 0.:\n logging.info('Scale of 0 disables regularizer.')\n return lambda _, name=None: None\n\n def l1(weights, name=None):\n \"\"\"Applies L1 regularization to weights.\"\"\"\n with ops.op_scope([weights], name, 'l1_regularizer') as scope:\n my_scale = ops.convert_to_tensor(scale,\n dtype=weights.dtype.base_dtype,\n name='scale')\n return standard_ops.mul(\n my_scale,\n standard_ops.reduce_sum(standard_ops.abs(weights)),\n name=scope)\n\n return l1", "def _update_parameters(self, delta):\n if delta is not None:\n self.SGD.update_with_L1_regularization(self.variables, delta, self.L1)", "def l1_regularization(variables, factor=1e-4, name='l1_regularization', collections=['regularization']):\n l1 = tf.add_n([tf.reduce_sum(tf.abs(var)) for var in variables], name=name) if variables else tf.constant(0.)\n loss = factor * l1\n scalar_summary(loss, name, collections)\n return loss", "def _step1_optimization_closure(self, iteration, step):\n if iteration == self.num_iter_first_step - 1:\n reg_noise_std = 0\n else:\n reg_noise_std = (1 / 1000.) * (iteration // 300) # TODO: make it dependant in the max number of iterations\n aug = self._get_augmentation(iteration)\n if iteration == self.num_iter_first_step - 1:\n aug = 0\n # creates left_net_inputs and right_net_inputs by adding small noise\n clean_net_input = self.clean_net_inputs[aug] + (self.clean_net_inputs[aug].clone().normal_() * reg_noise_std)\n # watermark_net_input = self.watermark_net_inputs[aug] # + (self.watermark_net_input.clone().normal_())\n # mask_net_input = self.mask_net_inputs[aug]\n # applies the nets\n self.clean_net_output = self.clean_net(clean_net_input)\n self.total_loss = 0\n self.blur = 0\n self.total_loss += self.extended_l1_loss(self.clean_net_output,\n self.image_torchs[aug],\n (1 - self.watermark_hint_torchs[aug]))\n self.total_loss.backward(retain_graph=True)", "def update_regularizer(self, regularizer = regularizers.l1(0.1)):\n # for layer in self.layers:\n # layer.kernel_regularizer = regularizer\n self.list_cnn[-1].kernel_regularizer = regularizer", "def l1(weights, name=None):\n with ops.op_scope([weights], name, 'l1_regularizer') as scope:\n my_scale = ops.convert_to_tensor(scale,\n dtype=weights.dtype.base_dtype,\n name='scale')\n return standard_ops.mul(\n my_scale,\n standard_ops.reduce_sum(standard_ops.abs(weights)),\n name=scope)", "def regularizer(self):\n \n # L2 regularization for the fully connected parameters.\n regularizers = (tf.nn.l2_loss(self.weights.wd1) + tf.nn.l2_loss(self.weights.bd1) + \n tf.nn.l2_loss(self.weights.wout) + tf.nn.l2_loss(self.weights.bout))\n return regularizers", "def l1(parameter, bias=None, reg=0.01, lr=0.1):\n Norm = reg*lr\n\n # Update W\n if parameter.is_cuda:\n Norms_w = Norm*torch.ones(parameter.size(), device=torch.device(\"cuda\"))\n else:\n Norms_w = Norm*torch.ones(parameter.size(), device=torch.device(\"cpu\"))\n pos = torch.min(Norms_w, Norm*torch.clamp(parameter, min=0)) # get all positive values\n neg = torch.min(Norms_w, -1.0*Norm*torch.clamp(parameter, max=0)) # get all negative values\n update_w = parameter - pos + neg # l1 step is the magnitude of all positive and all negative\n 
parameter.data = update_w\n\n if bias is not None:\n if bias.is_cuda:\n Norms_b = Norm*torch.ones(bias.size(), device=torch.device(\"cuda\"))\n else:\n Norms_b = Norm*torch.ones(bias.size(), device=torch.device(\"cpu\"))\n pos = torch.min(Norms_b, Norm*torch.clamp(bias, min=0))\n neg = torch.min(Norms_b, -1.0*Norm*torch.clamp(bias, max=0))\n update_b = bias - pos + neg\n bias.data = update_b", "def apply_regularization(self, w, loss, gradient, regularization, lambda_, m):\n if regularization == 'l2':\n loss += lambda_ / (2 * m) * np.squeeze(w.T.dot(w))\n gradient += lambda_ / m * w\n elif regularization == 'l1':\n loss += lambda_ / (2 * m) * np.sum(np.abs(w))\n gradient += lambda_ / m * np.sum((w >= 0) * 1 + (w < 0) * -1)\n return loss, gradient", "def EmbeddingL1RegularizationUpdate(embedding_variable, net_input, learn_rate, l1_reg_val):\n # TODO(student): Change this to something useful. Currently, this is a no-op.\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n sign_inside = tf.sign(tf.matmul(net_input, embedding_variable))\n where = tf.equal(sign_inside, 0)\n # should replace 0's with random in [-1, 1] for an better (not necessarily acute)implementation\n grad = l1_reg_val * tf.matmul(tf.transpose(net_input), sign_inside)\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l1_reg_val * tf.norm(tf.matmul(net_input, embedding_variable), ord=1)\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l1 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)", "def reg_loss(model: nn.Module, regularizer: str, l1: float=0.01, l2: float=0.01):\n if regularizer == 'l1':\n l1_reg = l1 * sum(torch.abs(p).sum() for p in model.parameters())\n return l1_reg\n if regularizer == 'l2':\n l2_reg = l2 * sum(torch.square(p).sum() for p in model.parameters())\n return l2_reg\n if regularizer == 'l1_l2':\n l1_reg = l1 * sum(torch.abs(p).sum() for p in model.parameters())\n l2_reg = l2 * sum(torch.square(p).sum() for p in model.parameters())\n return l1_reg + l2_reg", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def l1(name, weights):\n\n with tf.name_scope(name):\n regularizer = np.float32(0.0)\n for weight in weights:\n tf.add(regularizer, tf.nn.l1_loss(weight))\n\n return regularizer", "def __init__(self, l1_regularization=0.):\n self.l1_regularization = l1_regularization\n self.nspec = None\n self.npixels = None\n self.nlabels = None\n self.ncoeffs = None\n\n self.coeffs = None\n self.scatter = None\n\n # normalization factor\n self.labels_median = 0.\n self.labels_std = 1.\n\n # labels names\n self.label_names = ['Teff', 'Logg', 'M_H', 'Alpha_M']\n\n self.trained_flag = False\n self.force_cpu = False\n self.log_device_placement = False", "def set_kernel_reg(model, lambdal1 = 0, lambdal2 = 0):\r\n\r\n\r\n\tfor layer in model.layers:\r\n\t\tif hasattr(layer, 
'kernel_regularizer'):\r\n\t\t\tlayer.kernel_regularizer = l1_l2(l1 = lambdal1, l2 = lambdal2)\r\n\r\n\treturn model", "def l2_regularizer(scale):\n if isinstance(scale, numbers.Integral):\n raise ValueError('scale cannot be an integer: %s' % (scale,))\n if isinstance(scale, numbers.Real):\n if scale < 0.:\n raise ValueError('Setting a scale less than 0 on a regularizer: %g.' %\n scale)\n if scale >= 1.:\n raise ValueError('Setting a scale greater than 1 on a regularizer: %g.' %\n scale)\n if scale == 0.:\n logging.info('Scale of 0 disables regularizer.')\n return lambda _, name=None: None\n\n def l2(weights, name=None):\n \"\"\"Applies l2 regularization to weights.\"\"\"\n with ops.op_scope([weights], name, 'l2_regularizer') as scope:\n my_scale = ops.convert_to_tensor(scale,\n dtype=weights.dtype.base_dtype,\n name='scale')\n return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)\n\n return l2", "def mp_regular_linreg(X, y, beta_0, alpha, L1_ratio, max_iter=50, tol=0.0001, *args, **kwargs):\n\n N, p = X.shape\n beta = beta_0.copy()\n b_new = np.zeros(p)\n\n for itr in range(max_iter):\n with ProcessPoolExecutor(max_workers=8) as pool:\n b_new = np.array(list(pool.map(deco, [j for j in range(p)], repeat(X), repeat(y), repeat(N),\n repeat(p), repeat(beta), repeat(alpha), repeat(L1_ratio))))\n beta = b_new\n return beta", "def regularization_loss(params: hk.Params) -> jnp.ndarray:\r\n\r\n # L1 Loss\r\n sum_in_layer = lambda p: jnp.sum(jnp.abs(p))\r\n sum_p_layers = [sum_in_layer(p) for p in jax.tree_leaves(params)]\r\n l1_loss = sum(sum_p_layers)\r\n\r\n # L2 Loss\r\n l2_loss = 0.5 * sum(jnp.sum(jnp.square(p)) for p in jax.tree_leaves(params))\r\n\r\n return l2_coef * l2_loss + l1_coef * l1_loss", "def rescale_all(self):\n for param_code in self.parameters.keys():\n self.rescale_parameter(param_code)", "def l2_regularization(cg, rate=0.01):\n W = VariableFilter(roles=[WEIGHT])(cg.variables)\n L2_cost = rate * l2_norm(W)\n\n return L2_cost", "def _reset_parameters(self):\r\n\t\tfor p in self.parameters():\r\n\t\t\tif p.dim() > 1:\r\n\t\t\t\txavier_uniform_(p)", "def adjust_learning_rate(optimizer, shrink_factor):\n\n print(\"\\nDECAYING learning rate.\")\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * shrink_factor\n print(\"The new learning rate is %f\\n\" % (optimizer.param_groups[0]['lr'],))", "def adjust_learning_rate(optimizer, shrink_factor):\n\n print(\"\\nDECAYING learning rate.\")\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * shrink_factor\n print(\"The new learning rate is %f\\n\" % (optimizer.param_groups[0]['lr'],))", "def prox_l1_norm(w, lamb):\n\treturn np.sign(w) * np.maximum( np.abs(w) - lamb, 0)", "def test_l1norm () :\n n = 10\n rfs = RewardFnSpace(list(range(n)))\n for i in range(10): \n b = rfs.bs[i]\n rfs.lp += b == 0\n rfs.lp.solve()\n rfs._setCoeffs()\n coeffs = np.array(rfs.coeffs)\n assert(np.linalg.norm(coeffs - np.ones(n)) < 1e-4)", "def l2(parameter, bias=None, reg=0.01, lr=0.1):\n \n if bias is not None:\n w_and_b = torch.cat((parameter, bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n L2 = reg # lambda: regularization strength\n Norm = (lr*L2/w_and_b.norm(2))\n if Norm.is_cuda:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cuda\"))\n else:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_w, Norm)\n update = (parameter*l2T) \n parameter.data = update\n # Update bias\n if bias is not None:\n if 
Norm.is_cuda:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cuda\"))\n else:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_b, bias)\n update_b = (bias*l2T)\n bias.data = update_b", "def l2_regularization(W, reg_strength):\n # TODO: Copy from the previous assignment\n loss = reg_strength*np.sum(W*W)\n grad = 2*reg_strength*W\n return loss, grad", "def l2_regularization(variables, factor=1e-4, name='l2_regularization', collections=['regularization']):\n l2 = tf.add_n([tf.sqrt(2.*tf.nn.l2_loss(var)) for var in variables], name=name) if variables else tf.constant(0.)\n loss = factor * l2\n scalar_summary(loss, name, collections)\n return loss", "def forward(self, parameters: List[Tuple[str, nn.Parameter]]) -> torch.Tensor:\n # calculate regularized loss \n reg_loss = regularize(parameters, self.weight_decay, self.norm)\n\n return reg_loss", "def fit_regularized(self, start_params=None, method='l1', alpha=0,\n ceps=1e-4, ptol=1e-6, maxit=200, **fit_kwargs):\n\n if type(method) == str and (method.lower() != 'l1'):\n raise ValueError(\"Invalid regularization method\")\n\n # If method is a smooth penalty just optimize directly.\n if isinstance(method, Penalty):\n # Scale the penalty weights by alpha\n method.alpha = alpha\n fit_kwargs.update({\"fe_pen\": method})\n return self.fit(**fit_kwargs)\n\n if np.isscalar(alpha):\n alpha = alpha * np.ones(self.k_fe, dtype=np.float64)\n\n # Fit the unpenalized model to get the dependence structure.\n mdf = self.fit(**fit_kwargs)\n fe_params = mdf.fe_params\n cov_re = mdf.cov_re\n scale = mdf.scale\n try:\n cov_re_inv = np.linalg.inv(cov_re)\n except np.linalg.LinAlgError:\n cov_re_inv = None\n\n for itr in range(maxit):\n\n fe_params_s = fe_params.copy()\n for j in range(self.k_fe):\n\n if abs(fe_params[j]) < ceps:\n continue\n\n # The residuals\n fe_params[j] = 0.\n expval = np.dot(self.exog, fe_params)\n resid_all = self.endog - expval\n\n # The loss function has the form\n # a*x^2 + b*x + pwt*|x|\n a, b = 0., 0.\n for k, lab in enumerate(self.group_labels):\n\n exog = self.exog_li[k]\n ex_r = self.exog_re_li[k]\n ex2_r = self.exog_re2_li[k]\n resid = resid_all[self.row_indices[lab]]\n\n x = exog[:,j]\n u = _smw_solve(scale, ex_r, ex2_r, cov_re,\n cov_re_inv, x)\n a += np.dot(u, x)\n b -= 2 * np.dot(u, resid)\n\n pwt1 = alpha[j]\n if b > pwt1:\n fe_params[j] = -(b - pwt1) / (2 * a)\n elif b < -pwt1:\n fe_params[j] = -(b + pwt1) / (2 * a)\n\n if np.abs(fe_params_s - fe_params).max() < ptol:\n break\n\n # Replace the fixed effects estimates with their penalized\n # values, leave the dependence parameters in their unpenalized\n # state.\n params_prof = mdf.params.copy()\n params_prof[0:self.k_fe] = fe_params\n\n scale = self.get_scale(fe_params, mdf.cov_re_unscaled)\n\n # Get the Hessian including only the nonzero fixed effects,\n # then blow back up to the full size after inverting.\n hess = self.hessian_full(params_prof)\n pcov = np.nan * np.ones_like(hess)\n ii = np.abs(params_prof) > ceps\n ii[self.k_fe:] = True\n ii = np.flatnonzero(ii)\n hess1 = hess[ii, :][:, ii]\n pcov[np.ix_(ii,ii)] = np.linalg.inv(-hess1)\n\n results = MixedLMResults(self, params_prof, pcov / scale)\n results.fe_params = fe_params\n results.cov_re = cov_re\n results.scale = scale\n results.cov_re_unscaled = mdf.cov_re_unscaled\n results.method = mdf.method\n results.converged = True\n results.cov_pen = self.cov_pen\n results.k_fe = self.k_fe\n results.k_re = self.k_re\n results.k_re2 = self.k_re2\n\n return 
MixedLMResultsWrapper(results)" ]
[ "0.6826777", "0.61285764", "0.6045434", "0.59946656", "0.59823173", "0.5980362", "0.59782636", "0.5918527", "0.58867913", "0.57994086", "0.57767415", "0.57330287", "0.57198095", "0.56476456", "0.55648047", "0.5526961", "0.5520416", "0.5410303", "0.5392533", "0.5335324", "0.5330576", "0.53217465", "0.53217465", "0.5307455", "0.53030574", "0.529683", "0.5290117", "0.5284549", "0.52834356", "0.5252479" ]
0.61554843
1
This class holds the windows which shows the create experiment widget.
def __init__(self,currentExperiment):
    super(AmoebaCreateExperimentWindow,self).__init__()
    self.currentExperiment = currentExperiment
    #Create the window
    self.subWindow = QMdiSubWindow()
    self.widget = AmoebaCreateExperiment(self.subWindow,self.currentExperiment)
    #Create the UI.
    self.setWindowTitle("Create a new experiment.")
    self.scroll = QScrollArea()
    self.scroll.setMinimumWidth(270)
    self.scroll.setWidget(self.widget)
    self.scroll.setWidgetResizable(True)
    #Connect button to next function.
    self.subWindow.setWidget(self.scroll)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createWidgets(self):\n raise NotImplementedError", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def build_window(self):\n\n main_frame = tk.Frame(self.root)\n main_frame.pack(fill='both')\n\n self.open_machine_learner_window_button = tk.Button(main_frame, text=\"Open Machine Learner\")\n self.open_machine_learner_window_button.bind('<Button-1>', self.open_machine_learner_window)\n self.open_machine_learner_window_button.pack(side=\"left\")\n\n self.open_web_crawler_window_button = tk.Button(main_frame, text=\"Open Web Crawler\")\n self.open_web_crawler_window_button.bind('<Button-1>', self.open_web_crawler_window)\n self.open_web_crawler_window_button.pack(side=\"left\")\n\n self.open_webpage_classifier_window_button = tk.Button(main_frame, text=\"Open WebPage Classifier\")\n self.open_webpage_classifier_window_button.bind('<Button-1>', self.open_webpage_classifier_window)\n self.open_webpage_classifier_window_button.pack(side=\"left\")\n\n self.run_steady_state_genetic_button = tk.Button(main_frame, text=\"Run Steady State\")\n self.run_steady_state_genetic_button.bind('<Button-1>', self.run_steady_state)\n self.run_steady_state_genetic_button.pack(side=\"left\")\n\n # Protocol for closing window using 'x' button\n self.root.protocol(\"WM_DELETE_WINDOW\", self.on_closing_event)", "def create(self):\n\n cv2.namedWindow(winname=self.title, flags=self.style)", "def create_widgets( self ):", "def create_widgets(self):", "def createWidgets(self):\r\n top = self.winfo_toplevel()\r\n top.rowconfigure(0, weight=1)\r\n top.columnconfigure(0, weight=1)\r\n self.rowconfigure(0, weight=1)\r\n self.columnconfigure(0, weight=1) \r\n\r\n self.button_quit = tk.Button(self, text='Quit', command=self.quit)\r\n self.button_quit.grid(row=0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)", "def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})", "def create_widgets(self):\n # self.var_spherical = IntVar()\n # self.var_3d = IntVar()\n # self.var_spatial_audio = IntVar()\n # self.button_open[\"command\"] = self.action_open\n # self.button_inject[\"command\"] = self.action_inject\n pass", "def buildUI(self):\n\n if cmds.window(\"pyART_AddToCanvasWIN\", exists=True):\n cmds.deleteUI(\"pyART_AddToCanvasWIN\", wnd=True)\n\n # create the main window\n self.mainWin = QtWidgets.QMainWindow(self.pickerUI)\n\n # create the main widget\n self.mainWidget = QtWidgets.QWidget()\n self.mainWin.setCentralWidget(self.mainWidget)\n\n # create the mainLayout\n self.layout = QtWidgets.QVBoxLayout(self.mainWidget)\n\n # load stylesheet\n styleSheetFile = utils.returnNicePath(self.toolsPath, \"Core/Scripts/Interfaces/StyleSheets/animPicker.qss\")\n f = open(styleSheetFile, \"r\")\n self.style = f.read()\n f.close()\n\n self.mainWin.setStyleSheet(self.style)\n\n self.mainWin.setMinimumSize(QtCore.QSize(250, 400))\n self.mainWin.setMaximumSize(QtCore.QSize(250, 400))\n self.mainWin.resize(250, 400)\n\n # set qt object name\n self.mainWin.setObjectName(\"pyART_AddToCanvasWIN\")\n self.mainWin.setWindowTitle(\"Add Module To Canvas\")\n\n # label, listWidget, button\n label = QtWidgets.QLabel(\"Available Modules:\")\n label.setProperty(\"boldFont\", True)\n self.layout.addWidget(label)\n\n self.moduleList = QtWidgets.QListWidget()\n self.moduleList.setMaximumSize(230, 300)\n self.moduleList.setMinimumSize(230, 300)\n self.layout.addWidget(self.moduleList)\n\n # add modules to listWidget\n self.addModulesToList()\n\n # create add button\n button = QtWidgets.QPushButton(\"Add 
Selected To Canvas\")\n self.layout.addWidget(button)\n button.setObjectName(\"blueButton\")\n button.clicked.connect(self.addSelectedToCanvas)\n\n # show ui\n self.mainWin.show()", "def createWidgets(self):\n layout = QHBoxLayout()\n \n self.logsItem = TestsView.TestsView(parent=self, local = self.local)\n \n self.resumeView = ResumeView.TextualView(parent=self)\n if QtHelper.str2bool( Settings.instance().readValue( key = 'TestRun/hide-resume-view' ) ):\n self.hideResumeView()\n\n self.graphView = GraphView.FlowChartView(parent=self)\n self.logsView = TextualView.TextualView2(parent=self)\n self.hexLogsView = DetailedView.DetailedView(parent=self)\n \n self.displayTab = QTabWidget()\n\n hSplitter = QSplitter(self)\n hSplitter.setOrientation(Qt.Vertical)\n\n hSplitter.addWidget( self.resumeView )\n hSplitter.addWidget( self.logsView )\n hSplitter.addWidget( self.hexLogsView )\n\n self.displayTab.addTab(hSplitter, self.tr('Events') )\n self.displayTab.addTab(self.graphView, self.tr('Diagram') )\n \n defaultTab = Settings.instance().readValue( key = 'TestRun/default-tab-run' )\n self.displayTab.setCurrentIndex(int(defaultTab)) \n \n self.currentEdit = QLineEdit()\n self.currentEdit.setReadOnly(True)\n self.currentEdit.setStyleSheet(\"QLineEdit { background-color : #F0F0F0; color: grey; }\")\n\n leftFrame = QFrame()\n leftLayout = QVBoxLayout()\n leftLayout.setContentsMargins(0, 0, 0, 0) \n leftFrame.setLayout(leftLayout)\n\n leftLayout.addWidget(self.currentEdit)\n leftLayout.addWidget(self.displayTab)\n\n v_splitter = QSplitter(self) \n v_splitter.addWidget( self.logsItem )\n v_splitter.addWidget( leftFrame )\n v_splitter.setStretchFactor(1, 1)\n\n layout.addWidget(v_splitter)\n \n self.setLayout(layout)", "def widgets(self):\r\n self.setWindowTitle(\"PyCrypt\")\r\n self.setMinimumSize(QSize(500, 500))\r\n self.setMaximumSize(QSize(500, 500))\r\n# Adding the sub def for widgets etc\r\n self.add_menus_and_status()\r\n self.add_buttons()", "def _create_window(self):\n self.window = Gtk.Window()\n self.window.set_title(\"Yapsy Example\")\n self.window.set_default_size(400, 400)\n self.window.connect(\"destroy\", lambda w: Gtk.main_quit())\n # PluginList() is a composite widget that shows all installed plugins\n # in a Gtk.TreeView. See widgets.py\n self._plugin_list = PluginList(self.window)\n box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n box.pack_start(self._plugin_list, True, True, 0)\n box.show_all()\n self.window.add(box)", "def _init_widgets(self):\n # Container frame\n self.container = Frame(self)\n # Workspace block\n self.main_container = Frame(self.container)\n\n self.text = Label(self.main_container)\n self.text.config(text=\"PyEventLogViewer is a timeline-based tool used to simplify the way\\n\"\n \"a user can view and explore Windows EVTX files. 
To begin using this\\n\"\n \"software you must do the following:\\n\\n\"\n \"\\t1) File → New → 'Create a new project'\\n\"\n \"\\t2) Tools → Import Log File → 'Open a specified EVTX file'\\n\"\n \"\\t3) Explore the presented timeline.\\n\"\n \"\\t4) Double-click a specific record to view the XML data for that record.\\n\"\n \"\\t5) File → Export → 'Generate a CSV or HTML file for timeline presentation.'\\n\\n\"\n \"At this point, only System and Security EVTX files are parsable with this software.\")\n\n self.show_var = BooleanVar()\n self.show_check = Checkbutton(self.main_container, text=\"Don't Show on Startup\", variable=self.show_var)\n\n # Action block\n self.button_ok = Button(self.main_container, text='Ok', underline=0, command=self.callback_close)\n self.bind('<Return>', self.callback_close)\n self.bind('<Escape>', self.callback_close)\n\n # Focus on window - required for binds to work.\n self.focus_set()", "def createWindow(self):\r\n\t\t# give the window a title\r\n\t\tself.parent.title( 'Acrobat Data Acquisition')\r\n\t\t# set the style\r\n\t\tself.style = ttk.Style()\r\n\t\tself.style.theme_use('default')\r\n\t\tself.pack(fill= tk.BOTH, expand=1)", "def createUI(self):\n\n q.getQItem(windowID, QtWidgets.QWidget)\n cmds.setParent(q.fullPath)\n\n # ################################################\n # Active Render Layer\n\n # cmds.separator(height=12, style='none')\n addFrameLayout(\n '%s_frameLayoutLayers' % windowID,\n 'Visible Render Layer', collapsable=False,\n labelVisible=False,\n marginHeight=0\n )\n\n addRowLayout(\n '%s_rowLayoutActiveRenderLayer' % windowID,\n 4,\n columnAlign4=('left', 'left', 'right', 'right'),\n columnAttach4=('left', 'both', 'right', 'right'),\n columnWidth4=(\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.775,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075\n )\n )\n\n\n addButton('%s_addNewLayer' % windowID, 'New', rsAddNewLayer,\n image='RS_create_layer', size=(21, 21))\n addOptionMenu('%s_selectActiveLayer' % windowID,\n 'Active Layer ', (), rsSelectActiveLayer)\n addButton('rsOpenRenderSetupWindow', 'Render Setup',\n rsOpenRenderSetupWindow, image='render_setup.png',\n size=(21, 21))\n addButton('rsOpenUnifiedRenderGlobals', 'Render Globals',\n rsOpenUnifiedRenderGlobals, image='render_setup.png',\n size=(21, 21))\n\n # ################################################\n # Work Render Layers\n\n cmds.setParent(q.fullPath)\n addFrameLayout('%s_frameLayoutLayersB' % windowID,\n 'Work Render Layer', collapsable=False,\n labelVisible=False, marginHeight=0)\n addRowLayout('%s_rowLayoutVisibleRenderLayer' % windowID, 3,\n columnAlign3=('left', 'left', 'right'),\n columnAttach3=('left', 'both', 'right'),\n columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.075, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.85,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075))\n\n cmds.separator()\n addOptionMenu('%s_selectVisibleLayer' % windowID,\n 'Visible Layer ', (), rsSelectVisibleLayer)\n cmds.separator()\n\n cmds.setParent(q.fullPath)\n cmds.separator(height=12, style='none')\n\n # ################################################\n # Collections\n\n addFrameLayout('%s_frameLayout02' % windowID, 'Collections',\n labelVisible=False, marginHeight=0)\n\n addRowLayout(\n '%s_rowLayout02' % windowID,\n 6,\n columnAlign6=('left', 'left', 'left', 'left', 'left', 'left'),\n columnAttach6=('both', 'both', 'right', 'right', 'right', 'right'),\n columnWidth6=(\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 
0.18,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.18,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.415,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.075,\n )\n )\n\n addButton('rsAddCollection', 'Add', rsAddCollection)\n addButton('rsRemoveCollection', 'Remove', rsRemoveCollection)\n addButton('rsSelectShapes', 'Select Shapes', rsSelectShapes,\n image='selectObject.png', size=(21, 21))\n addButton('rsRenameShader', 'Rename Shader', rsRenameShader,\n size=(21, 21), image='QR_rename.png')\n addButton('rsDuplicateShader', 'Duplicate Shader',\n duplicateShader, size=(21, 21), image='newPreset.png')\n addButton('rsRefreshUI', 'Refresh', rsRefreshUI, size=(21, 21),\n image='QR_refresh.png')\n\n # ###########################\n # Filter List\n\n cmds.setParent('%s_frameLayout02' % windowID)\n addRowLayout('%s_rowLayout03' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'both'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.6, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.42))\n\n addTextField('%s_filterShaderList' % windowID, 'Search',\n rsFilterShaderList_off, rsFilterShaderList_off,\n window.updateUI)\n addOptionMenu('rsShaderGroups', '|', (), rsShaderGroups)\n\n # ###########################\n # The shaders scroll list\n\n cmds.setParent('%s_frameLayout02' % windowID)\n addRowLayout('%s_rowLayout04' % windowID, 1, columnAlign1='both', columnAttach1='both', columnWidth1=WINDOW_WIDTH\n + 12)\n addTextScrollList('%s_ShaderScrollList' % windowID, (),\n rsShaderScrollList_doubleClick,\n rsShaderScrollList_onSelect,\n rsShaderScrollList_deleteKey)\n\n # Add popup menu:\n\n cmds.popupMenu('rsShaderScrollListPopupMenu',\n parent='%s_ShaderScrollList' % windowID,\n allowOptionBoxes=False, markingMenu=True,\n postMenuCommand=postMenuCommand)\n cmds.menuItem('%s_popupMenuItem02' % windowID,\n label='Duplicate Shader', command=duplicateShader)\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem04' % windowID,\n label='Graph Shader')\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem03' % windowID,\n label='Select Shader')\n cmds.menuItem(divider=True)\n cmds.menuItem('%s_popupMenuItem05' % windowID,\n label='Select Assigned Shapes')\n cmds.menuItem('%s_popupMenuItem06' % windowID,\n label='Select Assigned Transforms')\n\n # ##################################################\n # Arnold Property Overrides\n\n cmds.setParent('%s_frameLayout02' % windowID)\n cmds.columnLayout(\n '%s_columnLayout20' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n\n cmds.separator(parent='%s_columnLayout20' % windowID, height=4,\n style='none')\n\n addRowLayout('%s_rowLayout05' % windowID, 2,\n columnAlign2=('left', 'both'),\n columnAttach2=('left', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.75, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.25))\n addText('%s_textArnoldPropertyOverridesLabel' % windowID,\n 'Apply Arnold Property Overrides', 'plainLabelFont')\n addCheckBox('rsArnoldPropertyOverridesCheckBox', '',\n rsArnoldPropertyOverridesCheckBox,\n rsArnoldPropertyOverridesCheckBox)\n cmds.separator(parent='%s_columnLayout20' % windowID, height=4,\n style='none')\n\n # Column Layout to toggle\n\n cmds.setParent('%s_columnLayout20' % windowID)\n cmds.columnLayout(\n '%s_columnLayout02' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 
0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n\n addCheckboxes('%s_columnLayout02' % windowID)\n cmds.columnLayout('%s_columnLayout02' % windowID, edit=True,\n visible=False)\n\n # #################################################\n # Shader Override\n\n cmds.setParent('%s_frameLayout02' % windowID)\n cmds.columnLayout(\n '%s_columnLayout21' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('left', 0),\n adjustableColumn=False,\n rowSpacing=0,\n )\n cmds.separator(parent='%s_columnLayout21' % windowID, height=4,\n style='none')\n addRowLayout('%s_rowLayout06' % windowID, 2,\n columnAlign2=('left', 'right'),\n columnAttach2=('left', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.75, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.25))\n addText('%s_shaderOverrideLabel' % windowID, 'Shader Override',\n 'plainLabelFont')\n addCheckBox('%s_shaderOverrideCheckbox' % windowID, '',\n rsShaderOverrideCheckbox, rsShaderOverrideCheckbox)\n cmds.separator(parent='%s_columnLayout21' % windowID, height=4,\n style='none')\n\n cmds.setParent('%s_columnLayout21' % windowID)\n cmds.columnLayout(\n '%s_columnLayout03' % windowID,\n width=WINDOW_WIDTH - FRAME_MARGIN * 2,\n columnAlign='left',\n columnAttach=('both', 4),\n adjustableColumn=True,\n rowSpacing=0,\n )\n cmds.setParent('%s_columnLayout03' % windowID)\n addOptionMenu('%s_optionMenu02' % windowID, 'Select: ', (),\n rsShaderOverridesMenu)\n\n global selectedShaderOverride\n\n # default selection\n\n selectedShaderOverride = SHADER_OVERRIDE_OPTIONS[0]['ui']\n cmds.columnLayout('%s_columnLayout03' % windowID, edit=True,\n visible=False)\n\n # #################################################\n\n cmds.setParent(q.fullPath)\n cmds.separator(height=10, style='none')\n\n # #################################################\n # Extras\n\n addFrameLayout('%s_frameLayout50' % windowID, 'Extras',\n collapsable=True, marginHeight=0,\n labelVisible=False)\n\n # #################################################\n # Add & Assign Shader Groups\n\n addFrameLayout(\n '%s_frameLayout05' % windowID,\n 'Add & Assign Shader Groups',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=False,\n labelVisible=True,\n )\n\n # Add the renamer window\n\n self.gwCustomRenamer = CustomRenamer()\n self.gwCustomRenamer.createUI()\n\n # #################################################\n # AutoConnect\n\n cmds.setParent('%s_frameLayout50' % windowID)\n\n addFrameLayout(\n '%s_frameLayout03' % windowID,\n 'Adobe Connector',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=True,\n labelVisible=True,\n )\n addRowLayout('%s_rowLayout07', 3, columnAlign3=('left', 'left',\n 'left'), columnAttach3=('both', 'both', 'both'),\n columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.4, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3))\n addButton('updateConnections', '> Update Connections <',\n updateConnections)\n addButton('uvSnapshot', 'UV Snapshot', uvSnapshot)\n addButton('editTexture', 'Edit Texture', editTexture)\n\n # After Effects\n\n cmds.setParent('%s_frameLayout03' % windowID)\n addRowLayout('%s_rowLayout11' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'both'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.4, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.6))\n addText('%s_text90' % windowID, 'Send to After Effects:')\n addButton('makeCompButton', 'Send to After Effects', rsMakeComp)\n\n # 
#################################################\n # Render Setup /\n # Output settings\n\n cmds.setParent('%s_frameLayout50' % windowID)\n addFrameLayout(\n '%s_frameLayout04' % windowID,\n 'Output Settings',\n collapsable=True,\n marginWidth=0,\n marginHeight=0,\n collapse=True,\n labelVisible=True,\n )\n addRowLayout('%s_rowLayout08' % windowID, 1,\n columnAlign1='center', columnAttach1='both',\n columnWidth1=WINDOW_WIDTH - FRAME_MARGIN * 2)\n addButton('%s_revealOutputDirectory' % windowID,\n 'Output path not set yet', rsRevealOutputDirectory)\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout09' % windowID, 3,\n columnAlign3=('left', 'right', 'right'),\n columnAttach3=('left', 'right', 'right'),\n columnWidth3=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.8, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.14,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.06))\n\n addOptionMenu('%s_optionMenu05' % windowID, '', (),\n rsSelectOutputTemplate)\n addOptionMenu('%s_outputVersionMenu' % windowID, '', (),\n rsSelectOutputVersion)\n cmds.menuItem(label='v001')\n\n cmds.setParent('%s_rowLayout09' % windowID)\n addButton('%s_incrementOutputVersionButton' % windowID, '+1',\n rsIncrementOutputVersion, size=(21, 21))\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout10' % windowID, 2,\n columnAlign2=('left', 'left'),\n columnAttach2=('both', 'right'),\n columnWidth2=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.7, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.3))\n addOptionMenu('%s_optionMenu03' % windowID, 'Format:', (),\n rsOutputTemplatesMenu)\n addOptionMenu('%s_optionMenu06' % windowID, '', (),\n rsSetFPSMenu)\n\n cmds.setParent('%s_frameLayout04' % windowID)\n addRowLayout('%s_rowLayout12' % windowID, 4,\n columnAlign4=('right', 'left', 'right', 'left'),\n columnAttach4=('both', 'both', 'both', 'both'),\n columnWidth4=((WINDOW_WIDTH - FRAME_MARGIN * 2)\n * 0.50, (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.15,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.20,\n (WINDOW_WIDTH - FRAME_MARGIN * 2) * 0.15))\n\n addText('%s_setInFrameLabel' % windowID, 'In Frame ')\n addTextField('%s_setInFrame' % windowID, '', setInFrame,\n setInFrame, setInFrame)\n\n addText('%s_setOutFrameLabel' % windowID, 'Out Frame ')\n addTextField('%s_setOutFrame' % windowID, '', setOutFrame,\n setOutFrame, setOutFrame)", "def createWindow():\n\n windowName = \"ObjectSpawner\"\n\n if cmds.window(windowName, query=True, exists=True):\n cmds.deleteUI(windowName)\n\n cmds.window(windowName)\n\n populateUI()\n enableEditorDrop()\n\n cmds.showWindow(windowName)", "def create_widgets(self):\r\n self.create_containers()\r\n self.setup_containers()\r\n self.create_panel_widgets()\r\n self.setup_scrollbar()", "def __init__(self,subWindow,currentExperiment):\n super(AmoebaCreateExperiment,self).__init__()\n self.stage = 0\n self.currentExperiment = currentExperiment\n #self.XMLWriter = ExperimentXMLWriter()\n self.XMLWriter = Amoeba_experiment()\n if AMOEBA_CREATE_EXPERIMENT_DEBUG:\n print \"Create new experiment.\"\n\n self.subWindow = subWindow\n\n #Create a scroll bar for the summary area\n self.layout = QVBoxLayout()\n\n #Create the widgets\n self.SetFundamentalParameters = AmoebaSetFundamentalParameters()\n self.ShowAllInstruments = AmoebaShowAllInstuments()\n self.LinkInstruments = LinkInstrumentsForm()\n\n self.next = QPushButton(\"Next\")\n self.next.clicked.connect(self.next_pressed)\n\n #Add to the scroll widget\n self.layout.addWidget(self.SetFundamentalParameters)\n 
self.layout.addWidget(self.ShowAllInstruments)\n self.layout.addWidget(self.LinkInstruments)\n self.layout.addWidget(self.next)\n\n #Add the Widgets to the Subwindow\n self.setLayout(self.layout)\n\n #Hide all the widgets that will later be revealed.\n self.SetFundamentalParameters.hide()\n self.LinkInstruments.hide()", "def showUI(cls):\r\n win = cls(uiFile)\r\n win.create()\r\n return win", "def show(self, window):\r\n\r\n return", "def _setup_ui(self):\n\n self.window = ui.Widget()\n self.window.dimensions = ui.normalize_dimension((\n 0, 0,\n self.normalized_screen_resolution[0],\n self.normalized_screen_resolution[1]\n ))\n self.window.background_color = ImageColor.getcolor('#000000', 'RGB')\n\n interface_frame = ui.Widget(parent=self.window)\n interface_frame.dimensions = ui.normalize_dimension((\n self.preview_renderer.window[2],\n 0,\n self.normalized_screen_resolution[0] - self.preview_renderer.window[2],\n self.normalized_screen_resolution[1]\n ))\n interface_frame.background_color = ImageColor.getcolor('#ffffff', 'RGB')\n\n number = ui.LabelWidget(\"\",\n name=NAME_GET_STARTED,\n parent=interface_frame,\n align=\"center\",\n font_color=(0, 0, 0, 255))\n number.dimensions = (\n 5, 5,\n interface_frame.width - 10,\n interface_frame.height - 10\n )", "def __init__(self):\n self.r = Tk()\n self.r.title(\"Website Library 123\")\n self.r.geometry(\"500x250\")\n self.r.configure(background=\"#ddaf7e\")\n\n '''Configuring So that the First Window holds buttons'''\n\n self.title = Label(self.r, text=\"Website Library\", bg=\"#ddaf7e\", font=\"Calibri 26\").pack()\n self.divider = Label(self.r, text=\" \"*100, bg=\"#ddaf7e\").pack()\n self.saved = Button(self.r, text=\"View Saved Websites\", font=\"Verdana 15\", command=lambda: self.newwind(1)).pack(pady=10)\n self.addnew = Button(self.r, text=\"Add New Websites\", font=\"Verdana 15\", command=lambda: self.newwind(2)).pack(pady=10)\n self.r.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\n self.r.mainloop()", "def create(self, parent):\n self.widget = _QMainWindow(parent)", "def createWindow(self):\n\n # create window, set basic attributes\n w = gtk.Window(gtk.WINDOW_TOPLEVEL)\n w.set_size_request(*self.__def_win_size__)\n w.set_decorated(False)\n #w.fullscreen()\n #w.unfullscreen()\n w.set_title(self.__name__)\n w.connect(\"destroy\", gtk.main_quit)\n\n # declare buttons and their associated handlers\n controls = (\n (\"open_button\", gtk.ToolButton(gtk.STOCK_OPEN), self.onPlay),\n (\"play_button\", gtk.ToolButton(gtk.STOCK_MEDIA_PLAY), self.onPlay),\n (\"stop_button\", gtk.ToolButton(gtk.STOCK_MEDIA_STOP), self.onStop),\n (\"quit_button\", gtk.ToolButton(gtk.STOCK_QUIT), gtk.main_quit)\n )\n\n # as well as the container in which to put them\n box = gtk.HButtonBox()\n\n # for every widget, connect to its clicked signal and add it\n # to the enclosing box\n for name, widget, handler in controls:\n widget.connect(\"clicked\", handler)\n box.pack_start(widget, True)\n setattr(self, name, widget)\n\n viewer = gtk.DrawingArea()\n viewer.modify_bg(gtk.STATE_NORMAL, viewer.style.black)\n\n # we will need this later\n self.xid = None\n\n # now finally do the top-level layout for the window\n layout = gtk.VBox(False)\n layout.pack_start(viewer)\n\n # subclasses can override childWidgets() to supply\n # custom controls\n layout.pack_start(self.customWidgets(), False, False)\n layout.pack_end(box, False, False)\n w.add(layout)\n w.show_all()\n\n # we want to return only the portion of the window which will\n # be used to display the video, not the 
whole top-level\n # window. a DrawingArea widget is, in fact, an X11 window.\n return viewer", "def createWindow(self, type):\n # this = Browser(self.url())\n # this.show()\n\n self.popup = SequanixQWebView(**self.kwargs)\n self.popup.setObjectName(\"web_content\")\n self.popup.setWindowTitle(\"Sequana browser\")\n self.popup.page().windowCloseRequested.connect(self.popup.close)\n self.popup.show()\n return self.popup", "def openWindow(self):\n # self.showSessionAct.setEnabled(False)\n self.musketeers_widget = MusketeersWidget(parent=self)\n self.setCentralWidget(self.musketeers_widget)\n self.saveGroupMenu = QAction('Save Group', self.fileMenu)\n self.fileMenu.addAction(self.saveGroupMenu)\n self.saveGroupMenu.triggered.connect(self.musketeers_widget.session_widget.save_group)", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def create_widget(self):\n pass", "def create_window(self, img, roi, name):\n\n self.window = SpinBalanceDialog()\n\n # call the user-implemented functionality\n self.window.main(img, roi)\n # show the window\n self.window.show()\n\n return self.window" ]
[ "0.73238564", "0.70476335", "0.7020338", "0.69660527", "0.6964482", "0.695962", "0.6924835", "0.67237866", "0.6702971", "0.6695231", "0.6694868", "0.6665668", "0.666019", "0.6648684", "0.6624867", "0.6576935", "0.6558816", "0.6556843", "0.6536284", "0.65104073", "0.6472229", "0.6463434", "0.64361304", "0.6434522", "0.6388566", "0.6384997", "0.6373525", "0.6361389", "0.6342775", "0.6317117" ]
0.7192625
1
Testing Tuna is not null
def test_Tuna(self):
    tuna = Tuna("1", "2", "3", "4")
    self.assertIsNotNone(tuna)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testIsNullTrueAgain(self):\n val = is_null('') \n self.assertTrue(val)", "def test_non_thesis(non_thesis):\n assert non_thesis is None", "def nulltest():", "def testIsNullTrue(self):\n val = is_null(\"\") \n self.assertTrue(val)", "def is_null(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_null)", "def has_value(var) :\n return var != None", "def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Sample') is None)", "def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Sample') is None)", "def _test_empty(t):\n return t.is_empty()", "def is_not_none(e):\n return e is not None", "def is_null(value: Any) -> bool:\n return not value", "def not_none(value):\n return not value is None", "def compare_with_none():\n value = {};\n if value is not None:\n print(\"value is not none\")\n else:\n print(\"value is none\")", "def is_none(obj):\n return obj is None", "def is_null(self):\n return self.value is None", "def _val_is_null(self, val):\r\n return val is None", "def is_null(self) -> bool:\n return self.allele1 == -1 and self.allele2 == -1", "def testIsNullFalseAgain(self):\n val = is_null(5) \n self.assertFalse(val)", "def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Category') is None)", "def test_get_none(self):\n self.assertTrue(self.tester.get('Not_a_Category') is None)", "def test_valid_null(self):\n f = lws.valid_null\n assert f(None, '') is True\n assert f('asdasdasd', '') is True", "def testIsNullFalse(self):\n val = is_null(\"False\") \n self.assertFalse(val)", "def is_null(val):\n return (val is None)", "def test_null_has_no_value(self):\n\n class Node:\n my_metric = Metric(Int64)\n\n node = Node()\n node.my_metric = None\n my_metric = get_metric_object(node, 'my_metric')\n tahu_metric = my_metric.tahu_metric(node)\n self.assertFalse(tahu_metric.HasField('long_value'))", "def test_is_null(self):\n\n class Node:\n my_metric = Metric(Int64)\n\n node = Node()\n node.my_metric = None\n my_metric = get_metric_object(node, 'my_metric')\n tahu_metric = my_metric.tahu_metric(node)\n self.assertTrue(tahu_metric.is_null)", "def exist(x):\n return x is not None", "def assert_is_not_none(self, obj):\n if obj is None:\n raise AssertionError('unexpectedly None')", "def isnull(obj):\n return _isnull(obj)", "def isnull(inputobject):\n return isinstance(inputobject, matrix) and inputobject.y == 0", "def is_driver_null(data):\n return data == DRIVER_NULL" ]
[ "0.66639656", "0.6589096", "0.6544759", "0.6537418", "0.65249217", "0.6464309", "0.6438317", "0.6438317", "0.6416694", "0.6395729", "0.6372581", "0.6337188", "0.63306767", "0.6319348", "0.6262661", "0.62500733", "0.61865604", "0.6135527", "0.6083987", "0.6083987", "0.6083662", "0.60798484", "0.6076057", "0.60601246", "0.6056041", "0.595791", "0.59244365", "0.59136534", "0.58893275", "0.5858065" ]
0.6774843
0
Testing Tuna's setTunasFeatures method works
def test_setTunaFeatures(self):
    tuna = Tuna()
    array = ["1", "2", "3", "4"]
    tuna.setTunaFeatures(array)
    self.assertEqual(tuna.getTunaFeatures(), array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_getTunaFeatures(self):\n tuna = Tuna(\"1\", \"2\", \"3\", \"4\")\n array = [\"1\", \"2\", \"3\", \"4\"]\n self.assertEqual(tuna.getTunaFeatures(), array)", "def _initialize_with_tune_context(self, context: \"TuneContext\") -> None:\n raise NotImplementedError", "def prepare_features(self, wavs, stage):\n wavs, lens = wavs\n if stage == sb.Stage.TRAIN:\n if hasattr(self.modules, \"env_corrupt\"):\n wavs_noise = self.modules.env_corrupt(wavs, lens)\n wavs = torch.cat([wavs, wavs_noise], dim=0)\n lens = torch.cat([lens, lens])\n\n if hasattr(self.hparams, \"augmentation\"):\n wavs = self.hparams.augmentation(wavs, lens)\n\n # Choose what features we want to use\n # todo: support multiple features and feature concat\n target_feats = self.hparams.embedding_features\n\n FEATURE_EXTRACTOR = {\n # 'cqt': self.modules.cqt,\n # 'fbanks': self.modules.fbanks\n 'fastaudiogauss': self.modules.fastaudiogauss\n # 'ifr': self.modules.ifr\n # 'mag': self.modules.mag\n # 'mfcc': self.modules.mfcc\n # 'leaf': self.modules.leaf\n # 'tdfbanks': self.modules.tdfbanks\n # 'pcen': self.modules.pcen\n # 'sincnet': self.modules.sincnet\n # 'trainable_fbanks': self.modules.trainable_fbanks\n }\n\n if len(target_feats) == 1:\n # wavs = wavs.unsqueeze(1).cuda()\n feats = FEATURE_EXTRACTOR[target_feats[0]](wavs)\n # feats = torch.unsqueeze(feats, 1)\n # feats = torch.transpose(feats, 1,2)\n if target_feats[0]=='cqt':\n log_spec = 10.0 * torch.log10(torch.clamp(feats, min=1e-30))\n log_spec -= 10.0\n feats=log_spec\n feats = torch.transpose(feats, 1,2)\n else:\n feats = []\n for target in target_feats:\n temp = FEATURE_EXTRACTOR[target](wavs)\n if target=='cqt':\n temp = torch.transpose(temp, 1,2)\n feats.append(temp)\n f =feats[0]\n for i in range(1, len(feats)):\n f = torch.cat((f, feats[i]), dim=2)\n feats = f\n feats = self.modules.mean_var_norm(feats, lens)\n return feats, lens", "def _initialize_with_tune_context(self, context: \"TuneContext\") -> None:\n _ffi_api.MutatorInitializeWithTuneContext( # type: ignore # pylint: disable=no-member\n self, context\n )", "def test(self, test):\r\n self.ml_data.set_target(test[0])\r\n self.ml_data.set_features(test[1])\r\n if self.ml_data.target_type.all() == np.float64 or self.ml_data.target_type.all() == np.int64:\r\n self.model_qua.open()\r\n else:\r\n self.model_quali.open()", "def features(self, features):\n\n self._features = features", "def test_svm():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = r.randint(1, 10598)\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n\n # iris\n pres = \"Test pour le data set Iris (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Iris, quantum_instance)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Breast_cancer, quantum_instance)\n\n # digits (it's long so be careful)\n #pres = \"Test pour le data set Digits (difficile, classique)\"\n #test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n pres = \"Test pour le data set Wine (moyen, classique)\"\n test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func(pres, 25, 10, 2, True, Gaussian, quantum_instance)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n 
test_from_func(pres, 10, 15, 14, True, Sequence, quantum_instance)", "def set_features(self, features):\n self.features_ = list(features)", "def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)", "def set_features(self, features: list):\n self._features = features", "def test_intent_classifier_update_training_samples(self):\n pass", "def set_features(self, features: np.ndarray):\n self.features = features", "async def async_set_features(self, features):\n self._features = features", "def testSetup(self):\n \n train = QuizBowlData(None, \"\", self.vectorizer)\n train.vectorize(kTOY_TRAIN) \n model, optimizer = setup(train, 1.0)\n\n self.assertEqual(list(model.weight.size()), [2, 2])\n self.assertEqual(list(model.bias.size()), [2])", "def setup(self, num_qubit, fusion_enable, use_cu1):", "def test_regressors_hypertune(setup):\n # Load the data\n train_features, test_features, train_target, test_target = setup\n # Call the function that we would like to test\n tuned, tune_time = regressors.hyperTune(RandomForestRegressor(), train_features, train_target,\n grid=grid.rf_paramgrid(), folds=2, iters=1, jobs=1)\n # Assert if tuned is a dictionary\n assert type(tuned) == dict\n # Assert if tune_time is a float\n assert type(tune_time) == float", "def __init__(self, features=None):\n self.features = features", "def test_text_classifier_update_training_samples(self):\n pass", "def findFeatures(self):\n\t\tpass", "def get_feature_set_SA(tweet):\n features= {}\n return features", "def test_all_features(self):\n to_create = ['looktest1', 'looktest2', 'looktest3']\n for f in to_create:\n Feature(f).activate()\n\n all_features = Feature.all_features()\n self.assertEqual(len(all_features), len(to_create))\n for f in to_create:\n self.assertTrue(f in all_features)", "def feat():\n pass", "def train(self, features, labels):\n pass", "def load_features(self, features):\n pass\n # self.features = features", "def test(self):\n self.training = False", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def __init__(self, num_features):\n super(TLU, self).__init__()\n self.num_features = num_features\n self.tau = nn.parameter.Parameter(torch.Tensor(1, num_features, 1, 1), requires_grad=True)\n self.reset_parameters()", "def get_feature_set_PB(tweet):\n features= {\n 'text_length': np.log(len(tweet.text))\n } #ADD ADDITIONAL FEATURES\n if tweet.nrof_sademoticons>0:\n features['sademoticons'] = tweet.nrof_sademoticons\n if tweet.nrof_happyemoticons>0:\n features['happyemoticons'] = tweet.nrof_happyemoticons\n \n return features", "def main():\n \n # The following 5 command lines can be outcommented if the features are already created.\n # There is no need to process the data every single time.\n # Fine tuning the learning algorythm is much faster without that extra step.\n \n # by reading the train dataset the feature 
index is created.\n # First calling of the processdata function\n # Data limited to 300000\n featureIndexes = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000)\n print \"featureIndex generated!\"\n print len(featureIndexes)\n\n # Trainfeature is created using the indexfeatures...\n # Second calling of the processdata function\n trainFeatures, trainTargets, trainItemIds, trainPrices, trainUrls, trainPhones, trainEmails, trainLength = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000) # Original itemsLimit=300000\n\n # Building the test dataset... just like the training...\n testFeatures, testItemIds, testPrices, testUrls, testPhones, testEmails, testLength = processData(os.path.join(dataFolder,\"avito_test.tsv\"), featureIndexes)\n\n # Dumping data into file...\n # joblib.dump((trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds), os.path.join(dataFolder,\"train_data.pkl\"))\n joblib.dump((trainFeatures,trainTargets,trainItemIds,trainPrices,trainUrls,trainPhones,trainEmails,trainLength,\n testFeatures, testItemIds,testPrices,testUrls,testPhones,testEmails,testLength), os.path.join(dataFolder,\"SeparatedByCategory.pkl\"))\n\n\n # loading data pack...\n # trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds = joblib.load(os.path.join(dataFolder,\"train_data.pkl\"))\n\n #logging.info(\"Feature preparation done, fitting model...\")\n\n # Stochastic gradient model", "def setup_svm_classifier(training_data, y_training, testing_data, features, method=\"count\", ngrams=(1,1)):\n # generate x and y training data\n\n if method == \"count\":\n vec, x_training, x_testing = define_features_vectorizer(features, training_data, testing_data,ngramrange=ngrams)\n elif method == \"tfidf\":\n vec, x_training, x_testing = define_features_tfidf(features, training_data, testing_data,ngramrange=ngrams)\n else:\n print(\"Method has to be either count or tfidf\")\n return 1\n\n # train classifier\n\n model = SVMClassifier_scratch()\n model.fit(x_training, y_training)\n\n return model, vec, x_testing" ]
[ "0.6467389", "0.6120855", "0.5748472", "0.5742488", "0.5711489", "0.56948525", "0.56849676", "0.5643649", "0.5622461", "0.5612863", "0.5570076", "0.5564452", "0.5562412", "0.5538345", "0.5495508", "0.5485487", "0.54830647", "0.5479651", "0.5453535", "0.54154104", "0.5412377", "0.53939295", "0.5386679", "0.5345854", "0.53347707", "0.532772", "0.5326312", "0.53261715", "0.5326102", "0.53220785" ]
0.75750804
0
Testing Tuna's getTunasFeatures method works
def test_getTunaFeatures(self): tuna = Tuna("1", "2", "3", "4") array = ["1", "2", "3", "4"] self.assertEqual(tuna.getTunaFeatures(), array)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_setTunaFeatures(self):\n tuna = Tuna()\n array = [\"1\", \"2\", \"3\", \"4\"]\n tuna.setTunaFeatures(array)\n self.assertEqual(tuna.getTunaFeatures(), array)", "def findFeatures(self):\n\t\tpass", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def test_svm():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = r.randint(1, 10598)\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n\n # iris\n pres = \"Test pour le data set Iris (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Iris, quantum_instance)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Breast_cancer, quantum_instance)\n\n # digits (it's long so be careful)\n #pres = \"Test pour le data set Digits (difficile, classique)\"\n #test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n pres = \"Test pour le data set Wine (moyen, classique)\"\n test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func(pres, 25, 10, 2, True, Gaussian, quantum_instance)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func(pres, 10, 15, 14, True, Sequence, quantum_instance)", "def get_feature_set_SB(tweet):\n #pos-tag frequencies\n# print \"Tagged words in tweet: \", tweet.tagged_words\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# if tag=='PRtinf':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJS':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='ADJ':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='NP':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='DET':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n# elif tag=='P':\n# pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n# print \"Tag frequencies: \", pos_tag_freq\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n# print \"Additional frequencies: \", additional_freq\n# 
raw_input(\"Continue?\")\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n# print \"All features: \", features\n# raw_input(\"Continue?\")\n return features", "def test_all_features(self):\n to_create = ['looktest1', 'looktest2', 'looktest3']\n for f in to_create:\n Feature(f).activate()\n\n all_features = Feature.all_features()\n self.assertEqual(len(all_features), len(to_create))\n for f in to_create:\n self.assertTrue(f in all_features)", "def feat():\n pass", "def get_feature_set_SA(tweet):\n features= {}\n return features", "def get_feature_set_PB(tweet):\n features= {\n 'text_length': np.log(len(tweet.text))\n } #ADD ADDITIONAL FEATURES\n if tweet.nrof_sademoticons>0:\n features['sademoticons'] = tweet.nrof_sademoticons\n if tweet.nrof_happyemoticons>0:\n features['happyemoticons'] = tweet.nrof_happyemoticons\n \n return features", "def test_intent_classifier_get_training_samples(self):\n pass", "def test_ann_features():\n CQT(file_struct, FeatureTypes.ann_beatsync, sr=11025).features", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def get_feature_set_PA(tweet):\n features= {}\n return features", "def test_text_classifier_get_testing_samples(self):\n pass", "def supported_features(self):\n return SUPPORT_LGSMARTTV", "def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)", "def test_intent_classifier_get_testing_samples(self):\n pass", "def test_Tuna(self):\n tuna = Tuna(\"1\", \"2\", \"3\", \"4\")\n self.assertIsNotNone(tuna)", "def get_features(self):\n return []", "def test_get_vocabulary(self):\n\n for m in self.models:\n vocab = m.vocabulary\n self.assertTrue(isinstance(vocab, turicreate.SArray))\n self.assertEqual(len(vocab), 25)", "def test_predictor():", "def base_sample(self, tns_dir):\n lib = CDLL('./libsample.so') \n input_dir = tns_dir.encode()\n lib.getBaseFeatures.argtypes = [c_char_p]\n lib.getBaseFeatures.restype = c_float_p\n baseFeatures = lib.getBaseFeatures(input_dir)\n return baseFeatures", "def other_features_(tweet, cleaned_tweet):\n #print(\"WARNING>>>>>>>>>>>>>>>>> VADERSENTIMENT DISABLED\")\n sentiment = nlp.sentiment_analyzer.polarity_scores(tweet)\n\n words = cleaned_tweet #Get text only\n\n syllables = textstat.syllable_count(words) #count syllables in words\n num_chars = sum(len(w) for w in words) #num chars in words\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n\n\n twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://\n features = [FKRA, FRE, syllables, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['compound'],\n twitter_objs[2], twitter_objs[1],]\n #features = pandas.DataFrame(features)\n return features", "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = 
Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", "def test_text_classifier_get_training_samples(self):\n pass", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def feature():\n pass", "def test_intent_classifier_tsne_get(self):\n pass" ]
[ "0.71556205", "0.6340565", "0.61837447", "0.5806672", "0.5796584", "0.57937485", "0.57770115", "0.57497746", "0.5737169", "0.5710892", "0.5675815", "0.5668341", "0.5659601", "0.55376226", "0.5520661", "0.55035144", "0.5499606", "0.5498502", "0.54887605", "0.5487713", "0.54858834", "0.5479532", "0.5478857", "0.54773146", "0.5467673", "0.54622316", "0.54622316", "0.54622316", "0.54590034", "0.5450048" ]
0.7249121
0
Create "can_approve_estimated_completion_date" permission and add it to the "Admin" group.
def add_permissions(apps, schema_editor): Permission = apps.get_model("auth", "Permission") Group = apps.get_model("auth", "Group") ContentType = apps.get_model("contenttypes", "ContentType") permission, created = Permission.objects.get_or_create( codename="can_approve_estimated_completion_date", defaults={ "name": "Can approve estimated completion date", "content_type": ContentType.objects.get_for_model( apps.get_model("barriers", "Barrier") ), }, ) admin_group = Group.objects.get(name="Administrator") admin_group.permissions.add(permission) print( 'Permission "can_approve_estimated_completion_date" added to the "Admin" group.' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_authorize(cls, user, obj):\n if not obj.delivery.deadline.assignment_group.is_examiner(user):\n raise PermissionDenied()", "def write_authorize(cls, user, obj):\n if not models.AssignmentGroup.published_where_is_examiner(user).filter(id=obj.deadline.assignment_group.id):\n raise PermissionDenied()\n cls.write_authorize_examinercommon(user, obj)", "def add_user_with_status_granted(caller, user):\r\n if _add_user(user, CourseCreator.GRANTED):\r\n update_course_creator_group(caller, user, True)", "def need_admin_approval(self, need_admin_approval):\n\n self._need_admin_approval = need_admin_approval", "def create_permission(permission, event):\n setDefaultRoles(permission.title, ('Manager',))", "def write_authorize(cls, user, obj):\n if not obj.assignment_group.is_examiner(user):\n raise PermissionDenied()", "def assign_permissions(sender, instance, created, **kwargs):\n if created:\n assign_perm('view_strand', instance.owner.group, instance)\n assign_perm('change_strand', instance.saver, instance)\n assign_perm('delete_strand', instance.saver, instance)\n assign_perm('view_strand', instance.saver, instance)", "def post_access_control_list_create(self, resource_dict):\n pass", "def remove_permissions(apps, schema_editor):\n\n Permission = apps.get_model(\"auth\", \"Permission\")\n Group = apps.get_model(\"auth\", \"Group\")\n\n permission = Permission.objects.get(\n codename=\"can_approve_estimated_completion_date\",\n )\n\n admin_group = Group.objects.get(name=\"Administrator\")\n admin_group.permissions.remove(permission)\n permission.delete()\n\n print(\n 'Permission \"can_approve_estimated_completion_date\" removed from the \"Admin\" group.'\n )", "def update_admin_permission(self) -> None:\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()", "def allowed_organization_access_create(user):\n return user.has_perm(\"vnswww.add_organization\")", "def add_admin(user):\n _add_owner(\n _lookup_user(user).biv_id,\n _add_model(pam.Admin())\n )", "def need_admin_approval(self):\n return self._need_admin_approval", "def test_add_permission(self):\r\n self.assertFalse(self.creator_admin.has_add_permission(self.request))", "def set_permission(sender, instance, created, **kwargs):\n if created:\n assign_perm(\n \"website.can_see\",\n instance.author,\n instance,\n )", "def add_view_permissions(sender, instance, created, **kwargs):\n if created:\n group = Group.objects.get(name=settings.DEFAULT_GROUP_NAME)\n assign_perm('view_tag', group, instance)", "def can_assign(userid, group):", "def pre_access_control_list_create(self, resource_dict):\n pass", "def get_assign_permission(userid, group):", "def assign_contributor_permissions(obj, contributor=None):\n obj.set_permission(Permission.highest(), contributor or obj.contributor)", "def can_approve(self, user, **data):\n raise Return(False)", "def add_permission(self, label, aws_account_id, action_name):\r\n return self.connection.add_permission(self, label, aws_account_id, action_name)", "def grant_set_account_detail_perms(self, user):\n tx = self.iroha.transaction(\n [\n self.iroha.command(\n 
\"GrantPermission\",\n account_id=f\"{self.creator_account_details.gov_id}@afyamkononi\",\n permission=can_set_my_account_detail,\n )\n ],\n creator_account=f\"{user.gov_id}@afyamkononi\",\n )\n IrohaCrypto.sign_transaction(tx, user.private_key)\n return self.send_transaction_and_return_status(tx)", "def create_custom_permissions(self) -> None:\n self.add_permission_view_menu(\"all_datasource_access\", \"all_datasource_access\")\n self.add_permission_view_menu(\"all_database_access\", \"all_database_access\")\n self.add_permission_view_menu(\"all_query_access\", \"all_query_access\")\n self.add_permission_view_menu(\"can_share_dashboard\", \"Superset\")\n self.add_permission_view_menu(\"can_share_chart\", \"Superset\")", "def user_requested_access(user):\r\n user = CourseCreator.objects.get(user=user)\r\n if user.state != CourseCreator.GRANTED:\r\n user.state = CourseCreator.PENDING\r\n user.save()", "def test_add_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.add_facility'))", "def RequestedPermissions(self) -> _n_6_t_0:", "def UpdateAccessApprovalSettings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def get_permissions_map(self, created):\n current_user = self.context['request'].user\n company = get_object_or_404(models.Company, pk=self.data['id'])\n admins = company.admins\n accountants = company.accountants\n current_user.groups.add(admins)\n current_user.groups.add(accountants)\n assign_perm(\"change_group\", admins, admins)\n assign_perm(\"change_group\", admins, accountants)\n assign_perm(\"delete_group\", admins, admins)\n assign_perm(\"delete_group\", admins, accountants)\n return {\n 'view_company': [admins, accountants],\n 'change_company': [admins],\n 'delete_company': [admins]\n }", "def job_post_save(sender, instance, created, **kwargs):\n\n if created:\n jp = JobPermission.objects.create(\n job=instance,\n content_object=instance.user,\n permission=JobPermissionLevel.ADMIN.value,\n )\n jp.save()" ]
[ "0.61045885", "0.5988743", "0.5814144", "0.5738462", "0.5411797", "0.5401533", "0.5399722", "0.5363404", "0.53633904", "0.53270596", "0.52686125", "0.523226", "0.52310044", "0.5126954", "0.51135796", "0.5107107", "0.5074804", "0.5071565", "0.50627375", "0.5031271", "0.5008885", "0.49773142", "0.49759695", "0.49613628", "0.49506733", "0.4934149", "0.49292016", "0.4926475", "0.49207848", "0.48922914" ]
0.76646936
0
Remove "can_approve_estimated_completion_date" permission and remove it from the "Admin" group.
def remove_permissions(apps, schema_editor): Permission = apps.get_model("auth", "Permission") Group = apps.get_model("auth", "Group") permission = Permission.objects.get( codename="can_approve_estimated_completion_date", ) admin_group = Group.objects.get(name="Administrator") admin_group.permissions.remove(permission) permission.delete() print( 'Permission "can_approve_estimated_completion_date" removed from the "Admin" group.' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delPermission(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"admin_username\",\"perm_name\")\n request.getAuthNameObj().canDo(\"CHANGE ADMIN PERMISSIONS\")\n perm_actions.getActionManager().deletePermission(request[\"admin_username\"],request[\"perm_name\"])", "def update_admin_permission(self) -> None:\n session = self.appbuilder.get_session\n dag_resources = session.scalars(\n select(Resource).where(Resource.name.like(f\"{permissions.RESOURCE_DAG_PREFIX}%\"))\n )\n resource_ids = [resource.id for resource in dag_resources]\n\n perms = session.scalars(select(Permission).where(~Permission.resource_id.in_(resource_ids)))\n perms = [p for p in perms if p.action and p.resource]\n\n admin = self.find_role(\"Admin\")\n admin.permissions = list(set(admin.permissions) | set(perms))\n\n session.commit()", "def remove_permissions(self):\n self._activate()\n self.configure(state=\"disabled\")", "def add_permissions(apps, schema_editor):\n\n Permission = apps.get_model(\"auth\", \"Permission\")\n Group = apps.get_model(\"auth\", \"Group\")\n ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n\n permission, created = Permission.objects.get_or_create(\n codename=\"can_approve_estimated_completion_date\",\n defaults={\n \"name\": \"Can approve estimated completion date\",\n \"content_type\": ContentType.objects.get_for_model(\n apps.get_model(\"barriers\", \"Barrier\")\n ),\n },\n )\n\n admin_group = Group.objects.get(name=\"Administrator\")\n admin_group.permissions.add(permission)\n\n print(\n 'Permission \"can_approve_estimated_completion_date\" added to the \"Admin\" group.'\n )", "def remove_access(acl, list_to_edit):\n post_key = '%s_remove_' % list_to_edit\n removal_keys = [k for k in request.POST.keys() if k.startswith(post_key)]\n for key in removal_keys:\n model_type = models.UserGroup\n if list_to_edit.startswith('user'):\n model_type = models.UserProfile\n key_id = int(key.replace(post_key, ''))\n datastore_object = model_type.get_by_id(key_id)\n acl.__getattribute__(list_to_edit).remove(datastore_object.key())", "def DeleteAccessApprovalSettings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def remove_permission(self, label):\r\n return self.connection.remove_permission(self, label)", "def need_admin_approval(self, need_admin_approval):\n\n self._need_admin_approval = need_admin_approval", "def delPermissionValue(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"admin_username\",\"perm_name\",\"perm_value\")\n request.getAuthNameObj().canDo(\"CHANGE ADMIN PERMISSIONS\")\n perm_actions.getActionManager().deleteFromPermValues(request[\"admin_username\"],request[\"perm_name\"],\n request[\"perm_value\"])", "def make_donor(self):\n self.user.is_staff = False\n self.user.is_superuser = False\n self.user.groups.remove(get_group_by_name(self.ADMIN_GROUP))\n self.user.groups.remove(get_group_by_name(self.AMBASSADOR_GROUP))\n self.user.save()", "def test_remove_facility_pt1(self):\n self.assertFalse(self.admin.has_perm('auth.remove_facility'))", "def unmake_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.remove_role_from_user(self, 'admin')\n db.session.commit()", "def Run(self, args):\n p = parent.GetParent(args)\n return settings.Delete(name=('%s/accessApprovalSettings' % p))", "def strip_restrict_access(self):\n att_name = \"restrictAccess\"\n 
att_dict = self.top_level_dataset.attrib\n if att_name in att_dict:\n del att_dict[att_name]", "def __check_removed_permissions(self) -> None:\n for permission in Permission.objects.all():\n if not self.__is_permission_allowed_to_delete(permission):\n continue\n\n if self.__is_permission_in_groups(permission.codename):\n raise PermissionInUse(f'Permission {permission.codename} is used in groups. Delete it first.')\n\n permission.delete()\n\n self.stdout.write(f'Removed {permission.codename} permission')", "def test_permission_remove_all_actions_for_user(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous *')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def remove_admin(user):\n user_biv_id = _lookup_user(user).biv_id\n admin = pam.Admin.query.select_from(pam.BivAccess).filter(\n pam.BivAccess.source_biv_id == user_biv_id,\n pam.BivAccess.target_biv_id == pam.Admin.biv_id\n ).one()\n db.session.delete(\n pam.BivAccess.query.filter(\n pam.BivAccess.source_biv_id == user_biv_id,\n pam.BivAccess.target_biv_id == admin.biv_id\n ).one()\n )\n db.session.delete(admin)", "def _remove_group_rights(object_id, workspace, request_user):\n group = group_api.get_group_by_id(object_id)\n workspace_api.remove_group_read_access_to_workspace(workspace, group, request_user)\n workspace_api.remove_group_write_access_to_workspace(workspace, group, request_user)", "def revoke_set_account_detail_perms(self, user):\n tx = self.iroha.transaction(\n [\n self.iroha.command(\n \"RevokePermission\",\n account_id=f\"{self.creator_account_details.gov_id}@afyamkononi\",\n permission=can_set_my_account_detail,\n )\n ],\n creator_account=f\"{user.gov_id}@afyamkononi\",\n )\n IrohaCrypto.sign_transaction(tx, user.private_key)\n return self.send_transaction_and_return_status(tx)", "def pre_access_control_list_delete(self, resource_id):\n pass", "def remove_admin(self, project_id, user_id):\n current_user = request.environ.get('repoze.who.identity')['user']\n user = controller_globals._get_user_from_email(current_user.email)\n\n # make sure we're actually the project lead\n if not self._current_user_leads_review(project_id):\n return \"<font color='red'>tsk, tsk. 
you're not the project lead, %s.</font>\" % user.fullname\n\n leader_to_remove = Session.query(model.User).filter_by(id=user_id).one()\n review = self._get_review_from_id(project_id)\n review.leaders.remove(leader_to_remove)\n Session.add(review)\n Session.commit()\n\n redirect(url(controller=\"review\", action=\"admin\", project_id=project_id))", "def remove_access(self, access_group):\n\n if self.has_auth_access(access_group):\n self.access_groups.remove(access_group)", "def admins_remove(request):\n if len(models.User.admins()) > 1:\n username = request.params['remove']\n user = models.User.get_by_username(username)\n user.admin = False\n return httpexceptions.HTTPSeeOther(\n location=request.route_url('admin_admins'))", "def write_authorize(cls, user, obj):\n if not obj.delivery.deadline.assignment_group.is_examiner(user):\n raise PermissionDenied()", "def test_remove_facility_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_facility'))", "def test_remove_learner_group_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.remove_learner_group', self.learner_groups[1]))", "def on_model_delete(self, model):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n if not user_has_permission(current_user, 'can_delete', 'advisorapplicants'):\n abort(403)", "def has_remove_permissions(self, obj):\n return True", "def remove_permission(self, perm):\n if self.has_permission(perm):\n self.permissions -= perm", "def test_permission_remove_one_action_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('permission remove anonymous TICKET_MODIFY')\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)" ]
[ "0.63204193", "0.60559267", "0.5868938", "0.5853292", "0.5768508", "0.5714673", "0.5648865", "0.56440014", "0.5544115", "0.55229086", "0.55072224", "0.54663765", "0.5419387", "0.5413648", "0.53388613", "0.5335905", "0.53285515", "0.5316271", "0.5315049", "0.5301109", "0.529115", "0.5287806", "0.5279603", "0.5268119", "0.5261689", "0.52296805", "0.5201819", "0.5200636", "0.5189475", "0.5185729" ]
0.81578374
0
export GPU for AD
def export_gpu(entity=None): status = False exportGrp = config.geoGrp res = entity.task_res() libPath = entity.libPath() if res: abcName = entity.libName(config.libName.get('gpu'), res, ext='abc') # name without ext basename = os.path.splitext(abcName)[0] gpuName = '{0}/{1}'.format(libPath, abcName) start = pub_utils.file_time(gpuName) # export GPU command result = maya_utils.exportGPUCacheGrp(exportGrp, libPath, basename, time='still') end = pub_utils.file_time(gpuName) success = pub_utils.is_file_new(start, end) if success: return True, 'Success %s' % gpuName else: return False, 'Failed to export Gpu %s' % gpuName else: return False, 'No res found'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetGPU():\n return option['device_id']", "def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")", "def add_gpu_and_mpi_marks():\n pass", "def create_gpu_device():\n d1 = dpctl.SyclDevice(\"gpu\")\n d2 = dpctl.select_gpu_device()\n assert d1 == d2\n print_device(d1)\n return d1", "def SetGPU(id):\n global option\n option['device_id'] = id", "def test_gpu_cuda_code() -> None:\n if get_from_environ(\"DISABLE_GPU_FOR_TESTING\") is not None:\n print(\"GPU payload disabled for testing\")\n return\n\n # if the command exists it can run on the hardware below\n proc = subprocess.Popen([\"nvidia-smi\"], stdout=subprocess.PIPE)\n stdout, _ = proc.communicate()\n str_stdout = stdout.decode()\n assert \"NVIDIA-SMI\" in str_stdout, str_stdout\n assert proc.returncode == 0\n # search the history for the CUDA implementation", "def setup_gpu(use_gpu: int, silent=None) -> None:\n if silent is None:\n local_msg = Printer()\n else:\n local_msg = Printer(no_print=silent, pretty=not silent)\n if use_gpu >= 0:\n local_msg.info(f\"Using GPU: {use_gpu}\")\n require_gpu(use_gpu)\n else:\n local_msg.info(\"Using CPU\")\n if gpu_is_available():\n local_msg.info(\"To switch to GPU 0, use the option: --gpu-id 0\")", "def OnGPU(gpu_id):\n device_option = caffe2_pb2.DeviceOption()\n device_option.device_type = workspace.GpuDeviceType\n device_option.device_id = gpu_id\n return device_option", "def _copy_to_gpu(self):\n self.dispatch('on_texture')", "def set_gpu(gpus):\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus", "def cuda(self):\n if torch.cuda.is_available():\n self.automata = self.automata.cuda()\n self.inv_automata = self.inv_automata.cuda()\n self.action = self.action.cuda()\n self.inv_action = self.inv_action.cuda()", "def gpu(device_id=0):\n return Context('gpu', device_id)", "def gpu(device_id=0):\n return Context('gpu', device_id)", "def gpu(self, gpu):\n\n self._gpu = gpu", "def add_gpu(self, gpu):\n gpu_worker = GPUCmdRunner(self.host, 'gpu', gpu)\n self.gpu_workers[gpu] = gpu_worker\n gpu_worker.start()\n self.log.info('GPU worker %d added' % gpu)", "def run():\n # get arguments\n args = parse_args()\n assert args.batch_size % args.gpu_num == 0\n assert args.gru_hidden_size % 2 == 0\n\n # create a logger\n logger = logging.getLogger(\"GACM\")\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n check_path(args.save_dir)\n check_path(args.load_dir)\n check_path(args.result_dir)\n check_path(args.summary_dir)\n if args.log_dir:\n check_path(args.log_dir)\n file_handler = logging.FileHandler(args.log_dir + time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time())) + '.txt')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n logger.info('Running with args : {}'.format(args))\n\n logger.info('Checking the directories...')\n for dir_path in [args.save_dir, args.result_dir, args.summary_dir]:\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n \n global Dataset\n global Agent\n logger.info('Agent version: {}.0'.format(args.agent_version))\n logger.info('Dataset version: {}.0'.format(args.dataset_version))\n logger.info('Checking the directories...')\n Dataset = 
importlib.import_module('dataset{}'.format(args.dataset_version)).Dataset\n Agent = importlib.import_module('Agent{}'.format(args.agent_version)).Agent\n \n if args.pretrain:\n pretrain(args)\n if args.train:\n train(args)\n if args.test:\n test(args)\n if args.rank:\n rank(args)\n if args.generate_synthetic_dataset:\n generate_synthetic_dataset(args)\n logger.info('run done.')", "def set_gpu(gpu):\r\n os.environ['CUDA_VISIBLE_DEVICES'] = gpu\r\n tf_config = tf.ConfigProto()\r\n tf_config.gpu_options.allow_growth = True\r\n return tf_config", "def cuda(self):\n for i in self.modules:\n if torch.cuda.is_available():\n self.modules[i] = self.modules[i].cuda()", "def benchmark_xla_fakedistort_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n distortions=True,\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)", "def __init__(self, *args, **kwargs):\n super(MadryEtAlMultiGPU, self).__init__(*args, **kwargs)\n self.structural_kwargs += ['ngpu']", "def main(argv):\n parser = OptionParser()\n parser.add_option(\n \"--output-dir\",\n help=\"Output directory for generated files. Defaults to chromium root \"\n \"directory.\")\n parser.add_option(\n \"-v\", \"--verbose\", action=\"store_true\", help=\"Verbose logging output.\")\n parser.add_option(\n \"-c\", \"--check\", action=\"store_true\",\n help=\"Check if output files match generated files in chromium root \"\n \"directory. Use this in PRESUBMIT scripts with --output-dir.\")\n\n (options, _) = parser.parse_args(args=argv)\n\n # This script lives under src/gpu/command_buffer.\n script_dir = os.path.dirname(os.path.abspath(__file__))\n assert script_dir.endswith(os.path.normpath(\"src/gpu/command_buffer\"))\n # os.path.join doesn't do the right thing with relative paths.\n chromium_root_dir = os.path.abspath(script_dir + \"/../..\")\n\n # Support generating files under gen/ and for PRESUBMIT.\n if options.output_dir:\n output_dir = options.output_dir\n else:\n output_dir = chromium_root_dir\n os.chdir(output_dir)\n\n # This script lives under gpu/command_buffer, cd to base directory.\n build_cmd_buffer_lib.InitializePrefix(\"WebGPU\")\n gen = build_cmd_buffer_lib.GLGenerator(\n options.verbose, \"2018\", _FUNCTION_INFO, _NAMED_TYPE_INFO,\n chromium_root_dir)\n gen.ParseGLH(\"gpu/command_buffer/webgpu_cmd_buffer_functions.txt\")\n\n gen.WriteCommandIds(\"gpu/command_buffer/common/webgpu_cmd_ids_autogen.h\")\n gen.WriteFormat(\"gpu/command_buffer/common/webgpu_cmd_format_autogen.h\")\n gen.WriteFormatTest(\n \"gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h\")\n gen.WriteGLES2InterfaceHeader(\n \"gpu/command_buffer/client/webgpu_interface_autogen.h\")\n gen.WriteGLES2ImplementationHeader(\n \"gpu/command_buffer/client/webgpu_implementation_autogen.h\")\n gen.WriteGLES2InterfaceStub(\n \"gpu/command_buffer/client/webgpu_interface_stub_autogen.h\")\n gen.WriteGLES2InterfaceStubImpl(\n \"gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h\")\n gen.WriteGLES2Implementation(\n \"gpu/command_buffer/client/webgpu_implementation_impl_autogen.h\")\n gen.WriteGLES2ImplementationUnitTests(\n \"gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h\")\n gen.WriteCmdHelperHeader(\n \"gpu/command_buffer/client/webgpu_cmd_helper_autogen.h\")\n # Note: No gen.WriteServiceImplementation\n # Note: No gen.WriteServiceUnitTests\n gen.WriteServiceUtilsHeader(\n 
\"gpu/command_buffer/service/webgpu_cmd_validation_autogen.h\")\n gen.WriteServiceUtilsImplementation(\n \"gpu/command_buffer/service/\"\n \"webgpu_cmd_validation_implementation_autogen.h\")\n\n build_cmd_buffer_lib.Format(gen.generated_cpp_filenames, output_dir,\n chromium_root_dir)\n\n if gen.errors > 0:\n print(\"build_webgpu_cmd_buffer.py: Failed with %d errors\" % gen.errors)\n return 1\n\n check_failed_filenames = []\n if options.check:\n for filename in gen.generated_cpp_filenames:\n if not filecmp.cmp(os.path.join(output_dir, filename),\n os.path.join(chromium_root_dir, filename)):\n check_failed_filenames.append(filename)\n\n if len(check_failed_filenames) > 0:\n print('Please run gpu/command_buffer/build_webgpu_cmd_buffer.py')\n print('Failed check on autogenerated command buffer files:')\n for filename in check_failed_filenames:\n print(filename)\n return 1\n\n return 0", "def configure_gpu_tf():\n\n try:\n # locate available devices & set required environment variables\n available_device_ids = GPUtil.getFirstAvailable(order='first', maxLoad=0.7, maxMemory=0.7, attempts=1, interval=10)\n available_device_id = available_device_ids[0]\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = str(available_device_id)\n print(f\"\\n GPU Found! running on GPU:{available_device_id}\\n\")\n\n # set GPU configuration (use all GPU memory if device 0, else use <50% of memory)\n tf.debugging.set_log_device_placement(False)\n physical_gpu = tf.config.experimental.list_physical_devices('GPU')[0]\n\n if available_device_id == 0:\n tf.config.experimental.set_memory_growth(physical_gpu, True)\n else:\n tf.config.experimental.set_virtual_device_configuration(\n physical_gpu,\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4500)]\n )\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n assert len(logical_gpus) == 1, \"error creating virtual GPU to fractionally use memory\"\n\n # if we can't find a GPU, or they are all busy, default to using CPU\n except RuntimeError:\n print(\"\\n No GPUs available... running on CPU\\n\")\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'", "def main():\n # Set hyperparameters: batch size, learning rate, hidden layers, activ. 
fn\n bs = 64\n epochs = 1000\n lr = 10 ** (-5)\n h_layers = [32, 16]\n a_fn = F.relu\n\n # Construct Dataset from file; form DataLoaders\n train_ds, valid_ds = form_datasets(DATA_PATH / SAMPLE_FILE)\n train_dl, valid_dl = form_dataloaders(train_ds, valid_ds, bs, preprocess)\n\n # Gather target inverse scaler fn\n t_inv_scaler = train_ds.target_scaler[\"stargazers\"]\n\n # Intialize model (w/ GPU support), optimization method, and loss function\n model = dff.DFF(D_in=21, D_hid=h_layers, D_out=1, a_fn=a_fn)\n model.to(DEV)\n opt = optim.Adam(model.parameters(), lr=lr)\n loss_func = F.mse_loss\n fit_args = (model, loss_func, opt, train_dl, valid_dl, t_inv_scaler)\n\n # Generate descriptive filename string for csv logs\n prefix = \"FINAL_\"\n model_str = dff.hyper_str(h_layers, lr, opt, a_fn, bs, epochs, prefix)\n print(model_str)\n\n # Train, validate, and store loss\n dff.fit(epochs, *fit_args, LOG_PATH, model_str)", "def load_device():", "def get_gpu_info(**kwargs):\n # Set GPU info fields\n conn_gpu_count = None\n source_db_gpu_count = None\n source_db_gpu_mem = None\n source_db_gpu_driver_ver = \"\"\n source_db_gpu_name = \"\"\n if kwargs[\"no_gather_conn_gpu_info\"]:\n logging.debug(\n \"--no-gather-conn-gpu-info passed, \"\n + \"using blank values for source database GPU info fields \"\n + \"[run_gpu_count, run_gpu_mem_mb] \"\n )\n else:\n logging.debug(\n \"Gathering source database GPU info fields \"\n + \"[run_gpu_count, run_gpu_mem_mb] \"\n + \"using pymapd connection info. \"\n )\n conn_hardware_info = kwargs[\"con\"]._client.get_hardware_info(\n kwargs[\"con\"]._session\n )\n conn_gpu_count = conn_hardware_info.hardware_info[0].num_gpu_allocated\n if conn_gpu_count == 0 or conn_gpu_count is None:\n no_gather_nvml_gpu_info = True\n if conn_gpu_count == 0:\n logging.warning(\n \"0 GPUs detected from connection info, \"\n + \"using blank values for source database GPU info fields \"\n + \"If running against cpu-only server, make sure to set \"\n + \"--no-gather-nvml-gpu-info and --no-gather-conn-gpu-info.\"\n )\n else:\n no_gather_nvml_gpu_info = kwargs[\"no_gather_nvml_gpu_info\"]\n source_db_gpu_count = conn_gpu_count\n try:\n source_db_gpu_mem = int(\n conn_hardware_info.hardware_info[0].gpu_info[0].memory\n / 1000000\n )\n except IndexError:\n logging.error(\"GPU memory info not available from connection.\")\n if no_gather_nvml_gpu_info:\n logging.debug(\n \"--no-gather-nvml-gpu-info passed, \"\n + \"using blank values for source database GPU info fields \"\n + \"[gpu_driver_ver, run_gpu_name] \"\n )\n elif (\n kwargs[\"conn_machine_name\"] == \"localhost\"\n or kwargs[\"gather_nvml_gpu_info\"]\n ):\n logging.debug(\n \"Gathering source database GPU info fields \"\n + \"[gpu_driver_ver, run_gpu_name] \"\n + \"from local GPU using pynvml. 
\"\n )\n import pynvml\n\n pynvml.nvmlInit()\n source_db_gpu_driver_ver = pynvml.nvmlSystemGetDriverVersion().decode()\n for i in range(source_db_gpu_count):\n handle = pynvml.nvmlDeviceGetHandleByIndex(i)\n # Assume all cards are the same, overwrite name value\n source_db_gpu_name = pynvml.nvmlDeviceGetName(handle).decode()\n pynvml.nvmlShutdown()\n # If gpu_count argument passed in, override gathered value\n if kwargs[\"gpu_count\"]:\n source_db_gpu_count = kwargs[\"gpu_count\"]\n if kwargs[\"gpu_name\"]:\n source_db_gpu_name = kwargs[\"gpu_name\"]\n gpu_info = {\n \"conn_gpu_count\": conn_gpu_count,\n \"source_db_gpu_count\": source_db_gpu_count,\n \"source_db_gpu_mem\": source_db_gpu_mem,\n \"source_db_gpu_driver_ver\": source_db_gpu_driver_ver,\n \"source_db_gpu_name\": source_db_gpu_name,\n }\n return gpu_info", "def test_nvidia_driver2():\r\n a = numpy.random.rand(10000).astype(\"float32\")\r\n cuda.shared_constructor(a)\r\n assert theano.sandbox.cuda.use.device_number is not None", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def device(request):\n d = request.param()\n\n # enable GPU error checking\n if isinstance(d, hoomd.device.GPU):\n d.gpu_error_checking = True\n\n return d", "def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(num_gpus=1)\n self._run_benchmark(params)", "def dist_setting(current_gpu, model, args):\n print(\"channels_last : {}\".format(args.channels_last))\n if args.channels_last:\n args.memory_format = torch.channels_last\n else:\n args.memory_format = torch.contiguous_format\n\n if args.apex:\n args.lr = args.lr*float(args.batch_size*args.world_size)/256.\n args.current_gpu = current_gpu\n if args.current_gpu is not None:\n print(\"Use GPU: {} for training\".format(args.current_gpu))\n\n if args.multigpus_distributed:\n args.rank = args.num_gpus * args.host_num + args.current_gpu\n dist.init_process_group(backend=args.backend,\n rank=args.rank, world_size=args.world_size)\n logger.info('Initialized the distributed environment: \\'{}\\' backend on {} nodes. '.format(\n args.backend, dist.get_world_size()) + 'Current host rank is {}. 
Number of gpus: {}'.format(\n dist.get_rank(), args.num_gpus))\n else:\n args.rank = 0\n\n if args.sync_bn:\n import apex\n print(\"using apex synced BN\")\n model = apex.parallel.convert_syncbn_model(model)\n\n if args.multigpus_distributed:\n if args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n args.batch_size = int(args.batch_size / args.num_gpus)\n logger.info(\"Batch size for each GPU: {}\".format(args.batch_size))\n if not args.apex:\n model.cuda(args.current_gpu)\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.current_gpu])\n else:\n if not args.apex:\n model.cuda()\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n if not args.apex:\n model = model.cuda(args.current_gpu)\n else:\n if not args.apex:\n model = torch.nn.DataParallel(model).cuda()\n\n return model, args" ]
[ "0.62095964", "0.6097989", "0.59566474", "0.5911981", "0.585888", "0.5828839", "0.57508516", "0.57136184", "0.5679205", "0.56655836", "0.5627468", "0.55777365", "0.55777365", "0.5537934", "0.55370134", "0.55342036", "0.54773647", "0.5465165", "0.54110056", "0.53959876", "0.53759116", "0.53304577", "0.5323701", "0.5319425", "0.5319053", "0.5317626", "0.53170764", "0.5316251", "0.5303475", "0.5291678" ]
0.70596236
0
Test to see if profile for leothelion can be viewed anon and logged in
def test_view_profile(self): LOGGER.debug("Test GET /rango/view/leothelion/ for anon user") anon_view_response = self.client.get('/rango/view/leothelion/') self.assertContains(anon_view_response, "[email protected]") LOGGER.debug("Test GET /rango/view/leothelion/ for logged in user") self.client.login(username='leothelion', password='rawr') logged_in_view_response = self.client.get('/rango/view/leothelion/') self.assertContains(logged_in_view_response, "[email protected]") """Test to see if profile for hungryhippo can be viewed anon and logged in""" LOGGER.debug("Test GET /rango/view/hungyhippo/ for anon user") anon_view_response = self.client.get('/rango/view/hungryhippo/') self.assertNotContains(anon_view_response, "[email protected]") self.assertContains(anon_view_response, "Hungry") LOGGER.debug("Test GET /rango/view/hungryhippo/ for logged in user") self.client.login(username='hungryhippo', password='food') logged_in_view_response = self.client.get('/rango/view/hungryhippo/') self.assertContains(logged_in_view_response, "[email protected]") self.assertContains(anon_view_response, "Hippo")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "def test_06_user_public_profile(self):\r\n # As Anonymou user\r\n url = \"/account/%s\" % self.name\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def test_06_user_public_profile(self):\r\n # As Anonymou user\r\n url = \"/account/%s\" % self.name\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should not be shown to anonymous users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should not be shown to authenticated users\"\r\n assert dom.find(id='enforce_privacy') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Public User Profile page should be shown to admin users\"\r\n assert dom.find(id='enforce_privacy') is None, err_msg\r\n self.signout()", "def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_council_privileges():\n return True\n return False", "def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text", "def user_auth_inst(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n inst = UserInformation.objects.get(user=user)\n if(inst.user_instructor):\n return True\n return False", "def test_func(self):\n member_to_view = self.get_object()\n is_self = self.request.user.rfid == member_to_view.rfid\n view_others = self.request.user.has_permission(\"core.view_member\")\n return view_others or is_self", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def is_logged_in():\n return 'user' in session", "def has_permission(self, request, view):\n usuario = request.user\n return str(usuario) == \"AnonymousUser\"", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: 
disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_club_privileges():\n return True\n return False", "def logged_in(request):\n return request.current_user is not None", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def testPersonIsUser(self):\n member = self.portal.portal_membership.getMemberById('abc123')\n self.failUnless(member,\"%s\" % member)", "def test_anonymous_cannot_get_userprofileview(dclient):\n resp = dclient.get(\"/api/record/profile/\", follow=True)\n assert resp.status_code == 403", "def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})", "def test_profile_api_anon(self):\n self.client.logout()\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 403)", "def has_object_permission(self, request, view, obj):\n if request.user.is_manager or request.user.is_staff or request.user.is_superuser:\n return True\n try:\n return request.user in obj.course.instructors.all()\n except AttributeError:\n # activitylevel => has no course element\n return request.user.is_instructor", "def test_view_all_users_profiles(self):\n self.authorize_user(self.user_login_details)\n response = self.client.get(self.profiles_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def can_access(user, page):\n page_groups = PageViewGroup.objects.filter(page=page)\n if user.is_anonymous():\n return page_groups.count() == 0\n else:\n groups = page_groups.filter(group__in=user.groups.all())\n return page_groups.count() == 0 or groups.count() > 0", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def can_view(self, user):\r\n return True", "def is_personal(self):\n return self.user_id is not None", "def get_is_interested(self, obj):\n # pylint: disable=no-member\n user = self.context['request'].user\n if not user.is_authenticated:\n return None\n profile = UserProfile.objects.get(user=user)\n return profile in obj.interested_users.all()", "def get_is_self(self, obj: Profile) -> bool:\n request: HttpRequest = self.context.get('request')\n if request:\n if request.user.is_authenticated:\n return obj == request.user.profile\n return False", "def has_read_permission(request):\n return request.user.is_authenticated", "def test_func(self):\n return self.request.user.is_active # any active user", "def user_in_session():\n return 'user_id' in login_session", "def test_user_role_anonymous(self):\r\n self.assertEqual(\r\n 'student',\r\n access.get_user_role(self.anonymous_user, self.course_key)\r\n )" ]
[ "0.69051546", "0.6754408", "0.67433447", "0.66899693", "0.6672601", "0.6667041", "0.6631646", "0.66299933", "0.6568902", "0.6545809", "0.6544475", "0.65014887", "0.6479417", "0.64605063", "0.64309174", "0.6402466", "0.63988775", "0.6398008", "0.6369444", "0.6363515", "0.6351834", "0.63441396", "0.63434225", "0.6338555", "0.63332486", "0.62838906", "0.6276851", "0.6271203", "0.6266736", "0.6258917" ]
0.70540863
0
Takes in the direction the camera is pointing and the camera origin and returns a cam2world matrix.
def create_cam2world_matrix(forward_vector, origin, device=None): """""" forward_vector = normalize_vecs(forward_vector) up_vector = torch.tensor([0, 1, 0], dtype=torch.float, device=device) \ .expand_as(forward_vector) left_vector = normalize_vecs( torch.cross(up_vector, forward_vector, dim=-1)) up_vector = normalize_vecs( torch.cross(forward_vector, left_vector, dim=-1)) rotation_matrix = torch.eye(4, device=device) \ .unsqueeze(0) \ .repeat(forward_vector.shape[0], 1, 1) rotation_matrix[:, :3, :3] = torch.stack( (-left_vector, up_vector, -forward_vector), axis=-1) translation_matrix = torch.eye(4, device=device) \ .unsqueeze(0) \ .repeat(forward_vector.shape[0], 1, 1) translation_matrix[:, :3, 3] = origin cam2world = translation_matrix @ rotation_matrix return cam2world
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_to_matrix(x, y):\n x_pos = round(x * ((MATRIX_SIZE_X - 1)/(FRAME_W - 1)))\n y_pos = round(y * ((MATRIX_SIZE_Y - 1)/(FRAME_H - 1)))\n\n x_pos = (MATRIX_SIZE_X - 1) - x_pos #invert x direction (left and right) to account for camera perspective\n\n return x_pos, y_pos", "def cam_to_world(cam_point, world_to_cam):\n # cam_point = np.array([cam_pose[0], cam_pose[1], cam_pose[2]])\n\n obj_vector = np.concatenate((cam_point, np.ones(1))).reshape((4, 1))\n world_point = np.dot(world_to_cam, obj_vector)\n\n world_point = [p[0] for p in world_point]\n return world_point[0:3]", "def camera_matrix(self) -> TransformationMatrixType:\n return numpy.matmul(\n self.rotation_matrix(*self.rotation),\n displacement_matrix(*-numpy.array(self.location)),\n )", "def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result", "def camera_2_world(self, o, d):\r\n wo = self.camera2world_point @ ti.Vector([o.x, o.y, o.z, 1.0])\r\n wd = self.camera2world_vec @ d\r\n return ti.Vector([wo.x,wo.y,wo.z]), wd", "def camera_coords_to_world_coords(point, cam_height, cam_angle):\n\n # adjust the axis order\n point = np.array([point[2], point[0], point[1]])\n\n # calculate the vectors of the camera axis in the desired coordinate system\n cam_direction = np.array([np.cos(cam_angle), 0, -np.sin(cam_angle)])\n z = cam_direction\n x = np.cross(np.array([0, 0, 1]), cam_direction)\n y = np.cross(z, x)\n\n # transposed rotation matrix\n rotation = np.vstack([x, y, z])\n\n # translation vector\n translation = np.array([0, 0, cam_height])\n\n return rotation @ (point - translation)", "def compute_right_camera_pose(left_camera_to_world, left_to_right):\n left_world_to_camera = np.linalg.inv(left_camera_to_world)\n right_world_to_camera = np.matmul(left_to_right, left_world_to_camera)\n right_camera_to_world = np.linalg.inv(right_world_to_camera)\n return right_camera_to_world", "def translation_matrix(direction):\n M = numpy.identity(4)\n M[:3, 3] = direction[:3]\n return M", "def camera_matrix(e, p, t):\n # Translates all points such that the camera is centered at the origin.\n T = np.array([[1, 0, 0, -e[0]],\n [0, 1, 0, -e[1]],\n [0, 0, 1, -e[2]],\n [0, 0, 0, 1]])\n\n # Set up orthonormal basis.\n w = e - p\n w = w / np.linalg.norm(w)\n u = np.cross(t, w)\n u = u / np.linalg.norm(u)\n v = np.cross(w, u)\n\n # Rotate points such that camera is aligned with UVW-axes (g -> -z-axis).\n R = np.array([[u[0], u[1], u[2], 0],\n [v[0], v[1], v[2], 0],\n [w[0], w[1], w[2], 0],\n [ 0, 0, 0, 1]])\n return R.dot(T)", "def camera_to_world(self, X):\n raise NotImplementedError", "def world_to_camera(self, X):\n raise NotImplementedError", "def VIC_direction_matrix(lat_step, lon_step):\n base = (\n (0, 0), # filler - 0 is not used in the encoding\n (1, 0), # 1 = north\n (1, 1), # 2 = northeast\n (0, 1), # 3 = east\n (-1, 1), # 4 = southeast\n (-1, 0), # 5 = south\n (-1, -1), # 6 = southwest\n (0, -1), # 7 = west\n (1, -1), # 8 = northwest\n (0, 0), # 9 = outlet\n )\n lat_dir = int(math.copysign(1, lat_step))\n lon_dir = int(math.copysign(1, lon_step))\n return tuple(\n (lat_dir * lat_base, lon_dir * lon_base) for lat_base, lon_base in base\n )", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = 
np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n\n origin = np.array([location.x, location.y, location.z])\n return matrix, origin", "def transform_camera_pose_to_world_pose(self):\n for pose in self.close_positions_camera:\n self.close_positions_world.append(self.get_world_pose_for_camera_pose(pose))\n\n for pose in self.medium_positions_camera:\n self.medium_positions_world.append(self.get_world_pose_for_camera_pose(pose))\n\n for pose in self.far_positions_camera:\n self.far_positions_world.append(self.get_world_pose_for_camera_pose(pose))", "def get_projection_matrix(left, right, bottom, top):\r\n zNear = -25.0\r\n zFar = 25.0\r\n inv_z = 1.0 / (zFar - zNear)\r\n inv_y = 1.0 / (top - bottom)\r\n inv_x = 1.0 / (right - left)\r\n mat = [[(2.0 * inv_x), 0.0, 0.0, (-(right + left) * inv_x)],\r\n [0.0, (2.0 * inv_y), 0.0, (-(top + bottom) * inv_y)],\r\n [0.0, 0.0, (-2.0 * inv_z), (-(zFar + zNear) * inv_z)],\r\n [0.0, 0.0, 0.0, 1.0]]\r\n return mat", "def determine_rotation_matrix(self, origin, angle, scale):\n # scaling will be ignored at this step\n rotation_matrix = cv2.getRotationMatrix2D(origin, angle * 180 / np.pi, scale)\n return rotation_matrix", "def worldToCameraCentricXform(self):\n return self.rotateAlignXform().dot(self.translateToOriginXform())", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def local_coords(origin_object, target_location) -> MyVec3:\n\t# Originally by GooseFairy https://github.com/ddthj/Gosling/blob/master/Episode%203%20Code/Util.py\n\torigin_loc = MyVec3(origin_object.location)\n\ttarget_location = MyVec3(target_location)\n\tx = (target_location - origin_loc) * origin_object.rotation.matrix[0]\n\ty = (target_location - origin_loc) * origin_object.rotation.matrix[1]\n\tz = (target_location - origin_loc) * origin_object.rotation.matrix[2]\n\treturn MyVec3(x, y, z)", "def get_direction_matrix(self) -> int:", "def polarCameraToCartesian(self):\n x = self.cameraPolar[0] * np.sin(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n y = self.cameraPolar[0] * np.cos(self.cameraPolar[2] * np.pi / 180)\n z = self.cameraPolar[0] * np.cos(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n self.cameraPosition = [x, y, z]", "def get_rotation_matrix_2D(transform):\n yaw = np.deg2rad(transform.rotation.yaw)\n cy = np.cos(yaw)\n sy = np.sin(yaw)\n\n rotation_matrix_2D = np.array([[cy, -sy],\n [sy, cy]])\n return rotation_matrix_2D", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 
0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def get_matrix(transform):\n\n rotation = transform.rotation\n location = transform.location\n c_y = np.cos(np.radians(rotation.yaw))\n s_y = np.sin(np.radians(rotation.yaw))\n c_r = np.cos(np.radians(rotation.roll))\n s_r = np.sin(np.radians(rotation.roll))\n c_p = np.cos(np.radians(rotation.pitch))\n s_p = np.sin(np.radians(rotation.pitch))\n matrix = np.matrix(np.identity(4))\n matrix[0, 3] = location.x\n matrix[1, 3] = location.y\n matrix[2, 3] = location.z\n matrix[0, 0] = c_p * c_y\n matrix[0, 1] = c_y * s_p * s_r - s_y * c_r\n matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r\n matrix[1, 0] = s_y * c_p\n matrix[1, 1] = s_y * s_p * s_r + c_y * c_r\n matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r\n matrix[2, 0] = s_p\n matrix[2, 1] = -c_p * s_r\n matrix[2, 2] = c_p * c_r\n return matrix", "def camera_transformation_from_pose(azimutal, elevation):\n azimutal, elevation = azimutal * 2. * np.pi / 360., elevation * 2. * np.pi / 360.\n azimutal *= -1.\n elevation *= -1.\n r_y = np.array([[np.cos(elevation), 0, np.sin(elevation)],\n [0, 1, 0],\n [-np.sin(elevation), 0, np.cos(elevation)]])\n r_z = np.array([[np.cos(azimutal), -np.sin(azimutal), 0],\n [np.sin(azimutal), np.cos(azimutal), 0],\n [0, 0, 1]])\n r = r_z.dot(r_y)\n # world_to_camera matrix, camera_to_world matrix\n return r, np.linalg.inv(r)", "def _calculate_camera_array(self):\n look_list = []\n\n row_step_vec = normalize(self.look_up) * self.interspatial_distance\n col_step_vec = self._get_look_right() * self.interspatial_distance\n\n # Start at the top left camera position\n for i in range(self.spatial_rows):\n row_movement = row_step_vec * (-i)\n row_look_from = self.look_from + row_movement\n row_look_to = self.look_to + row_movement\n\n for j in range(self.spatial_cols):\n col_movement = col_step_vec * j\n cam_look_from = row_look_from + col_movement\n cam_look_to = row_look_to + col_movement\n\n look_list.append((cam_look_from, cam_look_to))\n\n return look_list", "def pixel2cam(self, depth, intrinsics_inv):\n b, _, h, w = depth.size()\n i_range = torch.arange(0, h).view(1, h, 1).expand(1,h,w).type_as(depth) # [1, H, W]\n j_range = torch.arange(0, w).view(1, 1, w).expand(1,h,w).type_as(depth) # [1, H, W]\n ones = torch.ones(1,h,w).type_as(depth)\n pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]\n ###pixel_coords is an array of camera pixel coordinates (x,y,1) where x,y origin is the upper left corner of the image.\n current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).view(b,3,-1) #.contiguous().view(b, 3, -1) # [B, 3, H*W]\n #cam_coords = intrinsic_inv.expand(b,3,3).bmm(current_pixel_coords).view(b,3,h,w)\n cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b,3,h,w)\n return cam_coords * depth", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def transformation_matrix(self) -> TransformationMatrixType:\n # camera translation\n if self._transformation_matrix is None:\n self._transformation_matrix = numpy.matmul(\n 
self.projection_matrix,\n self.camera_matrix,\n )\n\n return self._transformation_matrix", "def get_projection_mapping(self, cam_pos, cam_rot, local_frame=False, range1=True):\n\n cam_pos = cam_pos.copy()\n cam_pos[2] += self.h_offset\n\n K = self.make_camera_matrix()\n R_opt = self.make_optical_rotation_matrix()\n T_opt = affines.compose([0, 0, 0], R_opt, [1.0, 1.0, 1.0])\n T_opt_inv = np.linalg.inv(T_opt)\n T = self.make_world_to_camera_mat(cam_pos, cam_rot)\n Tinv = np.linalg.inv(T)\n\n # Get the map position encodings (MxMx3)\n pts_w = self.get_world_coord_grid()[..., np.newaxis]\n\n # Get the coordinates in camera frame:\n if not local_frame:\n # If we're using a global map frame, transform the map coordinates into the camera frame\n pts_cam = np.matmul(Tinv[np.newaxis, ...], pts_w)\n else:\n # If we're using local frame, camera is centered in the map, but pitch must still be taken into account!\n # TODO: Fix this and add pitch\n pts_cam = pts_w\n pts_cam[:, 0:2] = pts_cam[:, 0:2] - self.map_world_size_px / 2\n\n # Get the coordinates in optical frame\n pts_opt = np.matmul(T_opt_inv[np.newaxis, ...], pts_cam)\n\n # Get the 3D coordinates of the map pixels in the image frame:\n pts_img = np.matmul(K[np.newaxis, ...], pts_opt[:, 0:3, :])\n\n # Convert to homogeneous (image-plane) coordinates\n valid_z = pts_img[:, 2:3, :] > 0\n\n pts_img = pts_img / (pts_img[:, 2:3] + 1e-9)\n #pts_img[:, 0] = pts_img[:, 0] / (pts_img[:, 2] + 1e-9)\n #pts_img[:, 1] = pts_img[:, 1] / (pts_img[:, 2] + 1e-9)\n\n # Mask out all the map elements that don't project on the image\n valid_y1 = pts_img[:, 0:1, :] > 0\n valid_y2 = pts_img[:, 0:1, :] < self.res_x\n valid_x1 = pts_img[:, 1:2, :] > 0\n valid_x2 = pts_img[:, 1:2, :] < self.res_y\n\n # Throw away the homogeneous Z coordinate\n pts_img = pts_img[:, 0:2]\n\n valid = valid_y1 * valid_y2 * valid_x1 * valid_x2 * valid_z\n\n # PyTorch takes projection mappings in -1 to 1 range:\n if range1:\n pts_img[:, 0] = (-pts_img[:, 0] + self.res_x / 2) / (self.res_x / 2)\n pts_img[:, 1] = (-pts_img[:, 1] + self.res_y / 2) / (self.res_y / 2)\n\n # Make sure the invalid points are out of range\n pts_img = pts_img * valid + 2 * np.ones_like(pts_img) * (1 - valid)\n else:\n pts_img = pts_img * valid\n\n # Remove the extra 1-length dimension\n pts_img = pts_img.squeeze()\n\n # Reshape into the 2D map representation\n pts_img = np.reshape(pts_img, [self.map_size_px, self.map_size_px, 2])\n\n return pts_img" ]
[ "0.6586465", "0.6576377", "0.6565738", "0.6500614", "0.64989525", "0.6413747", "0.61989534", "0.6179077", "0.6152811", "0.6126261", "0.59229445", "0.5909412", "0.59015703", "0.5897311", "0.57085705", "0.568021", "0.5642607", "0.5634", "0.56108457", "0.558792", "0.5587084", "0.558518", "0.5580177", "0.5580177", "0.55495024", "0.55494285", "0.55394554", "0.5538579", "0.5532079", "0.5524214" ]
0.78033996
0
Perturbs z_vals and points; samples a camera position and maps points in camera space to world space.
def transform_sampled_points(points, z_vals, ray_directions, device, h_stddev=1, v_stddev=1, h_mean=math.pi * 0.5, v_mean=math.pi * 0.5, mode='normal'): bs, num_rays, num_steps, channels = points.shape points, z_vals = perturb_points(points, z_vals, ray_directions, device) camera_origin, pitch, yaw = sample_camera_positions( bs=bs, r=1, horizontal_stddev=h_stddev, vertical_stddev=v_stddev, horizontal_mean=h_mean, vertical_mean=v_mean, device=device, mode=mode) forward_vector = normalize_vecs(-camera_origin) cam2world_matrix = create_cam2world_matrix(forward_vector, camera_origin, device=device) points_homogeneous = torch.ones( (points.shape[0], points.shape[1], points.shape[2], points.shape[3] + 1), device=device) points_homogeneous[:, :, :, :3] = points # (bs, 4, 4) @ (bs, 4, num_rays x n_samples) -> (bs, 4, num_rays x n_samples) -> (bs, num_rays, n_samples, 4) transformed_points = torch.bmm( cam2world_matrix, points_homogeneous.reshape(bs, -1, 4).permute(0, 2, 1)) \ .permute(0, 2, 1) \ .reshape(bs, num_rays, num_steps, 4) transformed_points = transformed_points[..., :3] # (bs, num_rays, n_samples, 3) # (bs, 3, 3) @ (bs, 3, num_rays) -> (bs, 3, num_rays) -> (bs, num_rays, 3) transformed_ray_directions = torch.bmm( cam2world_matrix[..., :3, :3], ray_directions.reshape(bs, -1, 3).permute(0, 2, 1)) \ .permute(0, 2, 1) \ .reshape(bs, num_rays, 3) homogeneous_origins = torch.zeros((bs, 4, num_rays), device=device) homogeneous_origins[:, 3, :] = 1 # (bs, 4, 4) @ (bs, 4, num_rays) -> (bs, 4, num_rays) -> (bs, num_rays, 4) transformed_ray_origins = torch.bmm( cam2world_matrix, homogeneous_origins) \ .permute(0, 2, 1) \ .reshape(bs, num_rays, 4) transformed_ray_origins = transformed_ray_origins[..., :3] # (bs, num_rays, 3) return transformed_points, z_vals, transformed_ray_directions, transformed_ray_origins, pitch, yaw
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perturb_points(points,\n z_vals,\n ray_directions,\n device):\n distance_between_points = z_vals[:, :, 1:2, :] - z_vals[:, :, 0:1, :] # (n, num_rays, 1, 1)\n offset = (torch.rand(z_vals.shape, device=device) - 0.5) \\\n * distance_between_points # [-0.5, 0.5] * d, (n, num_rays, n_samples, 1)\n z_vals = z_vals + offset\n\n points = points + \\\n offset * ray_directions.unsqueeze(2) # (n, num_rays, n_samples, 3)\n return points, z_vals", "def __ComputeApproximateVals_RzRyRz(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0.2, 0.2, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def project(points, camera_params):\n # print(camera_params.shape)\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = 2360*np.ones(camera_params.shape[0])\n # np.ones()\n # n = np.sum(points_proj**2, axis=1)\n r = 1\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def convert_depth_frame_to_pointcloud(depth_image, camera_intrinsics ):\r\n\t\r\n\t[height, width] = depth_image.shape\r\n\r\n\tnx = np.linspace(0, width-1, width)\r\n\tny = np.linspace(0, height-1, height)\r\n\tu, v = np.meshgrid(nx, ny)\r\n\tx = (u.flatten() - camera_intrinsics.ppx)/camera_intrinsics.fx\r\n\ty = (v.flatten() - camera_intrinsics.ppy)/camera_intrinsics.fy\r\n\r\n\tz = depth_image.flatten() / 1000;\r\n\tx = np.multiply(x,z)\r\n\ty = np.multiply(y,z)\r\n\r\n\tx = x[np.nonzero(z)]\r\n\ty = y[np.nonzero(z)]\r\n\tz = z[np.nonzero(z)]\r\n\r\n\treturn x, y, z", "def prepare_data(cameras, frame_points_3d, frame_points_2d, keyframe_idx):\n camera_params = np.empty((0, 9))\n for c in cameras:\n R, _ = cv2.Rodrigues(c.R_mat)\n camera = build_camera(R, c.t)\n camera_params = np.append(camera_params, [camera], axis=0)\n\n camera_indices = []\n point_indices = []\n points_2d = np.empty((0, 2))\n points_3d = np.empty((0, 3))\n\n camera_id = 0\n pt_id_counter = 0\n for k, pts_2d in enumerate(frame_points_2d):\n if k > 0:\n halfway_idx = keyframe_idx[k] - keyframe_idx[k - 1] - 1\n points_2d = np.vstack((points_2d, frame_points_2d[k-1][halfway_idx]))\n points_3d = np.vstack((points_3d, frame_points_3d[k-1][halfway_idx]))\n camera_indices += [camera_id for _ in 
range(len(frame_points_2d[k-1][halfway_idx]))]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + len(frame_points_2d[k-1][halfway_idx]))]\n pt_id_counter = pt_id_counter + len(frame_points_2d[k-1][halfway_idx])\n\n if k > 1:\n end_idx = keyframe_idx[k + 1] - keyframe_idx[k - 1] - 3\n points_2d = np.vstack((points_2d, frame_points_2d[k-2][end_idx]))\n points_3d = np.vstack((points_3d, frame_points_3d[k-2][end_idx]))\n camera_indices += [camera_id for _ in range(len(frame_points_2d[k-2][end_idx]))]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + len(frame_points_2d[k-2][end_idx]))]\n pt_id_counter = pt_id_counter + len(frame_points_2d[k-2][end_idx])\n\n points_2d = np.vstack((points_2d, frame_points_2d[k][0]))\n points_3d = np.vstack((points_3d, frame_points_3d[k][0]))\n camera_indices += [camera_id for _ in range(pts_2d.shape[1])]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + pts_2d.shape[1])]\n\n camera_id += 1\n pt_id_counter = pt_id_counter + pts_2d.shape[1]\n\n return camera_params, np.asarray(camera_indices), np.asarray(point_indices), points_3d, points_2d", "def convert_depth_frame_to_pointcloud(depth_image, camera_intrinsics ):\n\t\n\t[height, width] = depth_image.shape\n\n\tnx = np.linspace(0, width-1, width)\n\tny = np.linspace(0, height-1, height)\n\tu, v = np.meshgrid(nx, ny)\n\tx = (u.flatten() - camera_intrinsics.ppx)/camera_intrinsics.fx\n\ty = (v.flatten() - camera_intrinsics.ppy)/camera_intrinsics.fy\n\n\tz = depth_image.flatten() / 1000;\n\tx = np.multiply(x,z)\n\ty = np.multiply(y,z)\n\n\tx = x[np.nonzero(z)]\n\ty = y[np.nonzero(z)]\n\tz = z[np.nonzero(z)]\n\n\treturn x, y, z", "def project_points(self, points_3d, camera):\n batch_size = points_3d.shape[0]\n device = points_3d.device\n cam_t = torch.stack([camera[:, 1], camera[:, 2], 2 * self.focal_length / (self.img_res * camera[:, 0] + 1e-09)], dim=-1)\n camera_center = camera.new_zeros([batch_size, 2])\n rot_t = torch.eye(3, device=device, dtype=points_3d.dtype).unsqueeze(0).expand(batch_size, -1, -1)\n joints_2d = perspective_projection(points_3d, rotation=rot_t, translation=cam_t, focal_length=self.focal_length, camera_center=camera_center)\n return joints_2d", "def as_point_cloud(self):\n far = 1000.0 # max depth in meters.\n intrinsic_mat = self.camera_setup.get_intrinsic_matrix()\n width, height = self.camera_setup.width, self.camera_setup.height\n # 2d pixel coordinates\n pixel_length = width * height\n u_coord = repmat(np.r_[0:width:1], height, 1).reshape(pixel_length)\n v_coord = repmat(np.c_[0:height:1], 1, width).reshape(pixel_length)\n normalized_depth = np.reshape(self.frame, pixel_length)\n\n # p2d = [u,v,1]\n p2d = np.array([u_coord, v_coord, np.ones_like(u_coord)])\n\n # P = [X,Y,Z]\n p3d = np.dot(inv(intrinsic_mat), p2d)\n p3d *= normalized_depth * far\n\n # [[X1,Y1,Z1],[X2,Y2,Z2], ... 
[Xn,Yn,Zn]]\n locations = np.asarray(np.transpose(p3d))\n # Transform the points in 3D world coordinates.\n to_world_transform = self.camera_setup.get_unreal_transform()\n point_cloud = to_world_transform.transform_points(locations)\n return point_cloud", "def fun(params,n_cameras,n_points,camera_indices,point_indices,points_3d , points_2d):\n camera_params = params[:n_cameras * 6].reshape((n_cameras, 6))\n # points_3d = points_3d.T\n # points_3d = params[n_cameras * 7:].reshape((n_points, 3))\n # print(point_indices)\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()", "def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()", "def ImageToGround_GivenZ(self, imagePoints, Z_values):\n cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = cameraPoints.T\n pars = self.exteriorOrientationParameters\n X0 = pars[0]\n Y0 = pars[1]\n Z0 = pars[2]\n\n T = np.array([[X0], [Y0], [Z0]])\n\n omega = pars[3]\n phi = pars[4]\n kappa = pars[5]\n R = Compute3DRotationMatrix(omega, phi, kappa)\n\n f = self.camera.focalLength\n\n # allocating memory for return array\n groundPoints = []\n\n for i in range(len(cameraPoints[1])):\n camVec = np.insert(cameraPoints[:, i], np.size(cameraPoints), -f)\n lam = (Z_values - Z0) / (np.dot(R[2, :], camVec))\n\n X = X0 + lam * np.dot(R[0, :], camVec)\n Y = Y0 + lam * np.dot(R[1, :], camVec)\n\n xy = [X, Y, Z_values]\n groundPoints.append(xy)\n\n groundPoints = np.array(groundPoints)\n\n return groundPoints", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def debug_filter_points(self, points):\n cloud_msg = PointCloud2()\n cloud_msg.header.frame_id = \"map\"\n cloud_msg.header.stamp = rospy.Time.now() \n xyz = [[p.pose.position.x, p.pose.position.y, p.pose.position.z] for p in points] \n point_cloud = pc2.create_cloud_xyz32(cloud_msg.header, xyz)\n self._points_publisher.publish(point_cloud)", "def __ComputeApproximateVals(self, 
cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0, 0, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. \n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def get_things1(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_max):\n # Initializing the arrays\n points_3d = []\n points_2d = []\n camera_ind = []\n points_ind = []\n cam_params = []\n\n dst_3d = kp_3d\n dst_2d = kp_2d\n src_3d = map_3d\n src_2d = map_2d\n src_cam = map_cam\n low_bound = []\n up_bound = []\n my_min = 0\n\n # Updating the Camera parameters in map and setting the bounds for the update \n for i in range(my_min,my_max+1):\n cam_param = [map_view[i,0], map_view[i,1], map_view[i,2], map_view[i,3], map_view[i,4], map_view[i,5], f,0,0]\n cam_params.append(cam_param)\n\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n 
low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n \n # Updating the Camera parameters for frame and setting the bounds for the update\n r = (R.from_matrix((H[0:3, 0:3]))).as_rotvec()\n t = H[:,3]\n cam_param = [r[0], r[1], r[2], t[0], t[1], t[2], f, 0, 0]\n cam_params.append(cam_param)\n \n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n\n new_cam = len(cam_params)-1\n cam_params = np.array(cam_params).reshape(-1,9)\n count = 0\n \n # listing variables to iterate \n l1 = []\n l2 = []\n count = 0\n \n for m in comp_list:\n count+=1\n l1.append(m.queryIdx)\n l2.append(m.trainIdx)\n\n l1 = np.array(l1).reshape(1,-1)\n l2 = np.array(l2).reshape(1,-1)\n l = np.vstack((l1,l2))\n l_fin = l[:,l[1, :].argsort()]\n j = 0\n count = len(points_3d)\n prev = -1\n final_l1 = []\n final_l2 = []\n final_des = []\n\n # Iterating through the list made and making sure no duplicates\n while(j<(len(l_fin[0]))):\n i1 = l_fin[0,j]\n i2 = l_fin[1,j]\n if(i2!=prev):\n # Map points insertion\n \n check = 0\n for ii in range(len(src_2d[i1])):\n m_2d = src_2d[i1][ii]\n check = 1\n ind = int(src_cam[i1][ii])\n points_2d.append([int((m_2d[0]%(2*cx))-cx), int((m_2d[1]%(2*cy))-cy),0])\n\n points_ind.append(count)\n camera_ind.append(ind)\n final_l1.append(i1)\n final_l2.append(0)\n \n # Taking Mean Desciptor if needed un comment 2 lines below\n # x = ((map_des[i1]*len(src_2d[i1]))+des[i2])/(len(src_2d[i1])+1)\n # map_des[i1] = x\n \n if(check==1):\n # Frame points insersion\n points_2d.append([int((dst_2d[i2,0])-cx), int((dst_2d[i2,1])-cy), 0])\n points_ind.append(count)\n camera_ind.append(new_cam)\n final_l1.append(i2)\n final_l2.append(1)\n wld_pt = src_3d[i1]\n points_3d.append([wld_pt[0], wld_pt[1], wld_pt[2]])\n prev = i2\n count = len(points_3d)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n src_2d[i1].append([int((dst_2d[i2,0])), int((dst_2d[i2,1]))])\n j+=1\n \n # Final Output\n cam_params = np.array(cam_params).reshape(-1,9)\n points_3d = np.array(points_3d)\n points_2d = np.array(points_2d)\n camera_ind = np.array(camera_ind).reshape(len(camera_ind))\n points_ind = np.array(points_ind).reshape(len(points_ind))\n final_l1 = np.array(final_l1)\n final_l2 = np.array(final_l2)\n return cam_params, points_3d, points_2d, camera_ind, points_ind, final_l1, final_l2, low_bound, up_bound, map_des, src_2d", "def imageFromCamera(self, points, hide_backpoints=True):\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array([-points[..., 0] * self.focallength_x_px / points[..., 2] + self.center_x_px,\n points[..., 1] * self.focallength_y_px / points[..., 2] + self.center_y_px]).T\n if hide_backpoints:\n transformed_points[points[..., 2] > 0] = np.nan\n return transformed_points", "def 
project_point_cloud_to_orthographic_depth_image(pts, campos, viewdir, up, lrbt, im_hw):\n Rt = transforms.lookat_matrix(campos, campos + viewdir, up=up)\n transformed = Rt.dot(np.hstack((pts, np.ones([pts.shape[0], 1]))).T).T\n\n x = (transformed[:, 0] - lrbt[0]) / (lrbt[1] - lrbt[0]) * (im_hw[1])\n y = (transformed[:, 1] - lrbt[2]) / (lrbt[3] - lrbt[2]) * (im_hw[0])\n d = transformed[:, 2]\n\n ret = np.full(im_hw, fill_value=np.nan)\n for i in range(x.shape[0]):\n yi = im_hw[0] - int(round(y[i]))\n xi = int(round(x[i]))\n if yi < 0 or yi >= im_hw[0] or xi < 0 or xi >= im_hw[1]:\n continue\n if np.isnan(ret[yi, xi]):\n ret[yi, xi] = d[i]\n else:\n ret[yi, xi] = min(ret[yi, xi], d[i])\n\n return ret", "def getProjections(self): \n x, y, z = self.XYZCoordinate\n origin = self.SkeletonPoints[0]\n self.coorOrigin = origin\n self.XYProjections = [GeometryToolBox.projected_point(p, origin, x, y) for p in self.SkeletonPoints]\n self.XZProjections = [GeometryToolBox.projected_point(p, origin, x, z) for p in self.SkeletonPoints]", "def GroundToImage_RzRyRz(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix_RzRyRz\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))", "def get_real_samples(self):\n # Define the camera poses\n if not self.opt.same_view:\n if self.opt.full_sphere_sampling:\n self.cam_pos = uniform_sample_sphere(\n radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n axis=None, angle=self.opt.angle,\n theta_range=np.deg2rad(self.opt.theta), phi_range=np.deg2rad(self.opt.phi))\n else:\n self.cam_pos = uniform_sample_sphere(\n radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n axis=None, angle=self.opt.angle,\n theta_range=np.deg2rad(self.opt.theta),\n phi_range=np.deg2rad(self.opt.phi))\n if self.opt.full_sphere_sampling_light:\n self.light_pos1 = uniform_sample_sphere(\n radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n axis=None, angle=self.opt.angle,\n theta_range=np.deg2rad(self.opt.theta),\n phi_range=np.deg2rad(self.opt.phi))\n # self.light_pos2 = uniform_sample_sphere(radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n # axis=self.opt.axis, angle=np.deg2rad(40),\n # theta_range=self.opt.theta, phi_range=self.opt.phi)\n else:\n print(\"inbox\")\n light_eps = 0.15\n self.light_pos1 = np.random.rand(self.opt.batchSize,3)*self.opt.cam_dist + light_eps\n self.light_pos2 = np.random.rand(self.opt.batchSize,3)*self.opt.cam_dist + light_eps\n\n # TODO: deg2rad in all the angles????\n\n # Create a splats 
rendering scene\n large_scene = create_scene(self.opt.width, self.opt.height,\n self.opt.fovy, self.opt.focal_length,\n self.opt.n_splats)\n lookat = self.opt.at if self.opt.at is not None else [0.0, 0.0, 0.0, 1.0]\n large_scene['camera']['at'] = tch_var_f(lookat)\n\n # Render scenes\n data, data_depth, data_normal, data_cond = [], [], [], []\n inpath = self.opt.vis_images + '/'\n for idx in range(self.opt.batchSize):\n # Save the splats into the rendering scene\n if self.opt.use_mesh:\n if 'sphere' in large_scene['objects']:\n del large_scene['objects']['sphere']\n if 'disk' in large_scene['objects']:\n del large_scene['objects']['disk']\n if 'triangle' not in large_scene['objects']:\n large_scene['objects'] = {\n 'triangle': {'face': None, 'normal': None,\n 'material_idx': None}}\n samples = self.get_samples()\n\n large_scene['objects']['triangle']['material_idx'] = tch_var_l(\n np.zeros(samples['mesh']['face'][0].shape[0],\n dtype=int).tolist())\n large_scene['objects']['triangle']['face'] = Variable(\n samples['mesh']['face'][0].cuda(), requires_grad=False)\n large_scene['objects']['triangle']['normal'] = Variable(\n samples['mesh']['normal'][0].cuda(),\n requires_grad=False)\n else:\n if 'sphere' in large_scene['objects']:\n del large_scene['objects']['sphere']\n if 'triangle' in large_scene['objects']:\n del large_scene['objects']['triangle']\n if 'disk' not in large_scene['objects']:\n large_scene['objects'] = {\n 'disk': {'pos': None,\n 'normal': None,\n 'material_idx': None}}\n large_scene['objects']['disk']['radius'] = tch_var_f(\n np.ones(self.opt.n_splats) * self.opt.splats_radius)\n large_scene['objects']['disk']['material_idx'] = tch_var_l(\n np.zeros(self.opt.n_splats, dtype=int).tolist())\n large_scene['objects']['disk']['pos'] = Variable(\n samples['splats']['pos'][idx].cuda(),\n requires_grad=False)\n large_scene['objects']['disk']['normal'] = Variable(\n samples['splats']['normal'][idx].cuda(),\n requires_grad=False)\n\n # Set camera position\n if not self.opt.same_view:\n large_scene['camera']['eye'] = tch_var_f(self.cam_pos[idx])\n else:\n large_scene['camera']['eye'] = tch_var_f(self.cam_pos[0])\n\n large_scene['lights']['pos'][0,:3]=tch_var_f(self.light_pos1[idx])\n #large_scene['lights']['pos'][1,:3]=tch_var_f(self.light_pos2[idx])\n\n # Render scene\n res = render(large_scene,\n norm_depth_image_only=self.opt.norm_depth_image_only,\n double_sided=True, use_quartic=self.opt.use_quartic)\n\n # Get rendered output\n if self.opt.render_img_nc == 1:\n depth = res['depth']\n im_d = depth.unsqueeze(0)\n else:\n depth = res['depth']\n im_d = depth.unsqueeze(0)\n im = res['image'].permute(2, 0, 1)\n target_normal_ = get_data(res['normal'])\n target_normalmap_img_ = get_normalmap_image(target_normal_)\n im_n = tch_var_f(\n target_normalmap_img_).view(im.shape[1], im.shape[2],\n 3).permute(2, 0, 1)\n\n # Add depth image to the output structure\n if self.iteration_no % self.opt.save_image_interval == 0:\n imsave((inpath + str(self.iteration_no) +\n 'real_normalmap_{:05d}.png'.format(idx)),\n target_normalmap_img_)\n imsave((inpath + str(self.iteration_no) +\n 'real_depth_{:05d}.png'.format(idx)), get_data(depth))\n # imsave(inpath + str(self.iteration_no) + 'real_depthmap_{:05d}.png'.format(idx), im_d)\n # imsave(inpath + str(self.iteration_no) + 'world_normalmap_{:05d}.png'.format(idx), target_worldnormalmap_img_)\n data.append(im)\n data_depth.append(im_d)\n data_normal.append(im_n)\n data_cond.append(large_scene['camera']['eye'])\n # Stack real samples\n real_samples = 
torch.stack(data)\n real_samples_depth = torch.stack(data_depth)\n real_samples_normal = torch.stack(data_normal)\n real_samples_cond = torch.stack(data_cond)\n self.batch_size = real_samples.size(0)\n if not self.opt.no_cuda:\n real_samples = real_samples.cuda()\n real_samples_depth = real_samples_depth.cuda()\n real_samples_normal = real_samples_normal.cuda()\n real_samples_cond = real_samples_cond.cuda()\n\n # Set input/output variables\n\n self.input.resize_as_(real_samples.data).copy_(real_samples.data)\n self.input_depth.resize_as_(real_samples_depth.data).copy_(real_samples_depth.data)\n self.input_normal.resize_as_(real_samples_normal.data).copy_(real_samples_normal.data)\n self.input_cond.resize_as_(real_samples_cond.data).copy_(real_samples_cond.data)\n self.label.resize_(self.batch_size).fill_(self.real_label)\n # TODO: Remove Variables\n self.inputv = Variable(self.input)\n self.inputv_depth = Variable(self.input_depth)\n self.inputv_normal = Variable(self.input_normal)\n self.inputv_cond = Variable(self.input_cond)\n self.labelv = Variable(self.label)", "def surfcut_points(**kwargs):\n npoints = kwargs.get( 'npoints', 240 )\n origin = kwargs.get( 'origin', vec3(0.,0.,0.)) \n normal = kwargs.get( 'normal', (np.pi/2., 0.) ) \n lims0 = kwargs.get( 'lims0', (-50., 50.) ) \n lims1 = kwargs.get( 'lims1', (-50., 50.) ) \n extents = kwargs.get( 'extents', None) \n \n if extents is not None:\n lims0 = (-extents, extents)\n lims1 = (-extents, extents)\n \n # Make the unit vectors that define the plane\n unit = vec3()\n th = normal[0]\n ph = normal[1]\n unit.set_spherical( 1, th, ph) \n orth0 = vec3( -1.*np.sin(ph), np.cos(ph), 0. )\n orth1 = cross(unit,orth0)\n \n t0 = np.linspace( lims0[0], lims0[1], npoints )\n t1 = np.linspace( lims1[0], lims1[1], npoints ) \n \n # Obtain points on which function will be evaluated\n T0,T1 = np.meshgrid(t0,t1)\n X = origin[0] + T0*orth0[0] + T1*orth1[0] \n Y = origin[1] + T0*orth0[1] + T1*orth1[1]\n Z = origin[2] + T0*orth0[2] + T1*orth1[2] \n \n\n # If given an axes it will plot the reference surface to help visusalize\n # the surface cut\n \n # Note that the axes needs to be created with a 3d projection. \n # For example: \n # fig = plt.figure( figsize=(4.,4.) 
) \n # gs = matplotlib.gridspec.GridSpec( 1,1 ) \n # ax0 = fig.add_subplot( gs[0,0], projection='3d' ) \n \n ax0 = kwargs.get( 'ax0', None ) \n if ax0 is not None: \n\n # Plot the reference surface\n ax0.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, linewidth=0.)\n ax0.set_xlabel('X')\n ax0.set_ylabel('Y')\n ax0.set_zlabel('Z')\n lmin = min([ ax0.get_xlim()[0], ax0.get_ylim()[0], ax0.get_zlim()[0] ] )\n lmax = max([ ax0.get_xlim()[1], ax0.get_ylim()[1], ax0.get_zlim()[1] ] )\n ax0.set_xlim( lmin, lmax )\n ax0.set_ylim( lmin, lmax )\n ax0.set_zlim( lmin, lmax )\n ax0.set_yticklabels([])\n ax0.set_xticklabels([])\n ax0.set_zticklabels([])\n \n # If given an axes and a potential it will plot the surface cut of the \n # potential \n\n ax1 = kwargs.get( 'ax1', None) \n pot = kwargs.get( 'potential', None) \n\n if (ax1 is not None) and (pot is not None):\n # Evaluate function at points and plot\n EVAL = pot.evalpotential(X,Y,Z)\n\n im =ax1.pcolormesh(T0, T1, EVAL, cmap = plt.get_cmap('jet')) \n # cmaps: rainbow, jet\n\n plt.axes( ax1)\n cbar = plt.colorbar(im)\n cbar.set_label(pot.unitlabel, rotation=0 )#self.unitlabel\n \n return T0, T1, X, Y, Z", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n X_camera = np.matmul(R, X) + T\n X_camera = X_camera / X_camera[2, :] # Normalize\n\n if distortion_flag:\n radiusSq = (X_camera[0, :] * X_camera[0, :]) + (X_camera[1, :] * X_camera[1, :])\n X_camera = X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # X_camera = (X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # + (2 * distortion_params[2] * X_camera[0,:] * X_camera[1,:]) + distortion_params[3] * (radiusSq + (2 * X_camera * X_camera)))\n\n X_camera[2, :] = 1.0\n X_camera = np.matmul(K, X_camera)\n X_camera = X_camera[:2, :]\n\n return X_camera", "def convert_pointcloud_to_depth(pointcloud, camera_intrinsics):\r\n\r\n\tassert (pointcloud.shape[0] == 3)\r\n\tx_ = pointcloud[0,:]\r\n\ty_ = pointcloud[1,:]\r\n\tz_ = pointcloud[2,:]\r\n\r\n\tm = x_[np.nonzero(z_)]/z_[np.nonzero(z_)]\r\n\tn = y_[np.nonzero(z_)]/z_[np.nonzero(z_)]\r\n\r\n\tx = m*camera_intrinsics.fx + camera_intrinsics.ppx\r\n\ty = n*camera_intrinsics.fy + camera_intrinsics.ppy\r\n\r\n\treturn x, y", "def fun(params, n_cameras, n_points, camera_indices, point_indices, points_2d, theta):\n \n camera_params = params[:n_cameras * 9].reshape((n_cameras, 9))\n points_3d = params[n_cameras * 9:].reshape((n_points, 3))\n points_proj = project(points_3d[point_indices], camera_params[camera_indices], theta)\n print(\"Residual is: \", (points_proj - points_2d).ravel())\n return (points_proj - points_2d).ravel()", "def sample_flow_at_points(self, x: NDArrayFloat, y: NDArrayFloat, z: NDArrayFloat):\n\n # Check that x, y, z are all the same length\n if not len(x) == len(y) == len(z):\n raise ValueError(\"x, y, and z must be the same size\")\n\n return self.floris.solve_for_points(x, y, z)", "def project_points_undist(self, points3d):\n pts2d, _ = cv2.projectPoints(points3d,\n self.rvec,\n self.tvec,\n self.K_new, 0)\n pts2d = np.squeeze(pts2d)\n if len(pts2d.shape) == 1:\n pts2d = np.expand_dims(pts2d, axis=0)\n return pts2d", "def main():\n # Placing imports here so it will be imported only if user want to test algorithm, 
not when importing\n # Class DepthCameraServer\n\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n import sensors_classes as sensors\n from images_processing_class import ImagesProcessing\n import struct\n import time\n\n # Starting Thread which receives data from VideoCamera, port od thread's socket must be the same as the port at\n # which data from VideoCamera is redirected, to be sure check where VideoCamera data stream is send in script env.py\n depth_camera_server = DepthCameraServer('localhost', 60012)\n depth_camera_server.run()\n\n pose_server = sensors.Pose_server('localhost', 60007)\n pose_server.run()\n\n # Waiting 1 sec to be sure than depth_camera_server has received minimum 1 image, because program will crash if\n # depth_camera_server doesn't have time to receive an image\n time.sleep(1)\n\n points = depth_camera_server.get_points()\n\n lista_punktow = []\n x = []\n y = []\n z = []\n\n data_pose_dict = pose_server.get_all()\n pose_x = data_pose_dict['x']\n pose_y = data_pose_dict['y']\n pose_z = data_pose_dict['z']\n\n yawp = data_pose_dict['yaw']\n pitchp = data_pose_dict['pitch']\n rollp = data_pose_dict['roll']\n\n # Each 3D point is a set of float(x,y,z). Each point has a size of 12 bytes because\n # 3*sizeof(float) = 12 bytes, that's why we are dividing data into parts with size of 12 and then\n # converting this data to tuple with 3 float (xyz).\n\n #\n # Processing cloud of points to seperate x, y and z was copied from dcam_old.py\n #\n\n for i in range(0, len(points) - 12, 12):\n xyz = struct.unpack('fff', points[i:i + 12])\n\n # rotation is included\n x1p, y1p, z1p = rotation(xyz[2], xyz[0], xyz[1], yawp, pitchp, rollp)\n\n # data from pose is included\n xp = round(x1p + pose_x, 1)\n yp = round(y1p + pose_y, 1)\n zp = round(z1p + pose_z, 1)\n temp = [xp, yp, zp]\n lista_punktow.append(temp)\n\n # Choosing only these points which have minimum 0.45 meters at z-axis, but why???\n for i in lista_punktow:\n x.append(i[0])\n y.append(i[1])\n z.append(i[2])\n\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x, y, z, cmap='viridis', linewidth=0.5)\n ax.scatter(x[0], y[0], z[0], c='red')\n ax.scatter(x[1], y[1], z[1], c='yellow')\n ax.scatter(x[2], y[2], z[2], c='black')\n ax.scatter(pose_x, pose_y, pose_z, c='green')\n plt.show()", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * np.arctan2(points[..., 1], np.sqrt(\n points[..., 0] ** 2 + points[..., 2] ** 2)) + self.center_y_px]).T\n\n # return the points\n return transformed_points" ]
[ "0.6049411", "0.60473865", "0.6045746", "0.58854824", "0.5848199", "0.58449686", "0.58036494", "0.5799261", "0.5791324", "0.5775069", "0.5753067", "0.5746451", "0.5705016", "0.56961274", "0.56343555", "0.56067497", "0.5599403", "0.5573139", "0.5569328", "0.55522996", "0.5527905", "0.55180377", "0.5455866", "0.5448502", "0.54321927", "0.5422437", "0.5411323", "0.5369622", "0.5355658", "0.5307609" ]
0.65007573
0
Converts an Rmd document as a string into a list of ``Cell`` objects for easier handling with code designed originally for Jupyter notebooks.
def rmd_to_cells(rmd_string): cells, cell_lines, cell_type, in_block, in_begin = [], [], "markdown", False, False for line in rmd_string.split("\n"): if in_block and (line.strip() == "```" or re.match(END_REGEX, line)): in_block = False # collect cell_lines into a new cell cell = create_cell(cell_type, "\n".join(cell_lines + [line])) cells.append(cell) cell_type, cell_lines = "markdown", [] elif line.startswith("```") or re.match(BEGIN_REGEX, line): in_block = True # collect cell_lines into a new cell if cell_lines: cell = create_cell(cell_type, "\n".join(cell_lines)) cells.append(cell) cell_type = "code" if line.startswith("```{r") and "}" in line else "markdown" cell_lines = [line] else: cell_lines.append(line) # collect remaining cell lines into a new cell if cell_lines: cell = create_cell(cell_type, "\n".join(cell_lines)) cells.append(cell) return cells
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def markdown_cells(notebook):\n cells = all_cells(notebook)\n return [cell[\"source\"] for cell in cells if cell[\"cell_type\"] == \"markdown\"]", "def convert(cell):\r\n\r\n markdownResult=\"\"\r\n if cell['cell_type'] == 'code':\r\n markdownResult += '```\\n'\r\n\r\n for line in cell['source']: \r\n markdownResult += line\r\n\r\n if cell['cell_type'] == 'code':\r\n markdownResult += '\\n```'\r\n \r\n debugPrint(markdownResult)\r\n markdownResult += '\\n\\n'\r\n \r\n return markdownResult", "def markdown_cells(self):\n for cell in self.content.cells:\n if cell.cell_type == \"markdown\" and not cell.source.startswith(NOTEBOOK_HEADER_TAG) \\\n and not cell.source.startswith(NAVBAR_TAG):\n yield cell", "def cells_list(self):\n # self.cell_list_base = []\n # self.cell_list_act = []\n\n pattern = \"cell \\(.*?\\)\"\n \n cell_list_base = [cell for cell in re.findall(pattern, self.base_doc) if 'cell ()' not in cell] # List of cells present in base lib\n cellformat_base = [cell.replace('(','\\(').replace(')','\\)') for cell in cell_list_base] # Formated string for regex pattern\n cells_group_base = [re.search(cell, self.base_doc) for cell in cellformat_base] # Group of regex matches\n cells_dict_base = {base_cell.group(0):{'index': index, 'start': base_cell.start(), 'end': base_cell.end()} for index, base_cell in enumerate(cells_group_base)}\n\n cell_list_act = [cell for cell in re.findall(pattern, self.cell_doc) if 'cell ()' not in cell] # List of cells present in the active lib\n common_cells = [cell for cell in cell_list_act if cell in cell_list_base] # Common cells needs to be deleted\n\n return cells_dict_base, cell_list_act, common_cells", "def read_as_notebook(rmd_path):\n with open(rmd_path) as f:\n lines = [l.strip(\"\\n\") for l in f.readlines()]\n\n new_lines = []\n in_comment = False\n in_solution_region, just_closed_solution_region = False, False\n has_prompt = False\n for i, l in enumerate(lines):\n # prevent excess whitespace in the student version of the notebook caused by the removal of\n # the lines containing the solution\n if just_closed_solution_region:\n just_closed_solution_region = False\n if l == \"\":\n continue\n\n if in_comment and l.strip() == HTML_COMMENT_END:\n new_lines.append(\"<!-- #endraw -->\")\n in_comment = False\n\n elif l.startswith(HTML_COMMENT_START):\n if HTML_COMMENT_END in l:\n if CONFIG_START_REGEX.search(l):\n if \"begin\" in l.lower() and \"prompt\" in l.lower():\n has_prompt = True\n if new_lines[len(new_lines) - 1].strip() == \"\":\n new_lines.pop(len(new_lines) - 1)\n\n if has_prompt:\n if \"begin\" in l.lower() and \"solution\" in l.lower():\n has_prompt = False\n if new_lines[len(new_lines) - 1].strip() == \"\":\n new_lines.pop(len(new_lines) - 1)\n\n elif \"end\" in l.lower() and \"prompt\" not in l.lower():\n has_prompt = False\n\n new_lines.append(\"<!-- #raw -->\")\n new_lines.append(EXTRACT_COMMENT_REGEX.match(l).group(1))\n new_lines.append(\"<!-- #endraw -->\")\n\n else:\n if l == \"\"\"<!-- #region tags=[\"otter_assign_solution_cell\"] -->\"\"\":\n in_solution_region = True\n elif in_solution_region and l == \"<!-- #endregion -->\":\n in_solution_region, just_closed_solution_region = False, True\n\n new_lines.append(l)\n\n elif l.strip() == HTML_COMMENT_START:\n if i + 1 < len(lines) and CONFIG_START_REGEX.match(lines[i + 1]):\n new_lines.append(\"<!-- #raw -->\")\n in_comment = True\n\n else:\n new_lines.append(l)\n\n else:\n new_lines.append(l)\n\n if in_comment:\n raise ValueError(\"R Markdown file ends with an unclosed HTML 
comment\")\n\n nb = jupytext.reads(\"\\n\".join(new_lines), \"Rmd\", as_version=NBFORMAT_VERSION)\n nb[\"metadata\"][\"kernelspec\"] = {\"language\": \"r\"}\n\n return nb", "def split_markdown(source: str) -> List[Dict[str, str]]:\n cells: List[Dict] = []\n in_code = False\n in_tab = False\n cur_code_mark = None\n cur_tag = None\n cur_src = []\n\n def _add_cell(cur_src: List[str], cells: List[Dict]):\n if cur_src:\n src = '\\n'.join(cur_src).strip()\n if in_code:\n cells.append({\n 'type': 'code',\n 'fence': cur_code_mark,\n 'class': cur_tag,\n 'source': src})\n else:\n if not src and not cur_tag:\n return\n cells.append({'type': 'markdown', 'source': src})\n if cur_tag:\n cells[-1]['class'] = cur_tag\n\n for l in source.splitlines():\n code = common.md_code_fence.match(l)\n tab = common.md_mark_pattern.match(l)\n if code:\n # code can be nested\n if in_tab or (in_code and code.groups()[0] != cur_code_mark):\n cur_src.append(l)\n else:\n _add_cell(cur_src, cells)\n cur_src = []\n cur_code_mark, cur_tag = code.groups()\n in_code ^= True\n elif tab:\n begin = tab.groups()[0] == 'begin_tab'\n end = tab.groups()[0] == 'end_tab'\n if in_code or (not begin and not end):\n cur_src.append(l)\n else:\n _add_cell(cur_src, cells)\n cur_src = []\n if begin:\n cur_tag = tab.groups()[1]\n else:\n cur_tag = None\n in_tab = begin\n else:\n cur_src.append(l)\n _add_cell(cur_src, cells)\n return cells", "def append_cell_contents(notebook):\n Cell = namedtuple('Cell', ['label', 'contents'])\n cells = []\n for cell in notebook['cells']:\n label = cell.get('metadata', {}).get('label', None)\n ref_labels = cell.get('metadata', {}).get('ref_labels', [])\n if label is not None:\n cells.append(Cell(label, cell['source']))\n elif ref_labels:\n cell['source'] = '\\n\\n'.join(cell.contents for cell in cells if cell.label in ref_labels).strip()\n\n return notebook", "def from_diagram(diagram: str) -> List['GridQubit']:\n lines = diagram.strip().split('\\n')\n no_qubit_characters = ['.', '-', ' ']\n qubits = []\n for row, line in enumerate(lines):\n for col, c in enumerate(line.strip()):\n if c not in no_qubit_characters:\n if not c.isalnum():\n raise ValueError(\"Input string has invalid character\")\n qubits.append(GridQubit(row, col))\n return qubits", "def split_content_into_document(content: str) -> List[TextContent]:\n return [TextContent(content=content)]", "def _cells(notebook):\n if notebook.nbformat < 4:\n for ws in notebook.worksheets:\n for cell in ws.cells:\n yield cell\n else:\n for cell in notebook.cells:\n yield cell", "def join_markdown_cells(cells: List[Dict]) -> str:\n src = []\n for c in cells:\n cell_src = []\n if c['type'] == 'markdown':\n if 'class' in c:\n cell_src.append(f':begin_tab:{c[\"class\"]}')\n cell_src.append(c['source'])\n if 'class' in c:\n if cell_src[-1].endswith('\\n'):\n cell_src[-1] = cell_src[-1][:-1]\n cell_src.append(':end_tab:')\n else:\n cell_src += [c['fence'] + c['class'], c['source'], c['fence']]\n src.append('\\n'.join(cell_src).strip())\n return '\\n\\n'.join(src) + '\\n'", "def is_markdown_cell(cell):\n return cell[\"cell_type\"] == \"markdown\"", "def markdown_figs(self):\n return self.findall_markdown_cells(MARKDOWN_FIG)", "def get_tokens(input_cell):\n return ast.literal_eval(input_cell)", "def text_contents_from_document_body(\n content: str, granularity=\"document\"\n) -> List[TextContent]:\n\n return text_content_split_functions[granularity](content)", "def get_cell_content(browser, author):\n content = list()\n cells = browser.find_all(class_='t t2')\n for cell 
in cells:\n if cell.find(class_='r_two').b.string != author:\n continue\n for cell_content in cell.find(class_=['tpc_content do_not_catch', 'tpc_content']).strings:\n content.append(cell_content.strip())\n return \"\\n\".join(content)", "def get_cells(self, tag):\n cells = []\n for nb in self.notebooks:\n cells.extend(nb.get_cells(tag))\n nb = new_notebook(cells=cells)\n nb[\"metadata\"][\"kernelspec\"] = {\"name\": \"python3\"}\n return nbformat.writes(nb)", "def string_list_to_cells(lst):\n cells = np.ndarray(len(lst), dtype = 'object')\n for i in range(len(lst)):\n cells[i] = lst[i]\n return cells", "def main(path):\n with open(path, 'r') as f:\n notebook = json.load(f)\n notebook[\"cells\"] = [\n cell for cell in notebook[\"cells\"] if cell[\"cell_type\"] == \"markdown\"\n ]\n with open(path.replace(\".ipynb\", \".tmp.ipynb\"), 'w') as f:\n f.write(json.dumps(notebook))", "def create_cells(self, blocks):\n cells = []\n for block in blocks:\n if (block['type'] == self.code) and (block['IO'] == 'input'):\n code_cell = self.create_code_cell(block)\n cells.append(code_cell)\n\n elif (block['type'] == self.code and\n block['IO'] == 'output' and\n cells[-1].cell_type == 'code'):\n cells[-1].outputs = self.create_outputs(block)\n\n elif block['type'] == self.markdown:\n markdown_cell = self.create_markdown_cell(block)\n cells.append(markdown_cell)\n\n else:\n raise NotImplementedError(\"{} is not supported as a cell\"\n \"type\".format(block['type']))\n\n return cells", "def parse(text):\n md = markdown.Markdown(['codehilite', 'tables', ])\n\n for iref in re.findall(img_ref_re, text):\n img_id = iref[7]\n try:\n image = FlatPageImage.objects.get(pk=int(img_id))\n md.references[img_id] = (image.image_path.url, '')\n except ObjectDoesNotExist:\n pass\n\n for lref in re.findall(reference_re, text):\n doc_name = lref[7]\n try:\n doc = File.objects.get(name=doc_name)\n md.references[doc_name]= (doc.url, doc.name)\n except ObjectDoesNotExist:\n pass\n\n return md.convert(text)", "def _read_rendered_notebook(nb_str):\n # add debug cells\n nb = nbformat.reads(nb_str, as_version=nbformat.NO_CONVERT)\n nbformat_v = nbformat.versions[nb.nbformat]\n\n source = \"\"\"\n# Debugging settings (this cell will be removed before saving)\n# change the current working directory to directory of the session that\n# invoked the jupyter app to make relative paths work\nimport os\n{}\n\"\"\".format(chdir_code(Path('.').resolve()))\n\n cell = nbformat_v.new_code_cell(source,\n metadata={'tags': ['debugging-settings']})\n nb.cells.insert(0, cell)\n\n return nb", "def _parse_cells(self):\n self.cells_with_solutions = []\n self.cells_without_solutions = []\n for cell in self.original_cells:\n if is_test_cell(cell):\n self.tests.append(read_test(cell))\n else:\n self.cells_with_solutions.append(cell)\n self.cells_without_solutions.append(replace_cell_solutions(cell))", "def htmlForMarkdown(md):\n return mdProcessor.convert(md)", "def create_markdown_cell(block):\n kwargs = {'cell_type': block['type'],\n 'source': block['content']}\n markdown_cell = nbbase.new_markdown_cell(**kwargs)\n return markdown_cell", "def replace_cell_solutions(cell):\n if is_markdown_solution_cell(cell):\n return copy.deepcopy(MARKDOWN_ANSWER_CELL_TEMPLATE)\n elif is_code_cell(cell):\n source = get_source(cell)\n stripped_source = replace_solutions(source)\n new_cell = copy.deepcopy(cell)\n new_cell.source = \"\\n\".join(stripped_source)\n return new_cell\n else:\n return copy.deepcopy(cell)", "def code_cells(notebook):\n\n cells = 
all_cells(notebook)\n return [cell[\"source\"] for cell in cells if cell[\"cell_type\"] == \"code\"]", "def convert(self, markdown: str) -> str:\n lines = markdown.split(NEWLINE)\n iterator = LineIterator(lines)\n\n while not iterator.is_done():\n for element in self.__elements:\n if element.is_relevant(iterator.value):\n element.replace(iterator)\n iterator.advance()\n return NEWLINE.join(iterator.lines)", "def convert(md_text):\n # separate by line\n md_text = md_text.split('\\n')\n\n # save the html content for return\n html_text = ''\n\n # begin looping from the first line\n index = -1\n while index < len(md_text) - 1:\n index += 1\n line = md_text[index]\n\n # code segment\n if len(line) >= 3 and line[:3] == '```':\n html_line = \"\"\n language = line[3:].replace(' ', '')\n if len(language) == 0:\n language = False\n order_index = index + 1\n find_end = False\n while order_index < len(md_text):\n if md_text[order_index][:3] == '```':\n find_end = True\n break\n else:\n temp_line = md_text[order_index]\n temp_line = temp_line.replace('<', '&lt;')\n temp_line = temp_line.replace('>', '&gt;')\n temp_line = temp_line.replace(' ', '&nbsp;')\n html_line += temp_line + '<br />'\n order_index += 1\n\n if find_end:\n # if language is not False:\n # html_text += ('<pre><code class=\"' + language + '\">' + html_line + '</code></pre>')\n # else:\n html_text += ('<code>' + html_line + '</code>')\n # print(language)\n index = order_index\n continue\n\n # inline code\n\n\n # header\n is_header, html_line = check_header(line)\n if is_header:\n html_text = html_text + html_line\n continue\n\n # horizontal rule\n is_horizontal_rule, html_line = check_horizontal_rule(line)\n if is_horizontal_rule:\n html_text = html_text + html_line\n continue\n\n # paragraph\n line = check_paragraph(line)\n\n # deal with ordered list\n if len(line.split('.')) != 0 and '1.' 
== line[:2]:\n html_line = '<ol>'\n order_index = index\n while order_index < len(md_text)\\\n and len(md_text[order_index].split('.')) != 0\\\n and (str(order_index - index + 1) == md_text[order_index].split('.')[0]\n or '1' == md_text[order_index].split('.')[0]):\n to_replace = [str(order_index - index + 1) + '.', '1.']\n for replace_content in to_replace:\n md_text[order_index] = md_text[order_index].replace(replace_content, '')\n html_line = html_line + '<li>' + md_text[order_index] + '</li>'\n\n order_index += 1\n index = order_index - 1\n html_line = html_line + '</ol>'\n line = html_line\n\n # deal with unordered list\n is_unordered_list, html_line = check_unordered_list(line)\n if is_unordered_list:\n line = html_line\n\n # deal with strong\n line = strong(line)\n\n # Scratch\n line = scratch(line)\n\n # italics\n line = italics(line)\n\n # image\n while len(re.match(r'((?P<pre_text>.*)!\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line).group())\\\n != 0:\n match = re.match(r'((?P<pre_text>.*)!\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line)\n pre_text = match.group('pre_text')\n alt_text = match.group('alt_text')\n link = match.group('link')\n after_text = match.group('after_text')\n img_html = '<img src=\"' + link + '\" alt=\"' + alt_text + '\">'\n line = pre_text + img_html + after_text\n\n # link\n while len(re.match(r'((?P<pre_text>.*)\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line).group())\\\n != 0:\n match = re.match(r'((?P<pre_text>.*)\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line)\n pre_text = match.group('pre_text')\n alt_text = match.group('alt_text')\n link = match.group('link')\n after_text = match.group('after_text')\n img_html = '<a href=\"' + link + '\">' + alt_text + '</a>'\n line = pre_text + img_html + after_text\n\n html_text = html_text + line\n if not is_unordered_list:\n html_text = html_text + '<br>'\n\n return html_text", "def html2ipynb(path):\n # I don't understand why click isn't handling this?\n path = Path(path)\n if path.is_file() and path.suffix == '.html':\n print(f\"Checking {path}\")\n # Read notebook\n with path.open('r') as f:\n nb = nbformat.v4.new_notebook()\n\n html = f.read()\n soup = BeautifulSoup(html, 'lxml')\n \n for d in soup.findAll(\"div\"):\n if 'class' in d.attrs.keys():\n for clas in d.attrs[\"class\"]:\n if clas in [\"text_cell_render\", \"input_area\"]:\n # code cell\n if clas == \"input_area\":\n cell = nbformat.v4.new_code_cell(d.get_text())\n nb.cells.append(cell)\n\n else:\n cell = nbformat.v4.new_code_cell(d.decode_contents())\n nb.cells.append(cell)\n\n \n outpath = path.with_suffix('.ipynb')\n nbformat.write(nb, outpath.open('w'))" ]
[ "0.659269", "0.57998616", "0.54991925", "0.5440537", "0.54145265", "0.5360483", "0.5280241", "0.5191896", "0.51772344", "0.51759666", "0.51577157", "0.51485837", "0.5141294", "0.51053214", "0.50847465", "0.50440603", "0.5038209", "0.49959463", "0.4994819", "0.49679536", "0.49241027", "0.49148706", "0.4908764", "0.4897422", "0.488082", "0.4871031", "0.48640695", "0.4862303", "0.48370135", "0.48178488" ]
0.74898815
0
Collapses all runs of cells with empty sources into a single cell with an empty source
def collapse_empty_cells(cells):
    in_run, run_start = False, 0
    replacements = []
    for i, cell in enumerate(cells):
        if in_run and cell["source"].strip():
            if (run_start > 0 and cells[run_start-1]["source"].endswith("\n")) or cell["source"].startswith("\n"):
                replacement = []
            else:
                replacement = [create_cell("markdown", "")]
            replacements.append((run_start, i, replacement))
            in_run = False
        elif not in_run and not cell["source"].strip():
            in_run = True
            run_start = i
    replacements.reverse()
    for rs, re, rep in replacements:
        cells[rs:re] = rep
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collapse(self):\n # TODO: not implemented\n pass", "def collapsed(self) -> None:", "def collapseUp(self):\n retval = False\n for cStartInd in range(self.col):\n lst = [self.get_cell(i) for i in range(cStartInd, self.length, self.col)]\n lst, tmp = self.collapseRow(lst)\n x = 0\n for i in range(cStartInd, self.length, self.col):\n self.set_cell(i, lst[x])\n x += 1\n retval = retval or tmp\n return retval", "def empty(self):\n return [cell for cell in self.compact if not cell.peg]", "def collapseLeft(self):\n retval = False\n for rStartInd in [i * self.col for i in range(self.row)]:\n cSlice = self.Range[rStartInd: rStartInd + self.col]\n lst = [self.get_cell(i) for i in cSlice]\n lst, tmp = self.collapseRow(lst)\n for i in range(self.col):\n self.set_cell(cSlice[i], lst[i])\n retval = retval or tmp\n return retval", "def consolidate(self):\n\t\tprint \"\\tConsolidating breakends\"\n\t\tmap(lambda X: self._consolidateEmptyBreakend(X), self)", "def CleanUp(self):\n blankColumnPattern = re.compile('^-*$')\n blankColumns = []\n for columnIndex in range(self.alignment.get_alignment_length() - 1):\n columnValues = self.alignment[:,columnIndex]\n match = blankColumnPattern.search(columnValues)\n if (match):\n blankColumns.append(str(columnIndex))\n for column in blankColumns[::-1]:\n self.DeleteRange(',' + str(column), True)\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def collapseDown(self):\n retval = False\n for cStartInd in range(self.col):\n lst = [self.get_cell(i) for i in range(cStartInd, self.length, self.col)]\n lst.reverse()\n lst, tmp = self.collapseRow(lst)\n lst.reverse()\n x = 0\n for i in range(cStartInd, self.length, self.col):\n self.set_cell(i, lst[x])\n x += 1\n retval = retval or tmp\n return retval", "def consolidate_empty_blocks(self):\n new_blocks = []\n for block in self.blocks:\n if isinstance(block, BasicBlock) and not block.statements:\n self.remove_block(block)\n else:\n new_blocks.append(block)\n self.blocks = new_blocks", "def reset(self):\n width = len(self.cell)\n height = len(self.cell[0])\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def empty_board():\n return [['','',''],\n ['','',''],\n ['','','']]", "def clear(self):\n self._cells = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]", "def _filter_empty(lst):\n return [cell for cell in lst if cell is not Sudoku.EMPTY_CELL]", "def empty_cells(self) -> List[Cell]:\n return list(ob.pos[0] for ob in self.new_obs())", "def remove_assigned_cells(self):\r\n cells = list(self.cells)\r\n for cell in ifilter(lambda cell: cell.symbol is not None, cells):\r\n cell.remove_group(self)\r\n self.cells.remove(cell)\r\n return len(cells) != len(self.cells)", "def remove_empty_sources(self):\n for source in [\"dxf\", \"edilizia\", \"easyroom\", \"merged\"]:\n if source in self and not self[source]:\n del self[source]", "def get_empty_cells(self):\n empty_cells = []\n for cell_row in self.board:\n for current_cell in cell_row:\n if current_cell is not None:\n if current_cell.get_cell_state() == 0:\n empty_cells.append(current_cell)\n return empty_cells", "def get_empty_cells(grid):\n\tempty = []\n\tfor j,row in enumerate(grid):\n\t\tfor i,val in enumerate(row):\n\t\t\tif not val:\n\t\t\t\tempty.append((j,i))\n\treturn empty", "def empty_cells(state):\r\n cells = []\r\n for x, row in enumerate(state):\r\n for y, cell in enumerate(row):\r\n if cell == 0:\r\n cells.append([x, y])\r\n\r\n return cells", "def 
test_structural_remove_columns_all_1_0(self):\n cp = Plotter.from_smiles(['CCCC', 'CCCC'], sim_type=\"structural\")\n self.assertTrue(cp._Plotter__df_descriptors.empty)", "def reset(self):\r\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \r\n for dummy_row in range(self._grid_height) ]\r\n \r\n \r\n self.new_tile()\r\n self.new_tile()", "def check_and_clear_rows(self):\n # if board is full, then there will be a '#' in the first row\n if '#' in self.board[0]:\n return 'Game Over! Top has been reached.'\n for row in xrange(self.height):\n # if any given row is full, then that row won't have any blank spaces\n if not ' ' in self.board[row]:\n del self.board[row]\n self.board.insert(0, [' '] * self.width)", "def get_empty_cells(grid):\n empty = []\n for j,row in enumerate(grid):\n for i,val in enumerate(row):\n if not val:\n empty.append((j,i))\n return empty", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def getNonEmptyCells(self):\n nonemptys = []\n for ri in range(self.nRow):\n for ci in range(self.nCol):\n val = self.vals[ri][ci]\n if not self.isEmpty(val):\n row = ri+1\n col = ci+1\n nonemptys.append(CellDesc(row=row, col=col, val=val))\n return nonemptys", "def fill_blanks_randomly(grid):\n for row in grid:\n for i in range(len(row)):\n if row[i] is None:\n row[i] = get_random_char()", "def exclude_empty_feats(self):\n for dataset in self:\n dataset.dropna(axis=1, how=\"all\", inplace=True)", "def clear(self):\n\n for cell in self.cells:\n cell.clear()", "def full(self):\n return [cell for cell in self.compact if cell.peg]", "def blank(self, index=-1):\n self.displays[0].start() # call only once to support shift chain\n if index < 0:\n for d in self.displays:\n d.blank()\n else:\n self.displays[index].blank()\n self.displays[0].latch() # call only once to support shift chain" ]
[ "0.5843393", "0.5662654", "0.5656175", "0.5561178", "0.5532706", "0.55238676", "0.5440617", "0.5401576", "0.53815573", "0.5375235", "0.5353397", "0.5295118", "0.5245139", "0.5245116", "0.5235098", "0.5224786", "0.51737785", "0.517261", "0.51705515", "0.5094087", "0.50495225", "0.5044577", "0.503259", "0.50259745", "0.5021186", "0.5018424", "0.4988282", "0.49845996", "0.49813086", "0.4978578" ]
0.7354216
0
Storage service is unavailable.
def test_store_is_unavailable(self, mock_current_session):
    mock_store = mock.MagicMock()
    mock_store.is_available.return_value = False
    mock_current_session.return_value = mock_store
    with self.assertRaises(ServiceUnavailable):
        controllers.service_status()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_service_unavailable():\n if WithingsDataManager.service_available is not False:\n _LOGGER.error(\"Looks like the service is not available at the moment\")\n WithingsDataManager.service_available = False\n return True", "def _async_device_unavailable(\n _service_info: bluetooth.BluetoothServiceInfoBleak,\n ) -> None:\n push_lock.reset_advertisement_state()", "def unavailable(self):\n print(\"\\n**Sorry this Service is unavailable**\\n\")\n self.get_input()", "def service_unavailable(request):\n response = render(request, ERROR_TEMPLATE, {\n 'maintenance_message': settings.SYSTEM_MAINTENANCE_MESSAGE,\n }, status=503)\n\n # Invoke log_response manually (causing the default logging to be\n # skipped), since otherwise Django will treat a 503 status as an\n # error requiring a verbose log message and email to admins\n log_response(\n 'Service Unavailable: %s', request.path,\n request=request,\n response=response,\n level='warning',\n )\n return response", "def test_update_volume_stats_error(self):\n self._fail_host_storage = True\n actual = self.driver.get_volume_stats(True)\n self.assertEqual('HGST', actual['vendor_name'])\n self.assertEqual('hgst', actual['storage_protocol'])\n self.assertEqual('unknown', actual['total_capacity_gb'])\n self.assertEqual('unknown', actual['free_capacity_gb'])\n self.assertEqual(0, actual['reserved_percentage'])", "def test_upload_service_unavailable(self):\n self._retryable.side_effect = requests.HTTPError('Fail')\n\n payload = dict(id=\"stub_id\", data={\"some\": \"data\"})\n resp = self.client.post(self.url, json=payload)\n\n assert resp.status_code == 500\n assert resp.get_json() == {\n 'status': 'Error',\n 'type': 'HTTPError',\n 'status_code': 500,\n 'message': \"Unable to access upload-service\"\n }", "def unavailable(self):\r\n\r\n self._available = False\r\n self.owner.trigger(\"on_unavailable\")", "def _default_handler(self, iq):\n raise XMPPError('service-unavailable')", "def storage_available(self):\n logger.debug('Function storage_available start')\n \n # 2.9 GB\n max_size = 2.9*10**9\n \n if self.total_image_data_size >= max_size:\n logger.info(\"Storage not available\")\n return False\n else:\n logger.info(\"Storage available\")\n return True\n\n logger.debug('Function storage_available end')", "def cbr_not_avalible():\n return \"CBR service is unavailable\", 503", "def is_no_storage_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_no_storage_enabled\")", "def is_no_storage_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_no_storage_enabled\")", "def unrecognised_service(service_name):\n print('Service {} not (yet) supported.'.format(service_name))\n pass", "def set_unavailable(self):\n self[\"available\"] = False", "def snmpqosqos_error_api_ses_notready(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_api_ses_notready\n\t\texcept Exception as e:\n\t\t\traise e", "async def test_get_device_device_unavailable(hass):\n with patch(\n \"axis.vapix.Vapix.request\", side_effect=axislib.RequestError\n ), pytest.raises(axis.errors.CannotConnect):\n await axis.device.get_device(hass, host=\"\", port=\"\", username=\"\", password=\"\")", "def async_mark_unavailable(self):\n self._available = False", "def is_available():", "def delete_unavailable_devices():\n _run_command('delete unavailable')", "def test_cannot_get_service_from_store_that_does_not_exist(self):\n get_response = self.client.get('/navyget-api/v1/store/5a2bc733791e4bbc9a26f7a5/service/', headers=self.my_header)\n 
self.assertEqual(get_response.status, \"404 NOT FOUND\")\n self.assertIn(\"That Store does not exist.\", str(get_response.data))", "def test_create_experiment_bad_storage(self):\n name = \"oopsie_bad_storage\"\n # Make sure there is no existing storage singleton\n\n with pytest.raises(NotImplementedError) as exc:\n create_experiment(\n name=name,\n storage={\"type\": \"legacy\", \"database\": {\"type\": \"idontexist\"}},\n )\n\n assert \"Could not find implementation of Database, type = 'idontexist'\" in str(\n exc.value\n )", "def testDetectStorageFail(self):\n explorer_object = explorer.Explorer()\n explorer_object.docker_directory = 'this_dir_shouldnt_exist'\n\n expected_error_message = (\n 'this_dir_shouldnt_exist is not a Docker directory')\n with self.assertRaises(errors.BadStorageException) as err:\n explorer_object.SetDockerDirectory('this_dir_shouldnt_exist')\n self.assertEqual(expected_error_message, err.exception.message)", "def snmpqosqos_error_api_ses_notreadyrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_api_ses_notreadyrate\n\t\texcept Exception as e:\n\t\t\traise e", "def storage(self) -> storage.Storage:\n raise ValueError('Not implemented.')", "def ex_destroy_storage_service(self, name):\n\n response = self._perform_storage_service_delete(self._get_storage_service_path(name))\n self.raise_for_response(response, 200)\n\n return True", "def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')", "def _check(self):\n\t\tif not self._raven:\n\t\t\traise NoDeviceFoundException", "async def on_guild_unavailable(self, guild: discord.Guild):\n if guild.id != RushGuild.id:\n return\n\n self._guild_available.clear()", "def utilize_ephemeral_storage(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"utilize_ephemeral_storage\")", "def device_out_of_memory(self) -> bool:\n return pulumi.get(self, \"device_out_of_memory\")" ]
[ "0.668266", "0.61864495", "0.60823864", "0.6079669", "0.6038685", "0.59237933", "0.59164035", "0.5869149", "0.5832894", "0.572674", "0.5605072", "0.5605072", "0.5580025", "0.55637956", "0.5485156", "0.5483856", "0.5440075", "0.5376526", "0.5344619", "0.5323179", "0.5311567", "0.5304461", "0.52713794", "0.5250253", "0.52291185", "0.5222623", "0.5221545", "0.52172893", "0.5214818", "0.52137935" ]
0.64220196
1
The preview already exists.
def test_already_exists(self, mock_current_session):
    mock_store = mock.MagicMock()
    mock_store.deposit.side_effect = store.PreviewAlreadyExists
    mock_current_session.return_value = mock_store
    with self.assertRaises(Conflict):   # 409 Conflict
        controllers.deposit_preview(self.source_id, self.checksum, self.stream)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upd_preview(self):\n\n if self.data_type != \"layer\":\n self.dlg.uLabelImgPreview.clear()\n self.dlg.uLabelImgPreview.setText(\"No preview available\")\n return\n\n if self.get_preview(\"300x200\", 0.5):\n return\n if self.get_preview(\"150x100\", 5):\n return\n\n self.dlg.uLabelImgPreview.clear()\n self.dlg.uLabelImgPreview.setText(\"No preview available\")", "def test_exists(self, mock_current_session):\n added = datetime.now(UTC)\n mock_store = mock.MagicMock()\n mock_store.get_preview_checksum.return_value = 'foopdfchex=='\n mock_current_session.return_value = mock_store\n\n data, code, headers = \\\n controllers.check_preview_exists(self.source_id, self.checksum)\n self.assertEqual(code, status.OK, 'Returns 200 OK')\n self.assertEqual(headers['ETag'], 'foopdfchex==',\n 'ETag is set to the preview checksum')", "def removePreview(self):\n logger.debug(\"Func: removePreview\")\n\n if self._currentPreviewCamera:\n previewName = self._currentPreviewCamera\n previewFile = self._currentPreviewsDict[self._currentPreviewCamera]\n os.remove(os.path.join(self.projectDir, self._currentPreviewsDict[self._currentPreviewCamera]))\n del self._currentPreviewsDict[self._currentPreviewCamera]\n self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"Preview\"] = self._currentPreviewsDict\n self._dumpJson(self._currentSceneInfo, self._baseScenesInCategory[self.currentBaseSceneName])\n logger.info(\"\"\"Preview file deleted and removed from database successfully \n Preview Name: {0}\n Path: {1}\n \"\"\".format(previewName, previewFile))", "def is_existing(self):\n return self.backend.is_existing", "def preview(self) -> Optional[bool]:\n return pulumi.get(self, \"preview\")", "def delete_preview(self):\n logger.debug(\"Deleting previews\")\n for item in os.listdir(self._pathpreview):\n if item.startswith(\".gui_training_preview\") and item.endswith(\".jpg\"):\n fullitem = os.path.join(self._pathpreview, item)\n logger.debug(\"Deleting: '%s'\", fullitem)\n os.remove(fullitem)\n for fname in self._previewcache[\"filenames\"]:\n if os.path.basename(fname) == \".gui_preview.jpg\":\n logger.debug(\"Deleting: '%s'\", fname)\n try:\n os.remove(fname)\n except FileNotFoundError:\n logger.debug(\"File does not exist: %s\", fname)\n self._clear_image_cache()", "def is_preview(self) -> Optional[bool]:\n return pulumi.get(self, \"is_preview\")", "def test_does_not_exist(self, mock_current_session):\n mock_store = mock.MagicMock()\n mock_store.get_preview_checksum.side_effect = store.DoesNotExist\n mock_current_session.return_value = mock_store\n\n with self.assertRaises(NotFound):\n controllers.check_preview_exists(self.source_id, self.checksum)", "def check_preview_exists(source_id: str, checksum: str) -> Response:\n data, code, headers = controllers.check_preview_exists(source_id, checksum)\n response: Response = make_response(jsonify(data), code, headers)\n return response", "def create(self):\n if not self.Create(self.title):\n #raise TextOutputMediaException(\"Unable to create custom viewer\")\n return False\n return True", "def test_exists(self, mock_current_session):\n added = datetime.now(UTC)\n mock_store = mock.MagicMock()\n mock_store.get_preview.return_value = Preview(\n source_id=self.source_id,\n checksum=self.checksum,\n metadata=Metadata(added=added, checksum='foopdfchex==',\n size_bytes=1_234),\n content=Content(stream=io.BytesIO(b'fakecontent'))\n )\n mock_current_session.return_value = mock_store\n\n data, code, headers = \\\n controllers.get_preview_content(self.source_id, 
self.checksum)\n self.assertEqual(code, status.OK, 'Returns 200 OK')\n self.assertEqual(headers['ETag'], 'foopdfchex==',\n 'ETag is set to the preview checksum')\n self.assertEqual(data.read(), b'fakecontent', 'Returns content stream')", "def test_is_preview(self):\r\n self.assertTrue(self.unit.q(css=\".discussion-preview\").present)\r\n self.assertFalse(self.unit.q(css=\".discussion-show\").present)", "def on_pre_close(self):\n if not self.view.settings().has(\"preview_view_id\"):\n return\n\n vistaPreview = self.__recuperarVistaDePreview()\n vistaPreview.close()", "def removePreviewIcone(self, iconeFile):\n if os.path.exists(iconeFile):\n try:\n os.remove(iconeFile)\n self.log.info(\"Remove preview icone: %s\" % pFile.conformPath(iconeFile))\n except:\n self.log.error(\"Can't remove preview icone: %s !!!\" % pFile.conformPath(iconeFile))", "def preview_file_cleanup(sender, **kwargs):\n\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def test_does_not_exist(self, mock_current_session):\n mock_store = mock.MagicMock()\n mock_store.get_preview.side_effect = store.DoesNotExist\n mock_current_session.return_value = mock_store\n\n with self.assertRaises(NotFound):\n controllers.get_preview_content(self.source_id, self.checksum)", "def is_new_file(self):\n return self.filename is None", "def exists(self):\r\n try:\r\n self.refresh()\r\n except:\r\n return False\r\n return True", "def exists(self):\n\n if self:\n pass", "def preview_history(self, window, history_entry):\n self.current_history_entry = history_entry\n\n # track the view even if we won't be previewing it (to support quick-open and remove from history quick keys)\n self.__track_calling_view(window)\n\n # Only preview the view if the user wants to see it\n if not self.SHOW_FILE_PREVIEW:\n return\n\n filepath = history_entry['filename']\n if os.path.exists(filepath):\n # asynchronously open the preview (improves perceived performance)\n sublime.set_timeout_async(lambda: self.__open_preview(window, filepath), 0)\n else:\n # Close the last preview and remove the non-existent file from the history\n self.__close_preview(window)\n self.__remove(self.get_current_project_key(), filepath)\n self.__save_history()", "def object_exists(self, fname):\n return False", "def is_previewable(self, **parameters):\n if not hasattr(self, '_is_previewable'):\n self._is_previewable = True\n return self._is_previewable", "def test_create_dup(self):\n obj = self.provision_single_asset()\n p = self.post('widget', 409, params={'name': u'Testing'})\n assert 'duplicate value already' in p['message']", "def exists(self):\n return True", "def exists(self):\n return True", "def test_existing_file_name(self):\n\t\ttp = self.sess.query(sql.Post).filter(sql.Post.reddit_id == 't3_ahal9v').first()\n\t\tfile = ng.choose_file_name(tp.urls[0], tp, sql.session(), album_size=1)\n\t\tself.assertTrue(file.endswith(' - 2'), msg='Failed to increment duplicate post!')", "def test_exists_false(self):\n self.assertFalse(SampleTemplate.exists(self.new_study.id))", "def test_exists(self, mock_current_session):\n added = datetime.now(UTC)\n mock_store = mock.MagicMock()\n mock_store.get_metadata.return_value = \\\n Metadata(added=added, checksum='foopdfchex==', size_bytes=1_234)\n mock_current_session.return_value = mock_store\n\n data, code, headers = \\\n controllers.get_preview_metadata(self.source_id, self.checksum)\n self.assertEqual(code, status.OK, 'Returns 200 OK')\n 
self.assertEqual(headers['ETag'], 'foopdfchex==',\n 'ETag is set to the preview checksum')\n self.assertDictEqual(data, {'checksum': 'foopdfchex==',\n 'added': added,\n 'size_bytes': 1234},\n 'Returns metadata about the preview')", "def upload_url(self, url, preview):\n return super(PicovicoMusic, self).upload_url(url, preview_url=preview)", "def object_exists(self, fname):\n return True" ]
[ "0.6237067", "0.5998938", "0.59585685", "0.5936331", "0.58514047", "0.5833355", "0.58192414", "0.58160555", "0.57726073", "0.5763644", "0.5718817", "0.57129294", "0.5625983", "0.560919", "0.55987835", "0.5598265", "0.5556675", "0.54856384", "0.5413032", "0.53611124", "0.535947", "0.5343865", "0.5331318", "0.5318115", "0.5318115", "0.5313201", "0.5310967", "0.5295926", "0.52928555", "0.529072" ]
0.6862947
0
The preview is deposited successfully.
def test_deposit_successful(self, mock_current_session):
    mock_store = mock.MagicMock()
    added = datetime.now(UTC)

    def mock_deposit(obj, overwrite, **kwargs):
        """Deposit implementation sets metadata on Preview."""
        return Preview(source_id=obj.source_id,
                       checksum=obj.checksum,
                       metadata=Metadata(added=added,
                                         checksum='foopdfchex==',
                                         size_bytes=1_234))

    mock_store.deposit.side_effect = mock_deposit
    mock_current_session.return_value = mock_store

    data, code, headers = \
        controllers.deposit_preview(self.source_id, self.checksum,
                                    self.stream)
    self.assertEqual(code, status.CREATED, 'Returns 201 Created')
    self.assertEqual(headers['ETag'], 'foopdfchex==',
                     'ETag is set to the preview checksum')
    self.assertDictEqual(data, {'checksum': 'foopdfchex==',
                                'added': added,
                                'size_bytes': 1234},
                         'Returns metadata about the preview')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mock_deposit(obj, overwrite, **kwargs):\n return Preview(source_id=obj.source_id,\n checksum=obj.checksum,\n metadata=Metadata(added=added,\n checksum='foopdfchex==',\n size_bytes=1_234))", "def on_Deposit_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def dismiss_transfer_result(self):\n self.close_previewed_transfer()", "def test_preview_post(self):\n pass", "def deposit_preview(source_id: str, checksum: str) -> Response:\n content_type: Optional[str] = request.headers.get('Content-type')\n content_checksum: Optional[str] = request.headers.get('ETag', None)\n overwrite = bool(request.headers.get('Overwrite', 'false') == 'true')\n stream: IO[bytes] = request.stream # type: ignore\n data, code, headers = controllers.deposit_preview(\n source_id, checksum,\n stream,\n content_type,\n overwrite=overwrite,\n content_checksum=content_checksum\n )\n response: Response = make_response(jsonify(data), code, headers)\n return response", "def landlord_button_deposite_received(self):\n payment_id = False\n acc_pay_form = self.env.ref(\n 'account.view_account_payment_form')\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'sale')], limit=1)\n payment_obj = self.env['account.payment']\n payment_method_id = self.env.ref(\n 'account.account_payment_method_manual_in')\n for tenancy_rec in self:\n if tenancy_rec.acc_pay_dep_rec_id and \\\n tenancy_rec.acc_pay_dep_rec_id.id:\n return {\n 'view_type': 'form',\n 'view_id': acc_pay_form.id,\n 'view_mode': 'form',\n 'res_model': 'account.payment',\n 'res_id': tenancy_rec.acc_pay_dep_rec_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }\n if tenancy_rec.deposit == 0.00:\n raise Warning(_('Please Enter Deposit amount.'))\n if tenancy_rec.deposit < 0.00:\n raise Warning(\n _('The deposit amount must be strictly positive.'))\n vals = {\n 'partner_id': tenancy_rec.property_owner_id.parent_id.id,\n 'partner_type': 'customer',\n 'journal_id': account_jrnl_obj.id,\n 'payment_type': 'inbound',\n 'communication': 'Deposit Received',\n 'tenancy_id': tenancy_rec.id,\n 'amount': tenancy_rec.deposit,\n 'property_id': tenancy_rec.property_id.id,\n 'payment_method_id': payment_method_id.id\n }\n payment_id = payment_obj.create(vals)\n return {\n 'view_mode': 'form',\n 'view_id': acc_pay_form.id,\n 'view_type': 'form',\n 'res_id': payment_id and payment_id.id,\n 'res_model': 'account.payment',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n 'domain': '[]',\n 'context': {\n 'close_after_process': True,\n }\n }", "def deposit(self, amount):\n message = self.account.deposit(float(amount))\n if message:\n return message\n else:\n self.myView.displayAccount()\n return \"success\"", "def upd_preview(self):\n\n if self.data_type != \"layer\":\n self.dlg.uLabelImgPreview.clear()\n self.dlg.uLabelImgPreview.setText(\"No preview available\")\n return\n\n if self.get_preview(\"300x200\", 0.5):\n return\n if self.get_preview(\"150x100\", 5):\n return\n\n self.dlg.uLabelImgPreview.clear()\n self.dlg.uLabelImgPreview.setText(\"No preview available\")", "def _preview():\n context = get_factcheck_context()\n return make_response(render_template('factcheck.html', **context))", "def test_deposit_fails(self, mock_current_session):\n mock_store = mock.MagicMock()\n mock_store.deposit.side_effect = store.DepositFailed\n mock_current_session.return_value = mock_store\n\n with self.assertRaises(InternalServerError):\n controllers.deposit_preview(self.source_id, 
self.checksum,\n self.stream)", "def test_draft_component_preview_html(self):\r\n modulestore('draft').convert_to_draft(self.vertical.location)\r\n draft_video = modulestore('draft').convert_to_draft(self.video.location)\r\n self.validate_preview_html(draft_video, 'student_view',\r\n can_edit=True, can_reorder=True, can_add=False)", "def landlord_button_deposite_pay(self):\n payment_id = False\n acc_pay_form = self.env.ref(\n 'account.view_account_payment_form')\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'purchase')], limit=1)\n payment_obj = self.env['account.payment']\n payment_method_id = self.env.ref(\n 'account.account_payment_method_manual_in')\n for tenancy_rec in self:\n if tenancy_rec.acc_pay_dep_rec_id and \\\n tenancy_rec.acc_pay_dep_rec_id.id:\n return {\n 'view_type': 'form',\n 'view_id': acc_pay_form.id,\n 'view_mode': 'form',\n 'res_model': 'account.payment',\n 'res_id': tenancy_rec.acc_pay_dep_rec_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }\n if tenancy_rec.deposit == 0.00:\n raise Warning(_('Please Enter Deposit amount.'))\n if tenancy_rec.deposit < 0.00:\n raise Warning(\n _('The deposit amount must be strictly positive.'))\n vals = {\n 'partner_id': tenancy_rec.property_owner_id.parent_id.id,\n 'partner_type': 'supplier',\n 'journal_id': account_jrnl_obj.id,\n 'payment_type': 'outbound',\n 'communication': 'Deposit Received',\n 'tenancy_id': tenancy_rec.id,\n 'amount': tenancy_rec.deposit,\n 'property_id': tenancy_rec.property_id.id,\n 'payment_method_id': payment_method_id.id\n }\n payment_id = payment_obj.create(vals)\n return {\n 'view_mode': 'form',\n 'view_id': acc_pay_form.id,\n 'view_type': 'form',\n 'res_id': payment_id and payment_id.id,\n 'res_model': 'account.payment',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n 'domain': '[]',\n 'context': {\n 'close_after_process': True,\n }\n }", "def on_VDepositValue_editingFinished(self):\n # TODO: not implemented yet\n # raise NotImplementedError\n value = self.VDepositValue.text()\n if JudgeStr2Float(value):\n self.depositValue = float(value)\n print(f\"Validator Deposit with {value} MAN\")\n else:\n self.vdepositValue = 0\n self.VDepositValue.clear()\n # self.VDepositValue.setFocus()", "def view(self):\n\t\tself.done(1)", "def after_successful_edit(self):\n pass", "def test_deposit_return_malformed(self, mock_current_session):\n mock_store = mock.MagicMock()\n # Doesn't add metadata.\n mock_store.deposit.side_effect = lambda obj, **kw: obj\n mock_current_session.return_value = mock_store\n\n with self.assertRaises(InternalServerError):\n controllers.deposit_preview(self.source_id, self.checksum,\n self.stream)", "def on_pre_close(self):\n if not self.view.settings().has(\"preview_view_id\"):\n return\n\n vistaPreview = self.__recuperarVistaDePreview()\n vistaPreview.close()", "def preview():\n return render_template(\"controls/preview.html\")", "def deposit_money():\n print(\"\\n\")\n print(messages.account_credentials)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.deposit_money(credentials)\n start_again() if result else BankOperationsUi.deposit_money()", "def finalize_preview(request, pk, step=0):\n ts = get_timeslot()\n if not hasattr(ts, 'resultoptions'):\n raise PermissionDenied(\"Results menu is not yet visible.\")\n else:\n if not 
get_timeslot().resultoptions.Visible:\n raise PermissionDenied(\"Results menu is not yet visible.\")\n dstr = get_object_or_404(Distribution, pk=pk)\n if not hasattr(dstr, 'presentationtimeslot'):\n raise PermissionDenied('This student does not have a presentation planned. Please plan it first.')\n\n if not request.user.is_superuser and \\\n request.user != dstr.Proposal.Track.Head and \\\n request.user != dstr.Proposal.ResponsibleStaff and \\\n get_grouptype('3') not in request.user.groups.all() and \\\n request.user not in dstr.presentationtimeslot.Presentations.Assessors.all():\n raise PermissionDenied(\"You do not have the correct permissions to view print preview.\")\n return render(request, \"results/finalize_grades.html\", {\n \"dstr\": dstr,\n \"catresults\": dstr.results.all(),\n \"final\": all(f.Final is True for f in dstr.results.all()) if dstr.results.all() else False,\n \"finalgrade\": dstr.TotalGradeRounded(),\n \"preview\": True,\n })", "def test_get_small_and_light_fee_preview(self):\n pass", "def save(self, *args, **kwargs):\n super(Preview, self).save(*args, **kwargs)\n self.use_effect()", "def on_MDepositValue_editingFinished(self):\n # TODO: not implemented yet\n # raise NotImplementedError\n value = self.MDepositValue.text()\n if JudgeStr2Float(value):\n self.mdepositValue = float(value)\n print(f\"Validator Deposit with {value} MAN\")\n else:\n self.mdepositValue = 0\n self.MDepositValue.clear()\n # self.MDepositValue.setFocus()", "def test_draft_container_preview_html(self):\r\n draft_unit = modulestore('draft').convert_to_draft(self.vertical.location)\r\n draft_child_container = modulestore('draft').convert_to_draft(self.child_container.location)\r\n draft_child_vertical = modulestore('draft').convert_to_draft(self.child_vertical.location)\r\n self.validate_preview_html(draft_unit, self.container_view,\r\n can_edit=True, can_reorder=True, can_add=True)\r\n self.validate_preview_html(draft_child_container, self.container_view,\r\n can_edit=True, can_reorder=True, can_add=True)\r\n self.validate_preview_html(draft_child_vertical, self.reorderable_child_view,\r\n can_edit=True, can_reorder=True, can_add=True)", "def deposit():\n\n if request.method == \"POST\":\n if not request.form.get(\"deposit\"):\n return apology(\"Must enter amount to deposit\")\n\n deposit = request.form.get(\"deposit\")\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n cash = entry[0]['cash'] + float(deposit)\n\n db.execute(\"UPDATE users SET cash=:cash WHERE id = :id\",\n cash=cash, id=session['user_id'])\n\n return redirect(url_for(\"index\"))\n\n else:\n return render_template(\"deposit.html\")", "def call_transfer_fund(self):\n ## 1) Create expense line for current student\n ## 2) Create Deposite lines for oney transfer student\n\n ## 1\n student_pool = self.env['op.student']\n partner_obj = self.env['res.partner']\n employee_pool = self.env['hr.employee']\n\n if not self.pin_varification:\n raise except_orm(_('Warning!'),\n _(\"Enter Valid PIN to proceed!\"))\n\n\n student_id = student_pool.search([('user_id', '=', self._uid)])\n\n ## Validate Enter PIN\n if student_id:\n self.validate_current_user_pin(student_id)\n\n expense_vals = {\n 'name': student_id.id,\n 'amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s\" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n 'create_invoice': False,\n 
# 'student_id': student_id.id,\n }\n\n student_expenses_id = self.env['student.expenses'].sudo().create(expense_vals)\n self.total_expense_balance = student_id.stud_balance_amount\n\n ## Get employee form account id\n employee_id = employee_pool.sudo().search([('ean13', '=', self.account_no)])\n\n ## Search EMployee By Employee ID\n search_by_id_employee_id = employee_pool.sudo().search([('identification_id', '=', self.account_no)])\n\n ## Search by student matrix ID\n search_by_id_student_id = student_pool.sudo().search([('gr_no', '=', self.account_no)])\n\n if not self.account_no:\n ## Logic for search by User Name\n employee_id = self.pass_employee_id.sudo()\n student_id = self.pass_student_id.sudo()\n else:\n ## Get partner form account id\n student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if student_id:\n deposite_vals = {\n 'name': student_id.id,\n # 'amount': self.amount_to_transfer,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n if not self.account_no:\n trans_student_id = student_id.sudo()\n else:\n trans_student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if trans_student_id:\n self.total_deposite_balance = trans_student_id.stud_balance_amount\n elif employee_id:\n deposite_vals = {\n 'name': employee_id.id,\n 'employee_id': employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = employee_id.available_balance\n\n elif search_by_id_employee_id:\n deposite_vals = {\n 'name': search_by_id_employee_id.id,\n 'employee_id': search_by_id_employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_employee_id.available_balance\n\n elif search_by_id_student_id:\n deposite_vals = {\n 'name': search_by_id_student_id.id,\n 'employee_id': search_by_id_student_id.gr_no,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_student_id.stud_balance_amount\n\n # return True\n compose_form = self.env.ref('deposite_management.transfer_confirmation_popup_view', False)\n\n try:\n template_id = self.env.ref('deposite_management.email_template_student_fund_transfer', False)\n except ValueError:\n template_id = False\n values = self.env['email.template'].generate_email(template_id.id, self.id)\n\n ## Append Student email id to send mail\n if values and 'email_to' in values:\n values['email_to'] = student_id.sudo().email\n mail_id = 
self.env['mail.mail'].sudo().create(values)\n if mail_id:\n mail_send_id = mail_id.send()\n\n try:\n template_id_new = self.env.ref('deposite_management.email_template_student_fund_transfer_self_notification', False)\n except ValueError:\n template_id_new = False\n values_new = self.env['email.template'].generate_email(template_id_new.id, self.id)\n ## Append email id to send mail\n if values_new and 'email_to' in values_new:\n if student_id and trans_student_id:\n values_new['email_to'] = trans_student_id.email\n elif employee_id:\n values_new['email_to'] = employee_id.sudo().work_email\n mail_id_new = self.env['mail.mail'].sudo().create(values_new)\n if mail_id_new:\n mail_send_id = mail_id_new.send()\n ## return wizard after click on Fund Transfer Button\n return {\n 'name': _('Fund Transfer Done'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'fund.confirmation.msg',\n 'view_id': compose_form.id,\n 'target': 'new',\n }", "def test_deposit_amount_view(self):\n client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_deposit')\n amount_1 = random.randint(10, 50000)\n amount_2 = random.randint(10, 50000)\n\n request_1 = client.post(url, {'amount': amount_1}, format='json')\n self.account.refresh_from_db()\n\n request_2 = client.post(url, {'amount': amount_2}, format='json')\n self.account.refresh_from_db()\n\n self.assertEqual(amount_1 + amount_2, self.account.current_balance)", "def form_valid(self, form):\n auth_login(self.request, form.get_user())\n if self.request.session.get('payment'):\n Payment.objects.filter(id=self.request.session['payment']).update(\n user_id=self.request.user.revolvuserprofile, entrant_id=self.request.user.revolvuserprofile)\n payment = Payment.objects.get(id=self.request.session['payment'])\n Tip.objects.filter(id=payment.tip_id).update(user_id=self.request.user.revolvuserprofile)\n Project.objects.get(id=payment.project_id).donors.add(self.request.user.revolvuserprofile)\n AnonymousUserDonation.objects.filter(payment_id=self.request.session['payment']).delete()\n del self.request.session['payment']\n\n # messages.success(self.request, 'Logged in as ' + self.request.POST.get('username'))\n # return redirect(reverse('project:view', kwargs={'title':title})+'?amount='+amount+'&tip='+tip)\n messages.success(self.request, 'Logged in as ' + self.request.POST.get('username'))\n return redirect(self.next_url)", "def preview_handler(self, _, __):\r\n template = self.system.render_template('lti_form.html', self.get_context())\r\n return Response(template, content_type='text/html')", "def preview(context):\n command = (\n f\"docker run -t \"\n f\"-e INPUT_LEANPUB-API-KEY={LEANPUB_API_KEY} \"\n f\"-e INPUT_LEANPUB-BOOK-SLUG={LEANPUB_BOOK_SLUG} \"\n f\"-e INPUT_PREVIEW=true\"\n f\"{IMAGE_NAME}:{IMAGE_VER}\"\n )\n # print(f\"{command}\") # Commenting out as this can print secrets\n context.run(f\"{command}\", pty=True)" ]
[ "0.6338141", "0.5979675", "0.59421855", "0.57830966", "0.57375795", "0.56633556", "0.5594401", "0.5583082", "0.55299044", "0.54851264", "0.54364324", "0.5425671", "0.5414658", "0.5412717", "0.5406029", "0.53997284", "0.5370651", "0.5346766", "0.53250915", "0.5285307", "0.52581805", "0.5230774", "0.51707286", "0.515265", "0.5139825", "0.5136498", "0.5127192", "0.5080897", "0.50754434", "0.5064563" ]
0.68985367
0
Deposit implementation sets metadata on Preview.
def mock_deposit(obj, overwrite, **kwargs):
    return Preview(source_id=obj.source_id,
                   checksum=obj.checksum,
                   metadata=Metadata(added=added,
                                     checksum='foopdfchex==',
                                     size_bytes=1_234))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deposit_successful(self, mock_current_session):\n mock_store = mock.MagicMock()\n added = datetime.now(UTC)\n\n def mock_deposit(obj, overwrite, **kwargs):\n \"\"\"Deposit implementation sets metadata on Preview.\"\"\"\n return Preview(source_id=obj.source_id,\n checksum=obj.checksum,\n metadata=Metadata(added=added,\n checksum='foopdfchex==',\n size_bytes=1_234))\n\n mock_store.deposit.side_effect = mock_deposit\n mock_current_session.return_value = mock_store\n\n data, code, headers = \\\n controllers.deposit_preview(self.source_id, self.checksum,\n self.stream)\n self.assertEqual(code, status.CREATED, 'Returns 201 Created')\n self.assertEqual(headers['ETag'], 'foopdfchex==',\n 'ETag is set to the preview checksum')\n self.assertDictEqual(data, {'checksum': 'foopdfchex==',\n 'added': added,\n 'size_bytes': 1234},\n 'Returns metadata about the preview')", "def initial_metadata(self):\n raise NotImplementedError()", "def set_metadata(self, data):\r\n pass", "def _post_process(self):\n # merge extendedMetadata into metadata\n if 'instance' in self._metadata and self._metadata['instance'] is not None:\n if 'metadata' in self._metadata['instance']:\n if 'extendedMetadata' in self._metadata['instance']:\n v = self._metadata['instance'].pop('extendedMetadata')\n self._metadata['instance']['metadata'].update(v)\n else:\n if 'extendedMetadata' in self._metadata['instance']:\n v = self._metadata.pop('extendedMetadata')\n self._metadata['metadata'] = v\n\n # change vnic's id to vnicId\n if 'vnics' in self._metadata:\n for i in range(len(self._metadata['vnics'])):\n v = self._metadata['vnics'][i].pop('id')\n self._metadata['vnics'][i]['vnicId'] = v", "def metadata(self): # -> None:\n ...", "def __metadata__(self):\n raise NotImplementedError", "def deposit_preview(source_id: str, checksum: str) -> Response:\n content_type: Optional[str] = request.headers.get('Content-type')\n content_checksum: Optional[str] = request.headers.get('ETag', None)\n overwrite = bool(request.headers.get('Overwrite', 'false') == 'true')\n stream: IO[bytes] = request.stream # type: ignore\n data, code, headers = controllers.deposit_preview(\n source_id, checksum,\n stream,\n content_type,\n overwrite=overwrite,\n content_checksum=content_checksum\n )\n response: Response = make_response(jsonify(data), code, headers)\n return response", "def update_draft(self, identity, data=None, record=None, **kwargs):\n record.metadata = data.get('metadata', {})", "def test_set_metadata_for_rate_plan(self):\n pass", "def save(self, *args, **kwargs):\n super(Preview, self).save(*args, **kwargs)\n self.use_effect()", "def get_initial(self):\n\t\n\t#Getting the initial data and setting it\n initial = super(UpdateView, self).get_initial()\n\timage_ref = default_value.get_setting('compute', 'image_ref') \n flavor_ref = default_value.get_setting('compute', 'flavor_ref')\n initial.update({'test_id': self.kwargs['test_id'], 'image_ref': image_ref, 'flavor_ref': flavor_ref})\n return initial", "def update_metadata(self):\n self.data[\"keywords\"] = self.repo.topics(self.data.get(\"keywords\", []))\n self.data[\"description\"] = self.data.get(\"description\") or self.repo.description\n self.data[\"codeRepository\"] = (\n self.data.get(\"codeRepository\") or self.repo.html_url\n )\n self.data[\"name\"] = self.data.get(\"name\") or self.repo.name\n self.data[\"issueTracker\"] = (\n self.data.get(\"issueTracker\") or self.repo.issues_url\n )\n self.data[\"license\"] = self.data.get(\"license\") or self.repo.license", "def metadata(self, 
metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def metadata(self, metadata):\n\n self._metadata = metadata", "def _persist(self):\n trunk.set(self.uuid, self.json)", "def _store_package_metadata(self):", "def addDemographics(self):\n p = self.p\n demographics_data = {\n 'dob': p.dob,\n 'gender': p.gender,\n 'email': p.email,\n 'fname': p.fname,\n 'lname': p.lname,\n 'hphone': p.home,\n 'cphone': p.cell,\n 'country': p.country,\n 'city': p.city,\n 'pcode': p.pcode,\n 'region': p.region,\n 'street': p.street,\n }\n self.demographics_doc = DEMOGRAPHICS.sub(demographics_data).done()", "def test_preview_post(self):\n pass", "def on_Deposit_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def add_metadata(self, metadata: dict) -> None:", "def setContentData(self, content):\n original = content\n if IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMIEditForm, self).setContentData(content)", "def update(self):\n if self._data_provider_state is not None:\n self._state = self._data_provider_state()\n \n if self._data_provider_attributes is not None:\n self._attributes = self._data_provider_attributes()", "def save(self, metadata):\n pass", "def populate_initial_valid_metadata(self):\n pass", "def set_metadata(self, metadata):\n self.metadata = metadata\n return self" ]
[ "0.63907486", "0.5510283", "0.54667133", "0.53128654", "0.5308596", "0.51845413", "0.5147838", "0.5139438", "0.51325536", "0.51286876", "0.51013607", "0.5079927", "0.5060331", "0.5060331", "0.5060331", "0.5060331", "0.5060331", "0.5060331", "0.5060331", "0.50552124", "0.4892999", "0.48928958", "0.48844126", "0.48705626", "0.4852376", "0.4829425", "0.48245448", "0.48091954", "0.47840607", "0.4763599" ]
0.7150155
0
Empty the linked list O(n)
def clear(self):
    trav = self.head
    while trav is not None:
        nxt = trav.nxt
        trav.prev = trav.nxt
        trav.data = None
        trav = nxt
    self.head = None
    self.tail = None
    trav = None
    self.size = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n self.head = None", "def clear(self):\n SortedList.clear(self)\n self.head = None", "def clear(self):\n self._head = None\n self._tail = None\n self._size = 0", "def delete_node_at_start(self):\n if not self.head:\n print('List already empty.')\n return\n self.head = self.head.next", "def __remove_first(self):\n if self.__head is not None:\n self.__length -= 1\n self.__head = self.__head.next()\n if self.__length == 0: # when there are no more elements in the list,\n self.__last = None # remove the pointer to the last element", "def delete_node_at_end(self):\n if not self.head:\n print('List already empty')\n return\n temp = self.head\n while temp.next:\n if not temp.next.next:\n break\n temp = temp.next\n temp.next = None", "def clear(self) -> None:\n self.node.prev = self.node.next = self.node", "def delete_list(self): \n temp_node = self.head\n while temp_node is not None:\n prev_node = temp_node\n temp_node = temp_node.next\n # prev_node.val += \": deleted\" # for sanity check\n # reset data\n prev_node.val = None\n prev_node.next = None", "def test_iter_empty_sll(self):\n sll = SinglyLinkedList()\n a = Node('a')\n sll.insert_beg(a)\n sll.delete(a,a)\n print [i for i in sll]", "def clear(self):\r\n\t\t# re-initialize self._buckets\r\n\t\tself._buckets = []\r\n\t\tself.size = 0\r\n\t\tfor i in range(self.capacity):\r\n\t\t\tself._buckets.append(LinkedList())", "def deleteHead(self):\n if not self._head:\n return\n\n if self._head is self._tail:\n self._head = None\n self._tail = None\n else:\n self._head = self._head.next\n self._size -= 1", "def clear(self):\n\n for i in range(self.capacity):\n self._buckets[i].head = None # Empty out the LinkedList in each bucket\n self._buckets[i].size = 0\n self.size = 0", "def remove(self):\r\n if self.first() is not None:\r\n self.dec_size()\r\n self.set_first(self.first().next())\r\n if self.size() == 0: # when there are no more elements in the list,\r\n self.__last = None # remove the pointer to the last element\r", "def clear(self):\n self.__list = []", "def clear(self):\n self._items.clear()\n self._first = None\n self._last = None", "def clear(self):\n self._list.clear()", "def clear(self):\n while len(self.nodes) > 0:\n self.nodes[0].remove()\n\n self.has_been_modified = False", "def __cleanup(self):\n while self.levels > 1 and self.head.next == None:\n temp = self.head\n self.head = self.head.below\n del temp\n self.levels -=1", "def erase(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n last_node = current_node\r\n current_node = current_node.next\r\n if current_index == index:\r\n last_node.next = current_node.next\r\n return\r\n current_index += 1", "def clear(self):\n self._ll_tree.clear()", "def remove_all(self, number):\n if self.head.data.number() == number:\n self.head = self.head.next\n self._size -= 1\n\n if self.head is not None:\n cur_node = self.head\n while cur_node.next is not None:\n if cur_node.next.data.number() == number:\n cur_node.next = cur_node.next.next\n self._size -= 1\n else:\n cur_node = cur_node.next", "def clear(self) -> None:\n self._items = []\n self._size = 0", "def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def remove_duplicates_slow(linked_list):\n current = linked_list.head\n while 
current:\n runner = current\n while runner:\n if runner.next_node and runner.next_node.value == current.value:\n # delete this duplicate\n runner.next_node = runner.next_node.next_node\n runner = runner.next_node\n current = current.next_node", "def clear(self) -> None:\n self._used = set()\n self.search_pos = 1", "def clear(self):\n del self.__tree\n self.__tree = AVLTree()\n print(\"Set is empty now\")", "def clear(self):\n self.nodes = list()\n self.inputs = list()\n self.nodes += [self]", "def del_empty(list):\n for x in range(len(list)):\n if len(list[x - 1]) == 0:\n del list[x - 1]\n return list", "def unique(self) -> None:\n def unique_list(node: Node) -> Node: #recursive function to remove common elements\n \"\"\"unique helper\"\"\"\n if node is self.node:\n return node\n if node.next.val == node.val:\n temp = node.prev\n temp.next = node.next\n node.next.prev = temp\n unique_list(node.next)\n unique_list(self.node.next)" ]
[ "0.7618228", "0.758952", "0.7434152", "0.716568", "0.71470666", "0.71089613", "0.70733416", "0.7044231", "0.69825834", "0.68761986", "0.6806461", "0.67757773", "0.6737707", "0.66606444", "0.65375364", "0.6524759", "0.65140676", "0.64742666", "0.6388031", "0.63806", "0.63748556", "0.6343613", "0.63434255", "0.63377666", "0.632202", "0.6321315", "0.6310667", "0.631005", "0.6291493", "0.6287488" ]
0.7664343
0
Obtain data from head of linked list O(1)
def peek_first(self):
    if self.is_empty():
        raise RuntimeError("Empty list")
    return self.head.data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first(self):\r\n if self.head == None: #check if first(head) node is empty\r\n return 'null' #if yes, then return null\r\n else: #if it is not empty\r\n return self.head.data #return the data of head node\r", "def get(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n current_node = current_node.next\r\n if current_index == index: return current_node.data\r\n current_index += 1", "def get(self, key):\n # Your code here \n index = self.hash_index(key) \n cur = self.data[index].head \n\n if cur==None:\n print(\"linked list is empty\")\n elif cur.key== key:\n return cur.value\n else:\n while cur.next:\n cur= cur.next\n if cur.key ==key: \n return cur.value", "def peek(self):\n if self.is_empty():\n return None\n\n return self.linked_list.head.data", "def peek(self):\n if self.is_empty():\n return None\n return self.list.head.data", "def get_element(self, pos):\n curr = self.head\n count = 1\n\n while curr != None:\n if count == pos:\n return curr.data\n\n curr = curr.link\n count += 1\n return None", "def get(self, index):\n if index < 0:\n return -1\n # print('index:',index)\n p = self.head\n while index and p:\n p = p.next\n index -= 1\n # print('after,index:',index)\n if index:\n return -1\n if p and p.next:\n return p.next.val\n return -1\n # self.printList()", "def first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._head._next._element # front aligned with head of list", "def first(self):\r\n return self.__head", "def get_first(self):\n if self.is_empty():\n raise self.NoSuchNodeException()\n\n return self.head.data", "def first_value(self):\n if not self.is_empty():\n return self.data[self.head]\n return None", "def peek(self):\n if self.__size == 0:\n return None\n else:\n return self.__head.get_data()", "def first(self):\n return self.__head", "def peek(self):\n return self.list.head.data", "def front(self):\n if self.empty():\n return \"Linked List is Empty\"\n return self.head.data", "def get(self, index):\n cur = self.head\n while cur and index>0:\n cur = cur.next\n index -= 1\n if cur:\n return cur.val\n else:\n return -1", "def value_at(self, index):\n if self.empty():\n return \"Linked List Empty\"\n\n idx = 1\n l = self.head\n while l.next is not None:\n if idx is index:\n break\n\n l = l.next\n idx += 1\n return l.data", "def element_at(ll, position):\n curr = ll.head\n i = 1\n while curr != None:\n if i == position:\n break\n i += 1\n curr = curr.link\n\n if curr == None:\n return \"Index out of range\"\n else:\n return curr.data", "def get(self, index):\n if index >= self.len:\n return -1\n p = self.head.next\n while index > 0:\n index -= 1\n p = p.next\n return p.val", "def first(self):\n return self.head and self.head.value or None", "def first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._head._element # front aligned with head of list", "def read_index(self, index):\n current = self.head\n if index == 0:\n return current.data\n elif index >= self.size() :\n return None\n else:\n position = 0\n while position < index:\n current = current.next_node\n position += 1\n return current.data", "def first(self):\n if self.is_empty():\n raise Empty('La cola está vacía')\n return self._head._element # frente alineado con la cabeza de la lista", "def extract_head(data):\n tl = data['tls'][data['i']];\n br = data['brs'][data['i']];\n head = extract_area(data,(tl,br));\n return head;", "def get(self, index):\n if index < 
0 or index >= self.length:\n return -1\n curr = self.head\n for i in range(1, index + 1):\n curr = curr.next\n return curr.val", "def head(self) -> object:\n if not self._head:\n raise EmptyListException(\"The list is empty.\")\n return self._head", "def get_item(self,index):\n current = self.head\n count = 0\n \n while current != None and count <= index:\n count+=1\n current =current.get_next()\n \n if count!=index:\n print('Index out of bound')", "def naive(head: ListNode) -> ListNode:\n if head is None or head.next is None: # Not possible to have a cycle\n return None\n seen = {} # A hash-set would work better\n curr = head\n while curr is not None:\n if curr in seen:\n return curr\n else:\n seen[curr] = True\n curr = curr.next\n return None", "def peek(self):\n return self.list.head", "def find(self, key):\n if self.head is None:\n return\n itr = self.head\n while itr:\n if itr.data == key:\n return itr.data\n itr = itr.next\n return None" ]
[ "0.7262619", "0.7033946", "0.69992024", "0.69802266", "0.688625", "0.6812085", "0.6749111", "0.6720148", "0.6715752", "0.6712102", "0.6700454", "0.668674", "0.65855867", "0.65336317", "0.65283626", "0.64966935", "0.64835984", "0.6479097", "0.646512", "0.6455154", "0.6453674", "0.6435599", "0.6434158", "0.64261144", "0.6413549", "0.6381134", "0.63779056", "0.63550323", "0.6331737", "0.63136214" ]
0.7192319
1
Obtain data from tail of linked list O(1)
def peek_last(self):
    if self.is_empty():
        raise RuntimeError("Empty list")
    return self.tail.data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_last(self):\n if self.is_empty():\n raise self.NoSuchNodeException()\n\n tail = self.getNode(self.list_size - 1)\n tail_data = tail.data\n\n if self.list_size == 1:\n self.head = None\n else:\n before_tail = self.getNode(self.list_size - 2)\n before_tail.next_node = None\n\n self.list_size -= 1\n\n return tail_data", "def get_tail(self):\n pointer = self.head\n while pointer.next_node:\n pointer = pointer.next_node\n return pointer", "def last(self):\r\n if self.tail == None: #check if last(tail) node is empty\r\n return 'null' #if yes, then return null\r\n else: #if it is not empty\r\n return self.tail.data #return the data of tail node\r", "def back(self):\n if self.empty():\n return \"Linked List is Empty\"\n\n h = self.head\n while h.next is not None:\n h = h.next\n\n return h.data", "def peek(self):\n if self.is_empty():\n return None\n\n return self.linked_list.head.data", "def get(self, index):\n if index < 0:\n return -1\n # print('index:',index)\n p = self.head\n while index and p:\n p = p.next\n index -= 1\n # print('after,index:',index)\n if index:\n return -1\n if p and p.next:\n return p.next.val\n return -1\n # self.printList()", "def pop(self):\n if self.__size == 0:\n return None\n else:\n data = self.__head.get_data()\n self.__head = self.__head.get_next()\n self.__size -= 1\n return data", "def peek(self):\n if self.is_empty():\n return None\n return self.list.head.data", "def getLast(self):\n\n if self.firstItem == None:\n raise Exception(\"cannot getLast - linked list is empty\")\n\n # 1. Find the last item\n lastItem = self.firstItem\n while lastItem.next != None:\n lastItem = lastItem.next\n\n # 2. Return the value\n return lastItem", "def pop_back(self):\n if self.empty():\n return \"Empty Linked List\"\n\n h = self.head\n while h is not None:\n if h.next.next is None:\n data = h.next.data\n h.next = None\n break\n h = h.next\n return data", "def value_n_from_end(self, n):\n size = self.size()\n if n < 0:\n return \"The value passed cannot be negative\"\n if n > size:\n return \"the value passed cannot be greater than the size\"\n\n h = self.head\n\n # MY SOLUTION - O(2n) TIme O(1) Space - For a Huge List it will take more Time to Traverse 2 times.\n # idx = 0\n # remainder = size - n\n # while h is not None:\n # if idx == remainder:\n # return h.data\n # idx += 1\n # h = h.next\n\n # BETTER SOLUTION - O(n) Time and O(m) Space\n # BEST SOLUTION - Check Cracking the Coding Interview Q-2.2\n arr = list()\n while h is not None:\n if len(arr) == n + 1:\n arr.pop(0)\n arr.append(h.data)\n h = h.next\n return arr[0]", "def get(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n current_node = current_node.next\r\n if current_index == index: return current_node.data\r\n current_index += 1", "def peek(self):\n if self.__size == 0:\n return None\n else:\n return self.__head.get_data()", "def pop(self):\n if self.head is None:\n return None\n else:\n data = self.head._data\n self.head = self.head._next\n self.count -= 1\n return data", "def get_item(self,index):\n current = self.head\n count = 0\n \n while current != None and count <= index:\n count+=1\n current =current.get_next()\n \n if count!=index:\n print('Index out of bound')", "def peek(self):\n size = self._list.size()\n if size == 0:\n return None\n return self._list.tail.data", "def pop(self):\n\n traverse = self.head\n\n while traverse.next is not None:\n\n t1 = traverse.next\n if t1.next is None:\n 
traverse.next = None\n return t1.data\n traverse = traverse.next", "def pop(self):\n\n traverse = self.head\n\n if self.head == None:\n return -1\n\n if self.head.next == None:\n self.head = None\n print(traverse.data)\n\n while traverse.next is not None:\n\n t1 = traverse.next\n\n if t1.next is None:\n traverse.next = None\n return t1.data\n traverse = traverse.next", "def pop(self):\n if self.head is not None:\n currNode = self.head\n self.head = currNode.next\n return currNode.data\n return None", "def pop(self):\n\n traverse = self.head\n\n if self.head == None:\n return -1\n\n if self.head.next == None:\n self.head = None\n\n return traverse.data\n\n while traverse.next is not None:\n\n t1 = traverse.next\n if t1.next is None:\n traverse.next = None\n\n return t1.data\n traverse = traverse.next", "def tail(self):\n return self._tail", "def tail(self):\n return self._tail", "def peek(self):\n # TODO: Return top item, if any\n print(\"self.list P\", self.list)\n print(\"length\", self.length())\n if self.is_empty():\n return None\n else:\n return self.list[self.length()-1]\n # do n-1\n # return self.list[-]", "def pop_tail(self):\n if self.is_empty():\n return None\n\n current = self._tail._previ\n node = self._tail\n current._next = None\n self._tail = current\n data = node._data\n node = Node(None)\n\n self._size -= 1\n\n return data", "def nth_node_from_end(self, n):\n\n length = 0\n\n if self.head:\n current = self.head\n while current:\n length += 1\n current = current.next\n\n count = 0\n current = self.head\n while count < (length - n): \n count += 1\n current = current.next\n return current.data", "def after(self,p):\r\n \r\n current = self.tail #test from the tail node\r\n \r\n if p == current: #if the tail node = p\r\n return 'null' #there cannot be a node after it\r\n \r\n while current !=p: #else keep cheking the elements until it reaches p\r\n current = current.prev\r\n return current.next.data #now current = p, so return the node after it\r", "def last(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._tail._prev._element", "def pop(self):\n temp_data = self.head.data\n self.head = self.head.next_node\n return temp_data", "def next_data(self):\n return self.data.pop()", "def get(self, index):\n if index >= self.len:\n return -1\n p = self.head.next\n while index > 0:\n index -= 1\n p = p.next\n return p.val" ]
[ "0.6827692", "0.68033373", "0.6787032", "0.6713353", "0.6656795", "0.66173166", "0.6605389", "0.6585034", "0.656764", "0.65581566", "0.65382594", "0.65274656", "0.65145355", "0.6505743", "0.6500952", "0.64953417", "0.64916885", "0.64655113", "0.6456848", "0.64051723", "0.63992953", "0.63992953", "0.6377566", "0.6373603", "0.6371171", "0.63432574", "0.6299206", "0.629225", "0.6282757", "0.6281012" ]
0.7195302
0
Convert from ParseResults to normal list.
def result2list(foo):
    if isinstance(foo, ParseResults):
        return [result2list(bar) for bar in foo]
    else:
        return foo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_result_list(self,res):\n res_list = []\n for r in res:\n res_list.append(r)\n\n return res_list", "def __to_list(__results):\n rows = []\n for row in __results:\n rows.append(row)\n \n __results.close()\n\n return rows", "def _ibmq_result_transform(self, result: Result) -> List[List]:\n all_results = []\n for i in range(len(result.results)):\n # Convert ['101', '110', ...] to [[1, 0, 1], [0, 1, 1], ...]\n circ_mem = result.get_memory(i) # ['101', '110', ...]\n for shot_mem in circ_mem:\n shot_list = [int(mem) for mem in shot_mem]\n shot_list.reverse()\n all_results.append(shot_list)\n\n return all_results", "def parse(self) -> List[List[Union[str,int]]]:\n return self.__create_list(cp(self.tokens))", "def parse_list(cls, data):\n results = ResultSet()\n data = data or []\n for obj in data:\n if obj:\n results.append(cls.parse(obj))\n return results", "def convert_all_coordinates(results: List[ResponseObject]) -> List[ResponseObject]:\n results = [convert_lat_long_dict(result) for result in results]\n results = [convert_lat_long_list(result) for result in results]\n return results", "def parse(self):\n return []", "def tolist(self, flat=0):\n pass", "def convert_lat_long_list(result: ResponseObject):\n # Format inner record if present, e.g. for search results\n if 'record' in result:\n result['record'] = convert_lat_long_list(result['record'])\n return result\n\n if ',' in (result.get('location') or ''):\n result['location'] = [try_float(coord) for coord in result['location'].split(',')]\n return result", "def aslist(self):\n try:\n return [x.aslist() for x in self]\n except Exception:\n pass\n return [x for x in self]", "def post_process_result(self, result: np.ndarray) -> np.ndarray:\n to_cut = len(\"_tag\")\n return np.asarray([[tag[:-to_cut] for tag in list_of_tags] for list_of_tags in result])", "def __parse_list(self) -> list:\r\n self.idx += 1\r\n l = []\r\n while self.data[self.idx: self.idx + 1] != b'e':\r\n l.append(self.__parse())\r\n self.idx += 1\r\n return l", "def resulttolist(result, feedback = 0):\n\n newlist = []\n\n if feedback == 2:\n for i in result:\n j = \" \".join(i)\n k = list(j.split(\" \"))\n newlist.append(k)\n elif feedback == 3:\n for i in result:\n j = \" \".join(i)\n k = list(j.split(\" \"))\n newlist.append(k)\n else:\n for i in result:\n j = \"\".join(i)\n newlist.append(j)\n\n return newlist", "def toMoves(self, results):\n moves = ArrayList()\n for result in results:\n moves.add(Move(result.get(1)))\n return moves", "def convert_result_to_object(self, result):\n keys = self.COLUMN_TO_FILED\n if len(result) == 0:\n return None\n else:\n list_object = list()\n for r in result:\n list_object.append(dict(zip(keys, r)))\n return list_object", "def to_list(self):\n return list(self.data)", "def to_list(self):\n return self.main_list[:self.num_elements]", "def _decode_result(self, result):\n if isinstance(result, list):\n return [self._decode_result(r) for r in result]\n elif isinstance(result, SimpleString):\n return result.value\n elif isinstance(result, SimpleError):\n return self._decode_error(result)\n else:\n return result", "def to_list(self):\n _return = []\n pointer = self.first\n while pointer is not None:\n _return.append(pointer.data)\n pointer = pointer.next\n return _return", "def strings_to_elements(self, results: List[str]) -> Iterable[T]:\n ...", "def _to_pylist(self):\r\n\t\tpylist = []\r\n\t\tdef record_values(i, list):\r\n\t\t\tpylist.append(list._value)\r\n\t\t\treturn True\r\n\t\tself._traverse(record_values)\r\n\t\treturn 
pylist", "def convert(self, format):\n self._plugin = kurt.plugin.Kurt.get_plugin(format)\n return list(self._normalize())", "def tolist (self) :\r\n if self.complex :\r\n result = []\r\n for x in xrange(0,len(self)) :\r\n result.append(self[x])\r\n return result\r\n else :\r\n return self.impl.tolist()", "def tolist(self) -> List[T]:\n if isinstance(self.array, list):\n return self.array\n return list(self.array)", "def gremlin_results_to_dict(result: Any) -> List[Dict[str, Any]]:\n res = []\n\n # For lists or paths unwind them\n if isinstance(result, (list, Path)):\n for x in result:\n res.append(GremlinParser._parse_dict(x))\n\n # For dictionaries just add them\n elif isinstance(result, dict):\n res.append(result)\n\n # For everything else parse them\n else:\n res.append(GremlinParser._parse_dict(result))\n return res", "def _ProcessQueryResult(self, result):\n self.__more_results = result.more_results()\n\n if self.__keys_only:\n return [Key._FromPb(e.key()) for e in result.result_list()]\n else:\n return [Entity._FromPb(e) for e in result.result_list()]", "def toRoles(self, results):\n roles = ArrayList()\n for result in results:\n roles.add(Role(name))\n return roles", "def tolist(self):\n \n ret = []\n \n for e in self:\n ret.append(e)\n \n return ret", "def etree_to_list(self, etree):\n return [x.strip()\n for x in lxml.etree.tostring(etree).split(b'\\n')\n if x.strip()]", "def to_list_flat(self):\n return self.rep.entries()" ]
[ "0.67065823", "0.6454018", "0.6392541", "0.61657304", "0.6103048", "0.60968775", "0.600516", "0.5954104", "0.58549696", "0.5853532", "0.5851375", "0.5830539", "0.5825065", "0.5800057", "0.5794257", "0.57913077", "0.57792944", "0.5751782", "0.5738138", "0.57195807", "0.5705452", "0.56934255", "0.5664899", "0.5661306", "0.5660596", "0.56387365", "0.5628334", "0.5627505", "0.56256574", "0.5620394" ]
0.7606971
0