| query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (sequencelengths 30-30) | negative_scores (sequencelengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Transfer data from one column to another in the Measurement Set | def transfer_data(self, column_in, column_out):
self.open_msfile()
data_in = self.read_col(column_in)
self.open_msfile(nomodify=False)
self.tb.putcol(column_out, data_in)
self.close_msfile() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def manipulateData(columnNumber, dataset):\r\n\r\n #Make data the first colum and columnNumber column only\r\n uuid = dataset.at[0, columnNumber]\r\n newData = dataset.iloc[1:, 0: columnNumber + 1: columnNumber]\r\n timesColumn = dataset.loc[1:, 1]\r\n newData = newData.astype(float)\r\n newData = newData[~np.isnan(newData.iloc[:, 1])]\r\n\r\n newData.insert(loc = 0, column = \"Time(UTC)\", value = timesColumn)\r\n standardDeviationColumn = newData.iloc[:, 2].rolling(100).std()\r\n\r\n #Reset column index number to be sequential\r\n newData.columns = range(newData.shape[1])\r\n\r\n #Create a new column and initialize values to 0\r\n #lastColumn = dataset.iloc[:, dataset.shape[1] - 1]\r\n newData.insert(newData.shape[1], newData.shape[1], 0)\r\n newData.insert(newData.shape[1], newData.shape[1], 0)\r\n newData.insert(newData.shape[1], newData.shape[1], 0)\r\n\r\n #make new column as the previous value\r\n originalColumn = newData.loc[1:, 2]\r\n newData = newData.astype({3:'float64', 4:'float64', 5:'float64'})\r\n for index in range(newData.index.shape[0] - 1):\r\n dataIndex = newData.index[index]\r\n newData.at[dataIndex, 3] = originalColumn.at[newData.index[index - 1]]\r\n newData.at[dataIndex, 4] = originalColumn.at[newData.index[index - 2]]\r\n newData.at[dataIndex, 5] = originalColumn.at[newData.index[index + 1]]\r\n\r\n newData = pd.DataFrame(newData)\r\n\r\n newData.insert(loc = newData.shape[1], column = \"Standard Deviation\", value = standardDeviationColumn)\r\n newData = newData.iloc[newData.index[99]: , :]\r\n\r\n #save the file as the testing dataset -- FOR TESTING\r\n testDataString = \"TestingData/\" + uuid + \".csv\"\r\n newData.to_csv(testDataString)\r\n\r\n\r\n Dictionary.add(dataset.loc[0, columnNumber], newData)\r\n return newData",
"def normalize_dataset(self):",
"def change_to_object(column, data):\n data[column] = data[column].astype('object')",
"def extend_model_data(self, newdata):\n logger.warning(\"Extend data is untested and may have unexpected consequences\")\n data_temp = newdata.copy()\n data_temp['X'] -= self.origin[0]\n data_temp['Y'] -= self.origin[1]\n data_temp['Z'] -= self.origin[2]\n data_temp['X'] /= self.scale_factor\n data_temp['Y'] /= self.scale_factor\n data_temp['Z'] /= self.scale_factor\n self.data.concat([self.data, data_temp], sort=True)",
"def to_(self, units) -> None:\n _to(self, units, inplace=True)",
"def process_measurements(measurements: pd.DataFrame) -> pd.DataFrame:\n out = (\n measurements.rename(columns=NEW_COLNAMES)\n .pipe(util.make_columns_lower_case)\n .dropna(subset=DROPNA_COLS, axis=0)\n ).copy()\n for col in [\"x1\", \"x2\", \"y\"]:\n out[col] = out[col].astype(float)\n out[\"x1:x2\"] = out[\"x1\"] * out[\"x2\"]\n return out",
"def map (a_data,a_column,a_old,a_new) :\n loc_new_data = a_data\n a_data[a_column].replace(a_old,a_new,inplace=True)",
"def transform(self, X):\n\n X = super().transform(X)\n\n X[self.columns] = self.value\n\n return X",
"def transform(self, dataframe: DataFrame) -> DataFrame:",
"def switchColumn(data_file, column1, column2):\n\tdata = []\n\tfor dataLine in readData(data_file):\n\t\ttmp = dataLine[column1-1]\n\t\tdataLine[column1-1] = dataLine[column2-1]\n\t\tdataLine[column2-1] = tmp\n\t\tdata.append(dataLine)\n\twriteData(data_file, data)",
"def unit_convert(df, coln1, coln2, unit, conversion_factor, coln3):\n df.loc[df[coln2] == unit, coln1] = conversion_factor * df[coln3]\n return df",
"def _extend(self, other_field, memo) -> None:\n if other_field.data.ndim != self.data.ndim:\n raise ValueError(\n f\"Field '{self.name}' cannot be extended. Dimensions must be equal. ({other_field.data.ndim} != {self.data.ndim})\"\n )\n\n old_id = id(self.data)\n if self.data.dtype < other_field.data.dtype:\n # Increase size of self.data.dtype before inserting\n new_data = np.insert(self.data.astype(other_field.data.dtype), self.num_obs, other_field.data, axis=0)\n else:\n new_data = np.insert(self.data, self.num_obs, other_field.data, axis=0)\n memo[old_id] = (self.data, new_data)\n self.data = new_data",
"def add_geomean_to_product_data(product, prop_name, geomean_val):\n\tfor prop_data_list in product['data']:\n\t\tif prop_data_list[0] == prop_name:\n\t\t\t# prop_data_list.append(geomean_val)\n\t\t\tprop_data_list.insert(len(prop_data_list)-1, roundData(prop_name, geomean_val)) # inserts geomean before Measured column\n\treturn product",
"def set_data(self, data):\n\n # Convert voltages to currents and overwrite\n if self.use_unit == 'A':\n data['data'] = self.convert_to_unit(data['data'], self.use_unit)\n\n super(RawDataPlot, self).set_data(data)",
"def map_value(self, df, from_column, to_column, value = None,\n values_map_column = None, values_map = None):\n if from_column not in df.columns:\n return df\n\n if value:\n df[to_column] = np.where(df[from_column].notnull(), value, None)\n elif values_map_column and values_map:\n #add unit value regardless if there is a measure value\n df[to_column] = df[values_map_column].map(values_map)\n #reset all unit values where there's no corresponding measure\n df[to_column] = df[to_column].mask(df[from_column].isnull(), None)\n\n return df",
"def transform(self, data: pd.DataFrame):\n raise NotImplementedError",
"def augment_column(self, col: pd.Series,) -> pd.Series:",
"def _wrangle(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_SONG_DATA)\n\n dataframe = dataframe \\\n .dropDuplicates(['artist_id']) \\\n .where(dataframe.artist_id != '') \\\n .select([\n 'artist_id',\n 'artist_name',\n 'artist_location',\n 'artist_latitude',\n 'artist_longitude'\n ]) \\\n .withColumnRenamed('artist_name', 'name') \\\n .withColumnRenamed('artist_location', 'location') \\\n .withColumnRenamed('artist_latitude', 'latitude') \\\n .withColumnRenamed('artist_longitude', 'longitude')\n\n self._cache.set_source(config.DATAFRAME_ARTISTS, dataframe)",
"def transform(self, data):\n data[self.field] = data[self.field].fillna(-1)\n return data",
"def transform(self, df: DataFrame) -> DataFrame:\n df = deepcopy(df) # don't overwrite input df\n\n for i, column in enumerate(self.columns):\n df[column] = df[column].values + self.delta_mus[i]\n\n return df",
"def _add_converted_units(self, dataframe, parameter, key='VALUE'):\n convert_unit = self.parameters.get_converter(parameter)\n try:\n dataframe[key] = dataframe['DATA_VALUE'].apply(convert_unit)\n except KeyError:\n log.warn(\"Missing 'VALUE': no unit conversion.\")\n else:\n dataframe.unit = self.parameters.unit(parameter)",
"def copy_measurement_setup(n1, n2):\n save_measurement_setup(n2, measurement_setup(n1))",
"def data_column_conversion(data:pandas.core.frame.DataFrame) -> pandas.core.frame.DataFrame:\n data = data.assign(W = (data.label == 'W') + 0,D = (data.label == 'D') + 0,L = (data.label == 'L') + 0)\n data = data.drop(\"label\",axis=1)\n return data",
"def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join",
"def data_unification(self, data1, data2):\r\n data = data1 + data2\r\n return data",
"def dst(df):\n pass",
"def attach_units(column: pd.Series, units: ureg.Quantity) -> np.array:\n a = np.array(column)\n with_units = Q_(a, units)\n return with_units",
"def insert_new_measurement(database: Database, data_model, metric: Dict, measurement: Dict) -> Dict:\n if \"_id\" in measurement:\n del measurement[\"_id\"]\n metric_type = data_model[\"metrics\"][metric[\"type\"]]\n direction = metric.get(\"direction\") or metric_type[\"direction\"]\n for scale in metric_type[\"scales\"]:\n value = calculate_measurement_value(data_model, metric, measurement[\"sources\"], scale)\n status = determine_measurement_status(metric, direction, value)\n measurement[scale] = dict(value=value, status=status, direction=direction)\n for target in (\"target\", \"near_target\", \"debt_target\"):\n measurement[scale][target] = determine_target(\n metric, measurement, metric_type, scale, cast(TargetType, target))\n measurement[\"start\"] = measurement[\"end\"] = iso_timestamp()\n database.measurements.insert_one(measurement)\n del measurement[\"_id\"]\n return measurement",
"def de_normalize_data(self, df):\n if len(df) == 0:\n return df\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.permitted_range[feature_name][1]\n min_value = self.permitted_range[feature_name][0]\n result[feature_name] = (\n df[feature_name]*(max_value - min_value)) + min_value\n return result",
"def update_data(self, data):\n start_time = data.index[-1].strftime(\"%Y-%m-%dT%H:%M:%S.000000Z\")\n temp_data = self.gather_data(start=start_time)\n temp_data = self._list_to_df(temp_data)\n if (len(temp_data) > 1):\n # temp_data[0] is the same as data[-1]\n out_data = data.append(temp_data[1:])\n return out_data"
] | [
"0.58232236",
"0.5593612",
"0.5589331",
"0.55474955",
"0.55278325",
"0.5509298",
"0.54993767",
"0.5491314",
"0.53649133",
"0.52494544",
"0.52450836",
"0.52394146",
"0.52272606",
"0.5226081",
"0.52146274",
"0.52061564",
"0.51992697",
"0.5190929",
"0.5175776",
"0.51704514",
"0.51279026",
"0.5103821",
"0.50982547",
"0.50977963",
"0.5087627",
"0.508488",
"0.50829417",
"0.50731397",
"0.50703526",
"0.50546736"
] | 0.5719916 | 1 |
Convert Measurement set to uvfits file | def convert_to_uvfits(self, uvfits_name, overwrite=False):
# NOTE: The phase center shifts to (0.0, 0.0) during the conversion
ctk.exportuvfits(self.msfile, uvfits_name, overwrite=overwrite) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_uem(uemf, uem, n_digits=3):\n with open(uemf, 'wb') as f:\n for file_id in sorted(iterkeys(uem)):\n for onset, offset in sorted(uem[file_id]):\n line = ' '.join([file_id,\n '1',\n format_float(onset, n_digits),\n format_float(offset, n_digits)\n ])\n f.write(line.encode('utf-8'))\n f.write(b'\\n')",
"def save_vtu_file(arr, name, filename, sample_fp=None):\n if sample_fp == None:\n sample_fp = vda.get_sorted_fps_U(self.settings.DATA_FP)[0]\n\n ug = vtktools.vtu(sample_fp) #use sample fp to initialize positions on grid\n\n ug.AddScalarField('name', arr)\n ug.Write(filename)",
"def vtp(self, f_vtu, f_vtp):\r\n reader = vtk.vtkXMLUnstructuredGridReader()\r\n reader.SetFileName(f_vtu)\r\n reader.Update()\r\n ugrid = reader.GetOutput()\r\n geometryFilter = vtk.vtkGeometryFilter()\r\n geometryFilter.SetInputData(ugrid)\r\n geometryFilter.Update()\r\n polydata = geometryFilter.GetOutput()\r\n writer =vtk.vtkXMLPolyDataWriter()\r\n writer.SetFileName(f_vtp)\r\n writer.SetInputData(polydata)\r\n writer.Write()\r\n print(\"vtp file created.\")",
"def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6",
"def onevtkfile():\n basedir = '/home/amit/WorkSpace/UCLA/simulations/PhaseDiagram/RawData'\n with hp.File('VTKFile.h5', 'w') as onefile:\n allvtk = np.empty((600, 500, 3, 216), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n vtkfilepath = '{}/Run{}/VTKFile-{}.h5'.format(basedir, i, j+1)\n with hp.File(vtkfilepath, 'r') as vtkfile:\n for t in range(500):\n allvtk[j, t, i, :] = vtkfile['T{}/Points'.format(2*t)][:].ravel()\n onefile.create_dataset('Points', data=allvtk, chunks=(1, 50, 3, 216), \n compression='gzip', compression_opts=9)",
"def dump_tube_intensities(filename,raw):\n import math\n outfile = open(filename,\"w\")\n outfile.write(\"#Dump of detector tube intensities: %d detectors, %d steps each\\n\" % (raw.shape[1],raw.shape[0]))\n outfile.write(\"#Step Intensity ESD\\n\")\n for tube_no in range(raw.shape[1]):\n outfile.write(\"#Data for tube %d\\n\" % tube_no)\n for step_no in range(raw.shape[0]):\n outfile.write(\"%4d%8.2f %7.3f\\n\" % (step_no,raw.storage[step_no,tube_no],math.sqrt(raw.var[step_no,tube_no])))\n outfile.close()",
"def to_mmxu(self, mmxu):\r\n if (self.position == SinglePhaseBreaker.CLOSED):\r\n super().to_mmxu(mmxu)\r\n else:\r\n now = datetime.now()\r\n mmxu_dict = {\r\n \"A\": 0,\r\n \"Hz\": 0,\r\n \"PF\": 1,\r\n \"PFSign\": 0,\r\n \"V\": 0,\r\n \"VA\": 0,\r\n \"VAr\": 0,\r\n \"W\": 0\r\n }\r\n set_phase_a_mmxu(mmxu, mmxu_dict, now)",
"def unit_to_dva(self, in_file, out_file):\n in_img = nib.load(in_file)\n arr = in_img.get_fdata()\n dva_arr = (arr * self.stimwidth_pix) / self.ppd\n out_img = nib.Nifti1Image(dva_arr, affine=in_img.affine)\n out_img.to_filename(out_file)\n return dva_arr",
"def writefits(self,filename, z,lmu):\n t = Table([z,self.ebl_array(z,lmu)], names = ('REDSHIFT', 'EBL_DENS'))\n t2 = Table()\n t2['WAVELENGTH'] = Column(lmu, unit = 'micrometer')\n\n hdulist = fits.HDUList([fits.PrimaryHDU(),fits.table_to_hdu(t),fits.table_to_hdu(t2)])\n\n hdulist[1].name = 'NUINU_VS_Z'\n hdulist[2].name = 'WAVELENGTHS'\n\n hdulist.writeto(filename, overwrite = True)\n return",
"def saveVelocityAndPressureVTK_binary(pressure,u,v,w,x,y,z,filename,dims):\n numEl_size = u.size; numEl = np.prod(numEl_size);\n # open the file and write the ASCII header:\n file = open(filename,'w')\n file.write('# vtk DataFile Version 3.0\\n')\n file.write('VTK file for data post-processed with Python\\n')\n file.write('Binary\\n\\n')\n file.write('DATASET STRUCTURED_GRID\\n')\n file.write('DIMENSIONS %d %d %d \\n'%(dims[0],dims[1],dims[2]))\n file.write('POINTS %d float\\n'%(numEl))\n file.close()\n \n # append binary x,y,z data\n file = open(filename,'ab')\n for i in range(len(x)): # there really needs to be a better way.\n pt = [x[i],y[i],z[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n \n file.close()\n \n # append an ASCII sub header\n file = open(filename,'a')\n file.write('POINT_DATA %d \\n'%numEl)\n file.write('VECTORS velocity_vectors float\\n')\n file.close()\n \n # append binary u,v,w data\n file = open(filename,'ab')\n for i in range(len(u)):\n pt = [u[i],v[i],w[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n file.close()\n \n # append ASCII sub header for scalar velocity magnitude data\n file = open(filename,'a')\n file.write('SCALARS VelocityMagnitude float\\n')\n file.write('LOOKUP_TABLE default\\n')\n \n file.close()\n \n file = open(filename,'ab')\n v_mag = np.sqrt(u**2+v**2+w**2)\n file = open(filename,'ab')\n p_buf = array('f',v_mag); p_buf.byteswap()\n file.write(p_buf)\n file.close()\n \n \n # append another ASCII sub header for the scalar pressure data\n file = open(filename,'a')\n file.write('SCALARS Pressure float\\n')\n file.write('LOOKUP_TABLE default\\n')\n file.close()\n \n # append binary pressure data\n file = open(filename,'ab')\n p_buf = array('f',pressure); p_buf.byteswap()\n file.write(p_buf)\n file.close()",
"def readFT(self,file=\"out__1.ft\"):",
"def to_file(self, namefile=None):\n # Observational data\n if namefile is None:\n folder = ''\n file = self.name.replace(' ', '_')+'.dat'\n else:\n folder = os.path.dirname(namefile)\n file = os.path.basename(namefile)\n data = np.array([(self.time*u.s + self.tref).jd, self.time, self.flux, self.model, self.flux-self.model])\n colunm_names = ['Time JD', 'Time relative to {} UTC in seconds'.format(self.tref.iso),\n 'Observational Flux', 'Modelled Flux', 'Residual O-C']\n np.savetxt(os.path.join(folder, file), data.T, fmt='%11.8f')\n f = open(os.path.join(folder, file) + '.label', 'w')\n for i, name in enumerate(colunm_names):\n f.write('Column {}: {}\\n'.format(i+1, name))\n f.close()\n # Complete Model\n if hasattr(self, 'model_geometric'):\n data_model = np.array([(self.time_model*u.s + self.tref).jd, self.time_model, self.model_geometric,\n self.model_fresnel, self.model_star])\n colunm_names_model = ['Model time JD', 'Model time relative to {} UTC in seconds'.format(self.tref.iso),\n 'Geometric Model', 'Model with Fresnel diffraction', 'Model with star diameter']\n np.savetxt(os.path.join(folder, 'model_'+file), data_model.T, fmt='%11.8f')\n f = open(os.path.join(folder, 'model_'+file)+'.label', 'w')\n for i, name in enumerate(colunm_names_model):\n f.write('Column {}: {}\\n'.format(i+1, name))\n f.close()",
"def convert_dataset_to_libsvm(samples, path):\n with open(path, 'wb') as f:\n for sample in samples:\n # Write sample's label.\n f.write('%d' % sample[-1])\n\n # Write sample's features.\n for i, feature in enumerate(sample[:-1], 1): # Write features.\n # Convert to int if no data will be lost.\n if feature == int(feature):\n f.write(' %d:%d' % (i, feature))\n # Else stick with float.\n else:\n f.write(' %d:%f' % (i, sample))\n\n f.write('\\n')",
"def writeOutFileBarcodeUMICounts(barcode_dict_summary, outFileName):\n with gzip.open(outFileName, 'wb') as out_file:\n for barcode in barcode_dict_summary:\n out_file.write(barcode)\n out_file.write(\"\\t\" + str(barcode_dict_summary[barcode][1]))\n out_file.write(\"\\n\")",
"def __convert_featuresets(self, featuresets, output):\n\n\t\tif isinstance(output, str):\n\t\t\toutput = open(output,'w')\n\t\telif not isinstance(output, file):\n\t\t\traise TypeError('output is a str or a file.')\n\n\t\tfor featureset in featuresets:\n\t\t\tfeat = self.__text_converter.toSVM(\" \".join(featureset))\n\t\t\tfeat = ''.join(' {0}:{1}'.format(f,feat[f]) for f in sorted(feat))\n\n\t\t\toutput.write('-1 ' + feat + '\\n')\n\t\toutput.close()",
"def convert_vrt(fname, out_fname, dataset_name='dataset',\n compression=H5CompressionFilter.LZF, filter_opts=None,\n attrs=None):\n with h5py.File(out_fname) as fid:\n with rasterio.open(fname) as rds:\n # set default chunks and set dimensions\n if rds.count == 3:\n chunks = (3, 256, 256)\n dims = (3, rds.height, rds.width)\n else:\n chunks = (256, 256)\n dims = (rds.height, rds.width)\n\n # create empty or copy the user supplied filter options\n if not filter_opts:\n filter_opts = dict()\n filter_opts['chunks'] = chunks\n else:\n filter_opts = filter_opts.copy()\n\n\n if 'chunks' not in filter_opts:\n filter_opts['chunks'] = chunks\n\n # modify to have 3D chunks if we have a multiband vrt\n if rds.count == 3 and len(filter_opts['chunks']) != 3:\n # copy the users original 2D chunk and insert the third\n chunks = list(filter_opts['chunks'])\n chunks.insert(0, 3)\n filter_opts['chunks'] = chunks\n\n # dataset attributes\n if attrs:\n attrs = attrs.copy()\n else:\n attrs = {}\n\n attrs['geotransform'] = rds.transform.to_gdal()\n attrs['crs_wkt'] = rds.crs.wkt\n\n # dataset creation options\n kwargs = compression.config(**filter_opts).dataset_compression_kwargs()\n kwargs['shape'] = dims\n kwargs['dtype'] = rds.dtypes[0]\n\n dataset = fid.create_dataset(dataset_name, **kwargs)\n attach_image_attributes(dataset, attrs)\n\n # tiled processing (all cols by chunked rows)\n ytile = filter_opts['chunks'][1] if rds.count == 3 else filter_opts['chunks'][0]\n tiles = generate_tiles(rds.width, rds.height, rds.width, ytile)\n\n for tile in tiles:\n # numpy index\n if rds.count == 3:\n idx = (\n slice(None),\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n else:\n idx = (\n slice(tile[0][0], tile[0][1]),\n slice(tile[1][0], tile[1][1])\n )\n\n # ensure single band rds is read as 2D not 3D\n data = rds.read(window=tile) if rds.count == 3 else rds.read(1, window=tile)\n\n # write\n dataset[idx] = data",
"def writeAVUToXMLFile(outfile, target, attribute, value, unit=None):\n outfile.write('\\t<AVU>\\n')\n outfile.write(\"\\t\\t<Target>%s</Target>\\n\" % (escape(target),))\n outfile.write(\"\\t\\t<Attribute>%s</Attribute>\\n\" % (escape(attribute),) )\n outfile.write(\"\\t\\t<Value>%s</Value>\\n\" % (escape(value),) )\n if unit:\n outfile.write(\"\\t\\t<Unit>%s</Unit>\\n\" % (unit,) )\n else:\n outfile.write('\\t\\t<Unit />\\n')\n outfile.write('\\t</AVU>\\n')",
"def saveFits(self, filename):\n \n if isinstance(self.res, type(None)):\n raise Exception('Result is not yet aviable.')\n \n header = fits.Header()\n header['NAXIS1'] = self.naxis\n header['NAXIS2'] = self.naxis\n header['CTYPE1'] = 'RA---SIN'\n header['CTYPE2'] = 'DEC--SIN'\n header['CDELT1'] = - self.fov/(np.pi/180 * self.naxis)\n header['CDELT2'] = self.fov/(np.pi/180 * self.naxis)\n header['BUNIT'] = 'JY/PIXEL'\n \n hdu = fits.PrimaryHDU(self.res, header=header)\n hdulist = fits.HDUList([hdu])\n hdulist.writeto(filename, overwrite=True)\n \n print(\"Saved as '%s'.\" %(filename))",
"def get_iPTF16asu():\n z = 0.187\n ebv = 0.0\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n \n tb = asci.read('../data/otherSN/Whitesides2017/table1.txt')\n tb = tb.to_pandas()\n tb = tb[tb[\"col4\"].values!=\">\"]\n \n tb = tb.rename(columns={'col1' : 'mjd',\n 'col2': 'tmax_rf',\n 'col3': 'filter',\n \"col4\": 'mag',\n 'col5': 'emag',\n 'col6': 'instrument'})\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb[\"mag\"] = np.array(tb[\"mag\"].values, dtype = np.float)\n #tb[\"emag\"] = np.array(tb[\"emag\"].values, dtype = np.float)\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb = tb[tb.wave!=0]\n return tb",
"def create_vuln_report():",
"def read_CU_model(self, infname='CU_SDT1.0.mod.h5'):\n indset = h5py.File(infname)\n lons = np.mgrid[0.:359.:2.]\n lats = np.mgrid[-88.:89.:2.]\n stalst = self.waveforms.list()\n if len(stalst) == 0:\n print 'Inversion with surface wave datasets only, not added yet!'\n return\n for staid in stalst:\n netcode, stacode = staid.split('.')\n staid_aux = netcode+'_'+stacode\n stla, elev, stlo = self.waveforms[staid].coordinates.values()\n if stlo < 0.:\n stlo += 360.\n try:\n ind_lon = np.where(lons>=stlo)[0][0]\n except:\n ind_lon = lons.size - 1\n try:\n ind_lat = np.where(lats>=stla)[0][0]\n except:\n ind_lat = lats.size - 1\n pind = 0\n while(True):\n if pind == 0:\n data = indset[str(lons[ind_lon])+'_'+str(lats[ind_lat])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon]\n outlat = lats[ind_lat]\n break\n pind += 1\n continue\n data = indset[str(lons[ind_lon+pind])+'_'+str(lats[ind_lat])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon+pind]\n outlat = lats[ind_lat]\n break\n data = indset[str(lons[ind_lon-pind])+'_'+str(lats[ind_lat])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon-pind]\n outlat = lats[ind_lat]\n break\n data = indset[str(lons[ind_lon])+'_'+str(lats[ind_lat+pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon]\n outlat = lats[ind_lat+pind]\n break\n data = indset[str(lons[ind_lon])+'_'+str(lats[ind_lat-pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon]\n outlat = lats[ind_lat-pind]\n break\n data = indset[str(lons[ind_lon-pind])+'_'+str(lats[ind_lat-pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon-pind]\n outlat = lats[ind_lat-pind]\n break\n data = indset[str(lons[ind_lon-pind])+'_'+str(lats[ind_lat+pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon-pind]\n outlat = lats[ind_lat+pind]\n break\n data = indset[str(lons[ind_lon+pind])+'_'+str(lats[ind_lat-pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon+pind]\n outlat = lats[ind_lat-pind]\n break\n data = indset[str(lons[ind_lon+pind])+'_'+str(lats[ind_lat+pind])].value\n if data[0, 1] != 0:\n outlon = lons[ind_lon+pind]\n outlat = lats[ind_lat+pind]\n break\n pind += 1\n if pind >= 5:\n print 'WARNING: Large differences in the finalized points: lon = '+str(outlon)+', lat = '+str(outlat)\\\n + ', station: '+staid+' stlo = '+str(stlo) + ', stla = '+str(stla)\n # print outlon, outlat, stlo, stla, pind\n header = {'data_source': 'CU_SDT',\\\n 'depth': 0, 'vs': 1, 'vsv': 2, 'vsh': 3, 'vsmin': 4, 'vsvmin': 5, 'vshmin': 6, \\\n 'vsmax': 7, 'vsvmax': 8, 'vshmax': 9}\n self.add_auxiliary_data(data=data, data_type='ReferenceModel', path=staid_aux, parameters=header)\n return",
"def backconvert_units(array, out_units):\n if out_units == \"None\" or out_units is None:\n return array\n else:\n raise Exception(\"Unsupported unit for bispectrum descriptors.\")",
"def specfits2dat( specfitsfile, specdatfile ):\n spec = Spectrum( specfitsfile )\n wspec2dat( spec.wave, spec.flux, specdatfile )\n return( specdatfile )",
"def save_as_fits(self, filename):",
"def to_fits(self):\n # Set up data\n names = [\n \"ENERG_LO\",\n \"ENERG_HI\",\n \"THETA_LO\",\n \"THETA_HI\",\n \"RAD_LO\",\n \"RAD_HI\",\n \"RPSF\",\n ]\n units = [\"TeV\", \"TeV\", \"deg\", \"deg\", \"deg\", \"deg\", \"sr^-1\"]\n data = [\n self.energy_lo,\n self.energy_hi,\n self.offset,\n self.offset,\n self.rad_lo,\n self.rad_hi,\n self.psf_value,\n ]\n\n table = Table()\n for name_, data_, unit_ in zip(names, data, units):\n table[name_] = [data_]\n table[name_].unit = unit_\n\n hdu = fits.BinTableHDU(table)\n hdu.header[\"LO_THRES\"] = self.energy_thresh_lo.value\n hdu.header[\"HI_THRES\"] = self.energy_thresh_hi.value\n\n return fits.HDUList([fits.PrimaryHDU(), hdu])",
"def convert_units(array, in_units=\"None\"):\n if in_units == \"None\" or in_units is None:\n return array\n else:\n raise Exception(\"Unsupported unit for bispectrum descriptors.\")",
"def write_to_file(qz_values, form_factors, errors, filename):\n with open(filename, 'w') as f:\n f.write(\"\"\"set direct_err 1\nset stepsize_integral 0.05\nset normal_mode 2\n\n\"\"\")\n f.write(\"\"\"# This sample was created by NFIT_to_SDP program.\n# (1) Change all ? to an appropriate sample number. \n# (2) Change sample_name to whatever sample name you want.\n# (3) Change other parameters to actual physical values.\n# (4) Copy the following lines to your smp file.\nsamplist ? sample_name\nparameter ? nobeam \\\\\n1.18 2.3 5 2 10 1 -64 65 0.0 \\\\\nx 0.333 67.0 91.0 0 7.875 0.0 9.0 0.0 \\\\\n\"\"\")\n for qz, F, sig in zip(qz_values, form_factors, errors):\n f.write(\"{0: 8.3f} {1: .4f} {2: 8.3f} \\\\\\n\".format(F, qz, sig))",
"def write_vectors(self, filename):\n svu.write_realvectors(self,filename)",
"def convert(infile,arcsec_per_pixel=0.2,sigma_conv=1.,expansion_factor=5,writeout=None,overwrite=False,keep_units=False):\n \n PLATESCALE = 1.2120 # arcsec / mm\n rss = fits.open( infile )\n phdr = rss[1].header\n dhdr = rss[0].header\n data = rss[0].data\n \n conff=dm.read_fibers_extension(phdr)\n bundles_values=conff.bundles.keys()\n sky_bundles=[]\n for bundlei in bundles_values:\n if phdr[\"BUN%03d_T\" % bundlei]=='SKY':\n sky_bundles.append(bundlei)\n \n w0 = dhdr['CRVAL1'] # reference wavelength\n try : dw = dhdr['CRDELT1'] # wavelength step\n except : dw = dhdr['CDELT1'] # wavelength step\n wunit = dhdr['CUNIT1'] # wavelength unit\n wtype = 'WAVE' # type spectra\n\n # define the dimensions of the spaxel array \n Nx, Ny, x0, y0, dx, dy = getspaxdim( data,phdr,sky_bundles,expansion_factor=expansion_factor)\n\n nbin=int(round(float(arcsec_per_pixel)/float(dx)))\n\n\n Nw = dhdr['NAXIS1'] # number of wave. steps\n \n\n \n # initialize an empty 3-d cube (zero everywhere)\n cube = fits.PrimaryHDU()\n #cube.header=rss[0].header \n #cube.header.remove('CRPIX1') \n #cube.header.remove('CRVAL1') \n #cube.header.remove('CUNIT1') \n #cube.header.remove('CTYPE1') \n #cube.header.remove('CRPIX2') \n #cube.header.remove('CRVAL2') \n #cube.header.remove('CDELT2') \n #cube.header.remove('CTYPE2') \n cube.header.update(NAXIS=3)\n cube.header.update(NAXIS1=Nx)\n cube.header.update(NAXIS2=Ny)\n cube.header.update(NAXIS3=Nw)\n cube.header.update(CD1_1=-dx/3600.)\n cube.header.update(CD2_2=dy/3600.)\n cube.header.update(CD3_3=dw)\n cube.header.update(CRPIX1=0)\n cube.header.update(CRPIX2=0)\n cube.header.update(CRPIX3=0)\n cube.header.update(CRVAL1=x0)\n cube.header.update(CRVAL2=y0)\n cube.header.update(CRVAL3=w0)\n\n cube.header.update(CTYPE1='RA---DEG')\n cube.header.update(CTYPE2='DEC--DEG')\n cube.header.update(CTYPE3=wtype)\n cube.header.update(CUNIT3=wunit)\n\n cube.header.update(CD1_2=0)\n cube.header.update(CD1_3=0)\n cube.header.update(CD2_1=0)\n cube.header.update(CD2_3=0)\n cube.header.update(CD3_1=0)\n cube.header.update(CD3_2=0)\n\n\n cube.data = numpy.zeros( (Nw,Ny,Nx) )\n\n # extract each spectrum and place it\n # into the 3-d cube\n for ispec in range(len(data)): \n fib_str='{:3d}'.format(ispec+1)\n fib_str=fib_str.replace(' ','0') \n if not(phdr['FIB'+fib_str+'_B'] in sky_bundles):\n try:\n end_sp=phdr['FIB'+fib_str+'W2'] \n start_sp=phdr['FIB'+fib_str+'W1']\n except:\n if ('start_sp' in locals()):\n print('Warning! FIB'+fib_str+'W1 and W2 information missing in header. Assuming previous fiber wavelength coverage.') \n else: \n end_sp=Nw\n start_sp=1 \n print('Warning! FIB'+fib_str+'W1 and W2 information missing in header. 
Assuming default wavelength coverage.') \n \n if end_sp!=start_sp:\n spec = data[ispec][:]\n Nwspec = Nw \n \n xpos = (phdr['FIB'+fib_str+'_x']+5.)*PLATESCALE \n ypos = (phdr['FIB'+fib_str+'_y']+5.)*PLATESCALE\n ix = int( round((xpos - x0),3) / dx )\n iy = int( round((ypos - y0),3) / dy )\n \n lambda_arr=w0+dw*numpy.arange(0,Nwspec,1)\n \n if keep_units==True:\n for i in range( start_sp, min(end_sp,Nwspec) ):\n cube.data[i][iy][ix] = spec[i]##same units \n else:\n for i in range( start_sp, min(end_sp,Nwspec) ):\n cube.data[i][iy][ix] = spec[i]*3.00e-5/lambda_arr[i]**2 ## Jy to erg/s/cm**2/A \n else:\n end_sp=Nwspec \n print('1st step') \n sigma_conv_pix=sigma_conv/((dx*nbin)/expansion_factor) \n for i in range( start_sp, min(end_sp,Nwspec)):\n print(str(i)+'/'+str(Nwspec)+' spectral channels',end=\"\\r\")\n cube.data[i]=scipy.ndimage.filters.gaussian_filter(cube.data[i], sigma=sigma_conv_pix)\n \n \n cube_rebin = fits.PrimaryHDU()\n cube_rebin.header=rss[0].header \n cube_rebin.header.remove('CRPIX1') \n cube_rebin.header.remove('CRVAL1') \n cube_rebin.header.remove('CUNIT1') \n cube_rebin.header.remove('CTYPE1') \n cube_rebin.header.remove('CDELT1')\n cube_rebin.header.remove('CRPIX2') \n cube_rebin.header.remove('CRVAL2') \n #cube_rebin.header.remove('CUNIT2') \n cube_rebin.header.remove('CDELT2') \n cube_rebin.header.remove('CTYPE2') \n cube_rebin.header.update(NAXIS=3)\n cube_rebin.header.update(NAXIS1=Nx//nbin)\n cube_rebin.header.update(NAXIS2=Ny//nbin)\n cube_rebin.header.update(NAXIS3=Nw)\n cube_rebin.header.update(CD1_1=-dx*nbin/3600.)\n cube_rebin.header.update(CD2_2=dy*nbin/3600.)\n cube_rebin.header.update(CD3_3=dw)\n cube_rebin.header.update(CRPIX1=0)\n cube_rebin.header.update(CRPIX2=0)\n cube_rebin.header.update(CRPIX3=0)\n cube_rebin.header.update(CRVAL1=x0)\n cube_rebin.header.update(CRVAL2=y0)\n cube_rebin.header.update(CRVAL3=w0)\n \n cube_rebin.header.update(CTYPE1='RA---SIN')\n cube_rebin.header.update(CTYPE2='DEC--SIN')\n cube_rebin.header.update(CTYPE3=wtype)\n cube_rebin.header.update(CUNIT3=wunit)\n cube_rebin.header.update(CUNIT1='deg')\n cube_rebin.header.update(CUNIT2='deg')\n \n cube_rebin.header.update(CD1_2=0)\n cube_rebin.header.update(CD1_3=0)\n cube_rebin.header.update(CD2_1=0)\n cube_rebin.header.update(CD2_3=0)\n cube_rebin.header.update(CD3_1=0)\n cube_rebin.header.update(CD3_2=0)\n cube_rebin.verify('fix')\n if keep_units:\n cube_rebin.header.update(BUNIT= dhdr['BUNIT']) ##the rss one!!\n else:\n cube_rebin.header.update(BUNIT= 'erg/s/cm**2/Angstrom') \n\n\n\n \n cube_rebin.data = numpy.zeros( (Nw,Ny//nbin,Nx//nbin) )\n print('')\n print('2nd step')\n for i in range( 0, Nwspec) : \n shape=cube.data[i].shape \n print(str(i)+'/'+str(Nwspec)+' spectral channels',end=\"\\r\")\n for xi in numpy.arange(0,shape[0],nbin)[:-1]:\n for yj in numpy.arange(0,shape[1],nbin)[:-1]:\n pixel_ij=numpy.sum(cube.data[i][xi:xi+nbin,yj:yj+nbin]) \n cube_rebin.data[i][xi//nbin,yj//nbin]=pixel_ij \n if writeout !=None:\n cube_rebin.writeto(writeout,overwrite=overwrite)\n return( cube_rebin)",
"def export_amplicon_set_tsv(self, params):\n logging.info('start exporting amplicon set object')\n amplicon_set_ref = params.get('input_ref')\n\n amplicon_set_df = self._amplicon_set_to_df(amplicon_set_ref)\n\n result_dir = os.path.join(self.scratch, str(uuid.uuid4()))\n self._mkdir_p(result_dir)\n\n self._df_to_tsv(amplicon_set_df, result_dir, amplicon_set_ref)\n\n package_details = self.dfu.package_for_download({\n 'file_path': result_dir,\n 'ws_refs': [amplicon_set_ref]\n })\n\n return {'shock_id': package_details['shock_id']}"
] | [
"0.59630764",
"0.5706561",
"0.53236437",
"0.5267373",
"0.5232523",
"0.52308166",
"0.5225233",
"0.5186095",
"0.5159462",
"0.51572955",
"0.5152818",
"0.5151516",
"0.51237893",
"0.51043826",
"0.51003903",
"0.509759",
"0.5050525",
"0.50395685",
"0.50338125",
"0.5017286",
"0.50093114",
"0.5006597",
"0.50011873",
"0.4980939",
"0.49761137",
"0.49696666",
"0.4943814",
"0.49353823",
"0.49319756",
"0.49142328"
] | 0.6504095 | 0 |
Make sure that when we reopen a file because the inode has changed, we open to the right location. | def test_reopen_changed_inode(tmp_path):
path1 = tmp_path / "file"
path2 = tmp_path / "changed_file"
with open(path1, "w") as f:
for i in range(1000):
print(f"{i}", file=f)
with open(path2, "w") as f:
for i in range(2000):
print(f"{i}", file=f)
file_info = LogFileInfo(
filename=path1,
size_when_last_opened=0,
file_position=0,
file_handle=None,
is_err_file=False,
job_id=None,
worker_pid=None,
)
file_info.reopen_if_necessary()
for _ in range(1000):
file_info.file_handle.readline()
orig_file_pos = file_info.file_handle.tell()
file_info.file_position = orig_file_pos
# NOTE: On windows, an open file can't be deleted.
file_info.file_handle.close()
os.remove(path1)
os.rename(path2, path1)
file_info.reopen_if_necessary()
assert file_info.file_position == orig_file_pos
assert file_info.file_handle.tell() == orig_file_pos | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reopen(self):\n self.close()\n self._fileobj = os.fdopen(os.open(str(self.path), os.O_CREAT | os.O_RDWR, 384), \"r+b\", 0)",
"def reopen(self):\n self.close()\n self._fileobj = os.fdopen(\n os.open(str(self.path), os.O_CREAT | os.O_RDWR, 384), \"r+b\", 0\n )",
"def test_reopen(self):\n with contextlib.closing(logfile.LogFile(self.name, self.dir)) as log1:\n log1.write(\"hello1\")\n savePath = os.path.join(self.dir, \"save.log\")\n os.rename(self.path, savePath)\n log1.reopen()\n log1.write(\"hello2\")\n\n with open(self.path) as f:\n self.assertEqual(f.read(), \"hello2\")\n with open(savePath) as f:\n self.assertEqual(f.read(), \"hello1\")",
"def test_reopen(self):\n log1 = logfile.LogFile(self.name, self.dir)\n log1.write(\"hello1\")\n savePath = os.path.join(self.dir, \"save.log\")\n os.rename(self.path, savePath)\n log1.reopen()\n log1.write(\"hello2\")\n log1.close()\n\n f = open(self.path, \"r\")\n self.assertEquals(f.read(), \"hello2\")\n f.close()\n f = open(savePath, \"r\")\n self.assertEquals(f.read(), \"hello1\")\n f.close()",
"def reopen():",
"def open_file(self, now):\n path = now.strftime(self.template)\n if path != self.path:\n if self.file is not None:\n self.file.close()\n self.path = path\n try:\n self.file = open(path, 'ab', 0)\n except IOError as e:\n if e.errno == errno.ENOENT:\n os.makedirs(os.path.dirname(path))\n self.file = open(path, 'ab', 0)\n\n self.update_link()",
"def advance_in_file(self, file_pos):\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise",
"def _reopen(self, mode='r', **kwargs):\n\n self.h5file.close()\n self.h5file = tb.open_file(self.h5fname, mode, **kwargs)\n return True",
"def _open_changed ( self ):\n file_name = open_file( extensions = FileInfo(), id = demo_id )\n if file_name != '':\n self.file_name = file_name",
"def testOpenCloseInode(self):\n self._TestOpenCloseInode(self._tsk_partition_path_spec)",
"def testOpenCloseInode(self):\n self._TestOpenCloseInode(self._tsk_partition_path_spec)",
"def hook_file_opened(self):",
"def open (self, path, mode):\r\n pass",
"def _ReopenFileObject(self):\n if self._file_object:\n self._file_object.close()\n self._file_object = None\n\n self._file_object = self._zip_file.open(self.name, mode='r')\n self._stream_offset = 0",
"def test_file_open_bug():\n \n value = Value('test', context, 'reentrant_test', clsmap['file'], data_dir='./cache')\n \n try:\n os.remove(value.namespacemanager.file)\n except OSError:\n pass\n \n value.set_value(\"x\")\n\n f = open(value.namespacemanager.file, 'w')\n f.write(\"BLAH BLAH BLAH\")\n f.close()\n \n # TODO: do we have an assertRaises() in nose to use here ?\n try:\n value.set_value(\"y\")\n assert False\n except:\n pass\n \n _synchronizers.clear()\n context.clear()\n value = Value('test', context, 'reentrant_test', clsmap['file'], data_dir='./cache')\n\n # TODO: do we have an assertRaises() in nose to use here ?\n try:\n value.set_value(\"z\")\n assert False\n except:\n pass",
"def open_file_link_manager(file):\n pass",
"def _open(self, mode=b'r'):\n return self.opener(self.filename, mode=mode)",
"def open(self):\n self.f = open(self.join(self.fname), 'rb')",
"def open(self):\n with self._not_full:\n self._closed = False",
"def file_pointer(self):\n\n try:\n self.__file.seek(self.__file.tell() - 1)\n except Exception as e:\n raise e",
"def reopen_files(self):\r\n for log in (self.error_log, self.access_log):\r\n for h in log.handlers:\r\n if isinstance(h, logging.FileHandler):\r\n h.acquire()\r\n h.stream.close()\r\n h.stream = open(h.baseFilename, h.mode)\r\n h.release()",
"def FileOpenForWrite():\r\n return _hiew.HiewGate_FileOpenForWrite()",
"def open_file(self, fname):\n\n # Save that the file is opened.\n self.open_files[fname] = {}\n self.open_files[fname][\"name\"] = fname\n self.open_files[fname][\"contents\"] = []",
"def _open_fd_rw(self):\n self.fd = os.open(self.proxy, os.O_RDWR)",
"def open(self, number=0):\n if number != self.current_file_number:\n self.close()\n self.fh_raw = open(self.files[number], mode='rb')\n self.current_file_number = number\n return self.fh_raw",
"def _openLockFile(self):\n lock_file = self._getLockFile()\n self.stream_lock = open(lock_file, \"w\")",
"def __enter__(self):\n self.savedPath = os.getcwd()\n os.chdir(self.newPath)",
"def do_touch ( self, fspath ):\n return",
"def test_open_ped_duplicate_person(self):\n \n self.temp.write('A B 0 0 1 1\\n')\n self.temp.write('A B 0 0 1 1\\n')\n self.temp.flush()\n \n with self.assertRaises(ValueError):\n open_ped(self.temp.name)",
"def _open_for_write(self, filepath):\n tmpname = '%s.tmp.%d' % (filepath, os.getpid())\n try:\n with open(tmpname, \"wb\") as f:\n yield f\n utils.file_replace(tmpname, filepath)\n except Exception:\n # In case of error, remove dangling tmp file\n try:\n os.unlink(tmpname)\n except OSError:\n pass\n raise"
] | [
"0.6775823",
"0.67754513",
"0.65995735",
"0.6587164",
"0.6586195",
"0.6561913",
"0.6408571",
"0.621443",
"0.6102716",
"0.6087001",
"0.6087001",
"0.5911594",
"0.58738005",
"0.5827413",
"0.57688123",
"0.5745305",
"0.57410425",
"0.573082",
"0.5726611",
"0.5717674",
"0.5699772",
"0.56635547",
"0.56587374",
"0.56495297",
"0.5627291",
"0.56247616",
"0.56159675",
"0.56113297",
"0.5611277",
"0.5609254"
] | 0.7729124 | 0 |
Importing Ray used to cause `logging.makeRecord` to use the default record factory, rather than the factory set by `logging.setRecordFactory`. This tests validates that this bug is fixed. | def test_ray_does_not_break_makeRecord():
# Make a call with the cli logger to be sure that invoking the
# cli logger does not mess up logging.makeRecord.
with redirect_stdout(None):
cli_logger.info("Cli logger invoked.")
mockRecordFactory = Mock()
try:
logging.setLogRecordFactory(mockRecordFactory)
# makeRecord needs 7 positional args. What the args are isn't consequential.
makeRecord_args = [None] * 7
logging.Logger("").makeRecord(*makeRecord_args)
# makeRecord called the expected factory.
mockRecordFactory.assert_called_once()
finally:
# Set it back to the default factory.
logging.setLogRecordFactory(logging.LogRecord) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _build_make_record_function():\n prev_factory = logging.getLogRecordFactory()\n\n def make_record(*arguments, **kwargs):\n record = prev_factory(*arguments, **kwargs)\n return _synchrolog_record_factory(record)\n\n return make_record",
"def _record(self):\n record_attr = {\n 'name': 'test_record',\n 'level': 'ERROR',\n 'pathname': '/test/path',\n 'msg': 'This is a test record.',\n }\n record = logging.makeLogRecord(record_attr)\n return record",
"def setLogRecordFactory(self, factory):\n self.logRecordFactory = factory",
"def create_record_testing(\n mocker, caplog, create_record_of_each_type, transip_credentials_env_hash\n):\n (dns_record_parameters, dns_record) = create_record_of_each_type\n mocker.patch(\"os.environ\", transip_credentials_env_hash)\n mocker.patch(\"sys.argv\", [\"transip_dns\"] + hash_to_list(dns_record_parameters))\n\n caplog.set_level(logging.INFO)\n transip_dns()\n\n script_output = (\n f\"DNS record '{dns_record_parameters['--record_name']}.{transip_domain}' \"\n f\"('{dns_record_parameters['--record_type']}')\"\n f\" '{dns_record_parameters['--record_data']}' created\"\n )\n assert script_output in caplog.text",
"def test_log_library_context(propagate_logs, caplog, logger_name, package_name):\n logger = logging.getLogger(logger_name)\n logger.critical(\"Test!\")\n\n assert (\n caplog.records[-1].package == package_name\n ), \"Missing ray package name in log record.\"",
"def test_constructor_missing_logging():\n with pytest.raises(TypeError):\n config = {\n \"PAN_GALACTIC_GARGLE_BLASTER\": \"Yummy\"\n }\n Unpacker(config)",
"def test_default():\n logger = logging.getLogger(__name__)\n log_all_levels(logger)\n log_all_levels_decorated(logger)\n log_all_levels_loop(logger)\n return logger",
"def test_tfrecord_to_mindrecord_with_special_field_name():\n if not tf or tf.__version__ < SupportedTensorFlowVersion:\n # skip the test\n logger.warning(\"Module tensorflow is not found or version wrong, \\\n please use pip install it / reinstall version >= {}.\".format(SupportedTensorFlowVersion))\n return\n\n file_name_ = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]\n mindrecord_file_name = file_name_ + '.mindrecord'\n tfrecord_file_name = file_name_ + '.tfrecord'\n generate_tfrecord_with_special_field_name(tfrecord_file_name)\n assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name))\n\n feature_dict = {\"image/class/label\": tf.io.FixedLenFeature([], tf.int64),\n \"image/encoded\": tf.io.FixedLenFeature([], tf.string),\n }\n\n if os.path.exists(mindrecord_file_name):\n os.remove(mindrecord_file_name)\n if os.path.exists(mindrecord_file_name + \".db\"):\n os.remove(mindrecord_file_name + \".db\")\n\n tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name),\n mindrecord_file_name, feature_dict, [\"image/encoded\"])\n tfrecord_transformer.transform()\n\n assert os.path.exists(mindrecord_file_name)\n assert os.path.exists(mindrecord_file_name + \".db\")\n\n fr_mindrecord = FileReader(mindrecord_file_name)\n verify_data(tfrecord_transformer, fr_mindrecord)\n\n os.remove(mindrecord_file_name)\n os.remove(mindrecord_file_name + \".db\")\n\n os.remove(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name))",
"def test_tfrecord_to_mindrecord():\n if not tf or tf.__version__ < SupportedTensorFlowVersion:\n # skip the test\n logger.warning(\"Module tensorflow is not found or version wrong, \\\n please use pip install it / reinstall version >= {}.\".format(SupportedTensorFlowVersion))\n return\n\n file_name_ = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]\n mindrecord_file_name = file_name_ + '.mindrecord'\n tfrecord_file_name = file_name_ + '.tfrecord'\n generate_tfrecord(tfrecord_file_name)\n\n assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name))\n\n feature_dict = {\"file_name\": tf.io.FixedLenFeature([], tf.string),\n \"image_bytes\": tf.io.FixedLenFeature([], tf.string),\n \"int64_scalar\": tf.io.FixedLenFeature([], tf.int64),\n \"float_scalar\": tf.io.FixedLenFeature([], tf.float32),\n \"int64_list\": tf.io.FixedLenFeature([6], tf.int64),\n \"float_list\": tf.io.FixedLenFeature([7], tf.float32),\n }\n\n if os.path.exists(mindrecord_file_name):\n os.remove(mindrecord_file_name)\n if os.path.exists(mindrecord_file_name + \".db\"):\n os.remove(mindrecord_file_name + \".db\")\n\n tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name),\n mindrecord_file_name, feature_dict, [\"image_bytes\"])\n tfrecord_transformer.transform()\n\n assert os.path.exists(mindrecord_file_name)\n assert os.path.exists(mindrecord_file_name + \".db\")\n\n fr_mindrecord = FileReader(mindrecord_file_name)\n verify_data(tfrecord_transformer, fr_mindrecord)\n\n os.remove(mindrecord_file_name)\n os.remove(mindrecord_file_name + \".db\")\n\n os.remove(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name))",
"def _createPredictionLogger(self):\n\n class DummyLogger:\n def writeRecord(self, record): pass\n def writeRecords(self, records, progressCB): pass\n def close(self): pass\n\n self._predictionLogger = DummyLogger()",
"def setup_logging(log_level=logging.INFO, log_filename=None) -> Logger:\n logger = logging.getLogger()\n\n # Set log format to dislay the logger name to hunt down verbose logging modules\n fmt = \"%(name)-25s %(levelname)-8s %(message)s\"\n\n # Use colored logging output for console\n coloredlogs.install(level=log_level, fmt=fmt, logger=logger)\n\n # Quiet some internal logs\n logging.getLogger(\"dex_ohlcv.eventscanner\").setLevel(logging.INFO)\n\n # Disable logging of JSON-RPC requests and reploes\n logging.getLogger(\"web3.RequestManager\").setLevel(logging.WARNING)\n logging.getLogger(\"web3.providers.HTTPProvider\").setLevel(logging.WARNING)\n # logging.getLogger(\"web3.RequestManager\").propagate = False\n\n # Disable all internal debug logging of requests and urllib3\n # E.g. HTTP traffic\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n\n # IPython notebook internal\n logging.getLogger(\"asyncio\").setLevel(logging.WARNING)\n\n # Datadog tracer agent\n # https://ddtrace.readthedocs.io/en/stable/basic_usage.html\n logging.getLogger(\"ddtrace\").setLevel(logging.INFO)\n\n # Flooding of OpenAPI spec debug notes on startup\n logging.getLogger(\"openapi_spec_validator\").setLevel(logging.WARNING)\n\n if log_filename:\n # Append to the log file\n handler = logging.FileHandler(log_filename, 'w+')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n return logger",
"def enhanced_make_record(self, *args, **kwargs): \n rv = _makeRecord(self, *args, **kwargs)\n frame = inspect.currentframe().f_back\n while frame.f_back and (frame.f_code.co_filename.startswith(_django_path) or frame.f_code.co_filename.startswith(_logging_path) or frame.f_code.co_filename.startswith(_djangologging_path)):\n if frame.f_code.co_filename.startswith(_admin_path):\n break\n frame = frame.f_back\n \n source_lines = inspect.getsourcelines(frame)\n lineno = frame.f_lineno - source_lines[1]\n show = 5\n start, stop = max(0, lineno - show), lineno + show + 1\n rv.__dict__['source_lines'] = python_to_html(''.join(source_lines[0][start:stop]), source_lines[1] + start, [lineno - start + 1])\n rv.__dict__['local_variables'] = frame.f_locals.items()\n return rv",
"def load_logger(factory):\n def inner():\n web.ctx.logger = factory()\n return inner",
"def test_get_logger_basic():\n with patch('krux.logging.setup') as mock_setup:\n with patch('krux.logging.syslog_setup') as mock_syslog_setup:\n krux.logging.get_logger(TEST_LOGGER_NAME, syslog_facility=None, log_to_stdout=False)\n\n assert_true(not mock_setup.called)\n assert_true(not mock_syslog_setup.called)\n assert_true(not logging.getLogger(TEST_LOGGER_NAME).propagate)",
"def test_logging():\n assert logger.name == 'wellcomeml.logger'",
"def test_export_custom(self): # pylint: disable=no-self-use\n mock_record_str = Mock(str)\n\n def formatter(record): # pylint: disable=unused-argument\n return mock_record_str\n\n mock_stdout = Mock()\n exporter = ConsoleLogExporter(out=mock_stdout, formatter=formatter)\n log_data = LogData(\n log_record=LogRecord(),\n instrumentation_scope=InstrumentationScope(\n \"first_name\", \"first_version\"\n ),\n )\n exporter.export([log_data])\n mock_stdout.write.assert_called_once_with(mock_record_str)",
"def setUp(self):\n recorder = opentracing.tracer.recorder\n recorder.clear_spans()",
"def test_tfrecord_to_mindrecord_list_with_diff_type_exception():\n if not tf or tf.__version__ < SupportedTensorFlowVersion:\n # skip the test\n logger.warning(\"Module tensorflow is not found or version wrong, \\\n please use pip install it / reinstall version >= {}.\".format(SupportedTensorFlowVersion))\n return\n\n file_name_ = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]\n mindrecord_file_name = file_name_ + '.mindrecord'\n tfrecord_file_name = file_name_ + '.tfrecord'\n generate_tfrecord(tfrecord_file_name)\n assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name))\n\n feature_dict = {\"file_name\": tf.io.FixedLenFeature([], tf.string),\n \"image_bytes\": tf.io.FixedLenFeature([], tf.string),\n \"int64_scalar\": tf.io.FixedLenFeature([1], tf.int64),\n \"float_scalar\": tf.io.FixedLenFeature([1], tf.float32),\n \"int64_list\": tf.io.FixedLenFeature([6], tf.float32),\n \"float_list\": tf.io.FixedLenFeature([7], tf.float32),\n }\n\n if os.path.exists(mindrecord_file_name):\n os.remove(mindrecord_file_name)\n if os.path.exists(mindrecord_file_name + \".db\"):\n os.remove(mindrecord_file_name + \".db\")\n\n with pytest.raises(ValueError):\n tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name),\n mindrecord_file_name, feature_dict, [\"image_bytes\"])\n tfrecord_transformer.transform()\n\n if os.path.exists(mindrecord_file_name):\n os.remove(mindrecord_file_name)\n if os.path.exists(mindrecord_file_name + \".db\"):\n os.remove(mindrecord_file_name + \".db\")\n\n os.remove(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name))",
"def test_read_namespaced_build_log_log(self):\n pass",
"def test_patch_record(self):\n pass",
"def test_logging(self):\n self._verify_logging()",
"def default_logger_creator(config):\n return UnifiedLogger(config, logdir, loggers=None)",
"def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!",
"def record(*args, **kwargs):\n LOG.info(\"args={}, kwargs={}\".format(args, kwargs))",
"def create_logger(level=logging.DEBUG, record_format=None):\n if record_format is None:\n record_format = \"[%(asctime)s][%(thread)d][%(filename)s][line: %(lineno)d][%(levelname)s] ## %(message)s\"\n\n logger = logging.getLogger(\"mylogger\")\n logger.setLevel(level)\n # 修改\n fh.setLevel(level)\n ch.setLevel(level)\n formatter = logging.Formatter(record_format)\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n return logger",
"def test_config_process_logger_external(get_config, default_config, monkeypatch, levelname, levelno):\n\n int_queue = []\n message = \"test\"\n conf = {**default_config('sys')}\n conf.update(external_logging=levelno)\n if levelname == 'DEBUG':\n conf.update(debug=True)\n cfg = get_config(SystemConfig, conf)\n monkeypatch.setattr(cfg.log.internal_queue, \"put\", lambda x: int_queue.append(x))\n\n assert levelname not in cfg.log.loggers, f'log with name {levelname} already created'\n logger = cfg.logger(levelname)\n\n try:\n log_call = getattr(logger, levelname.lower())\n except Exception:\n log_call = getattr(logger, 'error')\n log_call(message)\n expected = {\"msg\": message, \"lvl\": levelname}\n\n err = \"there should be queue handler + external handler, total of 2\"\n\n assert int_queue, \"internal queue has no messages\"\n assert len(logger.handlers) == 2, f\"{err}: {logger.handlers}\"\n assert isinstance(int_queue[0], Event), f\"wrong queue content: {int_queue}\"\n assert int_queue[0].data == expected, f\"wrong data in event: {int_queue[0]}\"",
"def test_init_logger(monkeypatch):\n monkeypatch.delenv(\"LOG_LEVEL\", raising=False)\n monkeypatch.delenv(\"LOG_OUTPUT\", raising=False)\n logger = helpers.init_logger()\n assert logger.getEffectiveLevel() == WARNING\n monkeypatch.setenv(\"LOG_LEVEL\", str(DEBUG))\n logger = helpers.init_logger()\n assert logger.getEffectiveLevel() == DEBUG",
"def setup_global_logging():\n\n global global_logging_started\n\n if global_logging_started:\n return\n\n orig_logger_class = logging.getLoggerClass()\n logging.setLoggerClass(StreamTeeLogger)\n try:\n stdout_logger = logging.getLogger(__name__ + '.stdout')\n stderr_logger = logging.getLogger(__name__ + '.stderr')\n finally:\n logging.setLoggerClass(orig_logger_class)\n\n stdout_logger.setLevel(logging.INFO)\n stderr_logger.setLevel(logging.ERROR)\n stdout_logger.set_stream(sys.stdout)\n stderr_logger.set_stream(sys.stderr)\n sys.stdout = stdout_logger\n sys.stderr = stderr_logger\n\n exception_logger = logging.getLogger(__name__ + '.exc')\n sys.excepthook = LoggingExceptionHook(exception_logger)\n\n logging.captureWarnings(True)\n\n rawinput = 'input'\n builtins._original_raw_input = getattr(builtins, rawinput)\n setattr(builtins, rawinput, global_logging_raw_input)\n\n global_logging_started = True",
"def create_tfrecords(\n input_data: Union[str, pd.DataFrame],\n output_dir: str,\n header: Optional[Union[str, int, Sequence]] = 'infer',\n names: Optional[Sequence] = None,\n runner: str = 'DirectRunner',\n project: Optional[str] = None,\n region: Optional[str] = None,\n tfrecorder_wheel: Optional[str] = None,\n dataflow_options: Optional[Dict[str, Any]] = None,\n job_label: str = 'create-tfrecords',\n compression: Optional[str] = 'gzip',\n num_shards: int = 0) -> Dict[str, Any]:\n\n df = to_dataframe(input_data, header, names)\n\n _validate_data(df)\n _validate_runner(df, runner, project, region, tfrecorder_wheel)\n\n logfile = os.path.join('/tmp', constants.LOGFILE)\n _configure_logging(logfile)\n\n\n integer_label = pd.api.types.is_integer_dtype(df[constants.LABEL_KEY])\n p = beam_pipeline.build_pipeline(\n df,\n job_label=job_label,\n runner=runner,\n project=project,\n region=region,\n output_dir=output_dir,\n compression=compression,\n num_shards=num_shards,\n tfrecorder_wheel=tfrecorder_wheel,\n dataflow_options=dataflow_options,\n integer_label=integer_label)\n\n result = p.run()\n\n if runner == 'DirectRunner':\n logging.info('Using DirectRunner - blocking until job completes.')\n result.wait_until_finish()\n\n row_count_filter = beam.metrics.MetricsFilter().with_name('row_count')\n good_image_filter = beam.metrics.MetricsFilter().with_name('image_good')\n bad_image_filter = beam.metrics.MetricsFilter().with_name('image_bad')\n\n row_count = _get_beam_metric(row_count_filter, result)\n good_image_count = _get_beam_metric(good_image_filter, result)\n bad_image_count = _get_beam_metric(bad_image_filter, result)\n\n # TODO(mikebernico): Profile metric impact with larger dataset.\n metrics = {\n 'rows': row_count,\n 'good_images': good_image_count,\n 'bad_images': bad_image_count,\n }\n\n job_result = {\n 'job_id': 'DirectRunner',\n 'metrics': metrics\n }\n logging.info(\"Job Complete.\")\n\n else:\n logging.info(\"Using Dataflow Runner.\")\n # Construct Dataflow URL\n\n job_id = result.job_id()\n\n url = (\n constants.CONSOLE_DATAFLOW_URI +\n region +\n '/' +\n job_id +\n '?project=' +\n project)\n job_result = {\n 'job_id': job_id,\n 'dataflow_url': url\n }\n\n logging.shutdown()\n\n if runner == 'DataflowRunner':\n # if this is a Dataflow job, copy the logfile to GCS\n common.copy_logfile_to_gcs(logfile, output_dir)\n\n return job_result",
"def test_tfrecord_to_mindrecord_scalar_with_2_exception():\n if not tf or tf.__version__ < SupportedTensorFlowVersion:\n # skip the test\n logger.warning(\"Module tensorflow is not found or version wrong, \\\n please use pip install it / reinstall version >= {}.\".format(SupportedTensorFlowVersion))\n return\n\n file_name_ = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]\n mindrecord_file_name = file_name_ + '.mindrecord'\n tfrecord_file_name = file_name_ + '.tfrecord'\n generate_tfrecord(tfrecord_file_name)\n assert os.path.exists(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name))\n\n feature_dict = {\"file_name\": tf.io.FixedLenFeature([], tf.string),\n \"image_bytes\": tf.io.FixedLenFeature([], tf.string),\n \"int64_scalar\": tf.io.FixedLenFeature([2], tf.int64),\n \"float_scalar\": tf.io.FixedLenFeature([1], tf.float32),\n \"int64_list\": tf.io.FixedLenFeature([6], tf.int64),\n \"float_list\": tf.io.FixedLenFeature([7], tf.float32),\n }\n\n if os.path.exists(mindrecord_file_name):\n os.remove(mindrecord_file_name)\n if os.path.exists(mindrecord_file_name + \".db\"):\n os.remove(mindrecord_file_name + \".db\")\n\n tfrecord_transformer = TFRecordToMR(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name),\n mindrecord_file_name, feature_dict, [\"image_bytes\"])\n with pytest.raises(ValueError):\n tfrecord_transformer.transform()\n\n if os.path.exists(mindrecord_file_name):\n os.remove(mindrecord_file_name)\n if os.path.exists(mindrecord_file_name + \".db\"):\n os.remove(mindrecord_file_name + \".db\")\n\n os.remove(os.path.join(TFRECORD_DATA_DIR, tfrecord_file_name))"
] | [
"0.6236432",
"0.595841",
"0.5396664",
"0.51976943",
"0.518653",
"0.51287144",
"0.51224285",
"0.510316",
"0.5082014",
"0.5011037",
"0.49948853",
"0.49832702",
"0.4978337",
"0.49604952",
"0.4953547",
"0.49422857",
"0.49270672",
"0.489807",
"0.48801783",
"0.48780963",
"0.4866809",
"0.48631355",
"0.48457012",
"0.483291",
"0.47984946",
"0.4778936",
"0.47585607",
"0.4750817",
"0.47483665",
"0.47461197"
] | 0.7394546 | 0 |
Function to perform PII scan | def performPII(filename):
print(f'***PERFORM PII: {filename}')
with open(filename, "r") as f:
fullfile = f.readlines()
result = []
for line in fullfile:
if REPLACE_TEXT in line:
res = re.sub(r"Users?\\+[a-zA-Z.0-9]+", r"Users\\<u>", line)
res1 = re.sub(r':[\s]?"[a-zA-Z]:', r":<d>", res)
result.append(res1)
filteredFile = os.path.join(DIRECTORY_TO_SCAN, OUTFILES + filename.split("/")[-1])
with open(filteredFile, "w") as fd:
for line in result:
fd.write(str(line))
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scan(self, mask):",
"def scan(self) -> List[int]:",
"def scan(self) -> List[int]:",
"def scan(self) -> list[int]:",
"def _scan(self): # pragma: no cover\n raise NotImplementedError()",
"def Scan(self, plugin):\n raise 'Method not implemented'",
"def script_scan(self):\n self._scanned = True\n return self._scanner.scan(self._ips, self._ports, arguments='-Pn -sn -sC')",
"def scan(self):\n return",
"def test_run_a_scan_on_sdp_subarray_in_low():",
"def computePValues(options,whole_mapped_data,mapped_data_per_size_per_register,phase,cycle):\n min_reads_mapped_to_a_phased_register=3\n min_reads_in_a_window=10\n chromosome_hits=[]\n for chromosome in sorted(mapped_data_per_size_per_register):\n chromosome_hits.append(chromosome)\n fhr=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest\",\"r\")\n fhw=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest.concentrated\",\"w\")\n for line in fhr:\n register,start,end=line.strip().split()\n register=int(register)\n start=int(start)\n end=int(end)\n \n begin=start\n #print(chromosome,register,start,end)\n sys.stdout.flush()\n while begin+(phase*min_reads_mapped_to_a_phased_register) <= end+1:\n finish=begin+(phase*cycle)-1\n \n k=0\n for i in range(begin,finish+1):\n #print(chromosome,register,i,phase,start,end)\n try:\n k+=mapped_data_per_size_per_register[chromosome][register][i]\n except KeyError:\n pass\n #print(\"Next\")\n if k<min_reads_mapped_to_a_phased_register: \n begin+=phase\n continue\n \n num_all_reads=0\n for i in range(begin,finish+1):\n try:\n num_all_reads+=whole_mapped_data[chromosome][i]\n except KeyError:\n pass\n if num_all_reads<min_reads_in_a_window:\n begin+=phase\n continue\n \n n=0\n \"\"\"print(\"reached here\")\n sys.stdout.flush()\"\"\"\n # register_i is an iterator different from register\n for register_i in sorted(mapped_data_per_size_per_register[chromosome]):\n for i in range(begin,finish+1):\n try:\n n+=mapped_data_per_size_per_register[chromosome][register_i][i]\n except KeyError:\n pass\n \"\"\"if chromosome==\"Chr1\":\n print(str(n)+\" \"+str(num_all_reads)+\"\\n\")\"\"\"\n if n/num_all_reads<0.3:\n begin+=phase\n continue\n m=cycle*2\n pvalue=0\n for x in range(k,m+1):\n numerator=nCr((phase-1)*m,n-x)*nCr(m,x)\n pvalue+=numerator\n denominator=nCr(phase*m,n)\n pvalue=pvalue/denominator\n #print(chromosome,begin,finish,k,n,m,num_all_reads,pvalue,n/num_all_reads)\n if pvalue>=options.pvalue_cutoff:\n begin+=phase\n continue\n stuffs_to_be_printed_to_file=[register,begin,finish,k,n,m,num_all_reads,n/num_all_reads,pvalue]\n fhw.write(\"\\t\".join(map(str,stuffs_to_be_printed_to_file))+\"\\n\")\n sys.stdout.flush()\n begin+=phase",
"def scan(self) -> Collection[int]:\n ...",
"def scan_for_potentiate(self, data, percentage, outline):\n for i in self.register_and_potentiate:\n pass\n #todo/t self.feature_present_or_not(data,",
"def test_run_a_scan_on_sdp_subarray_in_mid():",
"def PETImageProcess(PET_Scan):\n PET_Scan = normalise(PET_Scan)\n return PET_Scan",
"def scan(self, param):\n\t\tself.left(355)",
"def scan_item(barcode):\n return scanner.scan(barcode)",
"def fast_scan(self):\n self._scanned = True\n return self._scanner.scan(self._ips, self._ports)",
"def calculate_pI_from_file(file, output_dir, cutoff_pi, out_CSV_pi):\n modifications = define_seq_modifications()\n count_sequences_done = 0\n total_start_time = time.time()\n with open(file, \"r\") as handle:\n for record in SeqIO.parse(\n handle,\n \"fasta\",\n alphabet=IUPAC.protein\n ):\n record_list = record.description.split(\"|\")\n # get meta data\n res = get_record_meta(record_list)\n acc_code, organism, EC_code, species, note = res\n # get unmodified pI\n seq_obj = ProteinAnalysis(''.join(record.seq))\n pi = seq_obj.isoelectric_point()\n count_sequences_done += 1\n modifier = '0'\n if pi < cutoff_pi:\n category = '0'\n else:\n category = '1'\n # output to CSV\n output_pI_row(output_dir, out_CSV_pi, file,\n acc_code, organism, EC_code,\n species, note,\n pi, modifier, category)\n\n # if the category is 1 - i.e. pi > cutoff\n # then we test modification\n if category == '1':\n modifier = '1'\n # get modified pI\n seq = record.seq\n # replace target amino acid residue\n # with replacement amino acid residue\n # one letter codes\n targ = convert_to_one_letter_code_sing(\n modifications[modifier]['target_res']\n )\n replacement = convert_to_one_letter_code_sing(\n modifications[modifier]['replace_res']\n )\n mod_seq = ''.join(seq).replace(targ, replacement)\n seq_obj = ProteinAnalysis(mod_seq)\n pi = seq_obj.isoelectric_point()\n count_sequences_done += 1\n if pi < cutoff_pi:\n category = '0'\n else:\n category = '1'\n # output to CSV\n output_pI_row(output_dir, out_CSV_pi, file,\n acc_code, organism, EC_code,\n species, note,\n pi, modifier, category)\n # break\n print(\n '--- finished %s sequences in %s seconds ---'\n % (count_sequences_done, '{0:.2f}'.format(\n time.time() - total_start_time\n )))",
"def test_get_scan(self):\n pass",
"def _scan_profilewidth(self,handle,consumer):\n read_and_call(handle, consumer.profilewidth, contains=\"Nseqs1\")",
"def scan_id(barcode):\n return scanner.scan(barcode)",
"def __scan_progressive__(self, hosts, ports, arguments, callback, sudo):\n try:\n scan_data = self._nm.scan(hosts, ports, arguments, sudo)\n except PortScannerError:\n scan_data = None\n\n if callback is not None:\n callback(hosts, scan_data)",
"def __init__(self, n_components=2, method='pca', rows_to_scan='all'):\n self.n_components = n_components\n self.method = method\n self.rows_to_scan = rows_to_scan",
"def Scan(self, argin):\n handler = self.get_command_object(\"Scan\")\n handler(argin)",
"def getScan(self, i=0, channel=0, isolate=True, scan_width=50e-6,\\\n calibrated=False, resample=False, plot=False,\n fsu=None, FUOFFSET=False):\n if fsu is None:\n if self.ftk_fsu=='FSUA':\n fsu='FSUB'\n else:\n fsu='FSUA'\n\n if self.scan_nscans==0:\n return None\n if i<0 or i>self.scan_nscans-1:\n print 'error: not such scans (nscans=',self.scan_nscans,')'\n return -1\n wi = self.scanGetStampsImaging(i)\n wo = self.scanGetStampsOpdc(i)\n\n if FUOFFSET:\n dlfdbck = lambda x: np.interp(x,\n self.raw[self.scan_opdc].data.field('TIME')[wo],\n self.raw[self.scan_opdc].data.field('FUOFFSET')[wo])\n else:\n # was DLFDBACK before\n dlfdbck = lambda x: np.interp(x,\n self.raw[self.scan_opdc].data.field('TIME')[wo],\n self.raw[self.scan_opdc].data.field(self.scan_DL)[wo]-\n self.raw[self.scan_opdc].data.field(self.scan_DL)[wo].mean())\n \n ### time tags during scan:\n t = self.raw['IMAGING_DATA_'+fsu].data.field('TIME')[wi]\n t = np.clip(t,self.raw[self.scan_opdc].data.field('TIME')[wo].min(),\n self.raw[self.scan_opdc].data.field('TIME')[wo].max())\n x = dlfdbck(t) # interpolate for imaging times\n opdc_state = np.interp(t, self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'))\n print 'MEAN OPDC STATE:', opdc_state.mean()\n n_samples_per_fringes = 2.15e-6/np.median(np.abs(np.diff(x)))\n\n pdsnr = self.raw['IMAGING_DATA_'+fsu.upper()].data.field('PDSNR')[wi[0]]\n pd = self.raw['IMAGING_DATA_'+fsu.upper()].data.field('PD')[wi[0]]\n \n if channel>=0:\n fringesA = self.raw['IMAGING_DATA_'+fsu.upper()].\\\n data.field('DATA1')[wi[0],channel]\n fringesB = self.raw['IMAGING_DATA_'+fsu.upper()].\\\n data.field('DATA2')[wi[0],channel]\n fringesC = self.raw['IMAGING_DATA_'+fsu.upper()].\\\n data.field('DATA3')[wi[0],channel]\n fringesD = self.raw['IMAGING_DATA_'+fsu.upper()].\\\n data.field('DATA4')[wi[0],channel]\n if 'DATA5' in [c.name for c in self.raw['IMAGING_DATA_'+fsu.upper()].data.columns]:\n print 'removing dark pixel'\n No = self.raw['IMAGING_DATA_'+fsu.upper()].data.field('DATA5')[wi[0],:]\n \n No -= No.mean(axis=0)[np.newaxis,:]\n No = No.mean(axis=1)\n \n fringesA -= No*(5 if channel==0 else 1)\n fringesB -= No*(5 if channel==0 else 1)\n fringesC -= No*(5 if channel==0 else 1)\n fringesD -= No*(5 if channel==0 else 1)\n else:\n No=None\n \n if calibrated:\n ### Sahlmann et al. A&A (2008), eq. 
2\n fringesA -= self.fsu_calib[(fsu.upper(), 'DARK')][channel,0]\n fringesA /= (self.fsu_calib[(fsu.upper(), 'FLAT')][channel,0]-\n 2*self.fsu_calib[(fsu.upper(), 'DARK')][channel,0])\n fringesB -= self.fsu_calib[(fsu.upper(), 'DARK')][channel,1]\n fringesB /= (self.fsu_calib[(fsu.upper(), 'FLAT')][channel,1]-\n 2*self.fsu_calib[(fsu.upper(), 'DARK')][channel,1])\n fringesC -= self.fsu_calib[(fsu.upper(), 'DARK')][channel,2]\n fringesC /= (self.fsu_calib[(fsu.upper(), 'FLAT')][channel,2]-\n 2*self.fsu_calib[(fsu.upper(), 'DARK')][channel,2])\n fringesD -= self.fsu_calib[(fsu.upper(), 'DARK')][channel,3]\n fringesD /= (self.fsu_calib[(fsu.upper(), 'FLAT')][channel,3]-\n 2*self.fsu_calib[(fsu.upper(), 'DARK')][channel,3])\n else:\n # return only the colored channels\n fringesA = self.raw['IMAGING_DATA_'+fsu].\\\n data.field('DATA1')[wi[0],1:]\n fringesB = self.raw['IMAGING_DATA_'+fsu].\\\n data.field('DATA2')[wi[0],1:]\n fringesC = self.raw['IMAGING_DATA_'+fsu].\\\n data.field('DATA3')[wi[0],1:]\n fringesD = self.raw['IMAGING_DATA_'+fsu].\\\n data.field('DATA4')[wi[0],1:]\n if calibrated:\n fringesA -= self.fsu_calib[(fsu.upper(), 'DARK')]\\\n [1:,0][np.newaxis,:]\n fringesA /= self.fsu_calib[(fsu.upper(), 'FLAT')]\\\n [1:,0][np.newaxis,:]\n fringesB -= self.fsu_calib[(fsu.upper(), 'DARK')]\\\n [1:,1][np.newaxis,:]\n fringesB /= self.fsu_calib[(fsu.upper(), 'FLAT')]\\\n [1:,1][np.newaxis,:]\n fringesC -= self.fsu_calib[(fsu.upper(), 'DARK')]\\\n [1:,2][np.newaxis,:]\n fringesC /= self.fsu_calib[(fsu.upper(), 'FLAT')]\\\n [1:,2][np.newaxis,:]\n fringesD -= self.fsu_calib[(fsu.upper(), 'DARK')]\\\n [1:,3][np.newaxis,:]\n fringesD /= self.fsu_calib[(fsu.upper(), 'FLAT')]\\\n [1:,3][np.newaxis,:]\n \n if isolate: # isolate fringe packet\n if self.verbose:\n print 'truncating scans'\n\n # isolate fringe packet\n width = scan_width\n fri = (fringesA-fringesB)**2 + (fringesC-fringesD)**2\n if channel<0:\n fri = np.mean(fri, axis=1)\n w = np.where(np.abs(x-x[fri.argmax()])<=2*width)\n x = x[w]\n t = t[w]\n pdsnr=pdsnr[w]\n pd = pd[w]\n if channel<0:\n fringesA=fringesA[w[0],:]\n fringesB=fringesB[w[0],:]\n fringesC=fringesC[w[0],:]\n fringesD=fringesD[w[0],:]\n else:\n fringesA=fringesA[w]\n fringesB=fringesB[w]\n fringesC=fringesC[w]\n fringesD=fringesD[w]\n if not No is None:\n No = No[w[0],:]\n \n if resample and channel>=0: #resample, only for single channel\n if resample==2:\n n_opt = int(2**(np.ceil(np.log2(len(fringesA)))))\n print 'resampling from ',len(fringesA), \" to \", n_opt\n else:\n n_opt = len(fringesA)\n opd = np.arange(x.min(), x.max(), np.ptp(x)/float(n_opt))\n\n if x[0]>x[-1]:\n fringesA = np.interp(opd, x[::-1], fringesA[::-1])\n fringesB = np.interp(opd, x[::-1], fringesB[::-1])\n fringesC = np.interp(opd, x[::-1], fringesC[::-1])\n fringesD = np.interp(opd, x[::-1], fringesD[::-1])\n else:\n fringesA = np.interp(opd, x, fringesA)\n fringesB = np.interp(opd, x, fringesB)\n fringesC = np.interp(opd, x, fringesC)\n fringesD = np.interp(opd, x, fringesD)\n # --- done resampling\n else:\n opd = x\n \n if plot:\n opd *= 1e6\n \n X = (fringesA-fringesC)/np.sqrt(2)\n Y = (fringesB-fringesD)/np.sqrt(2)\n N = (fringesA + fringesB + fringesC + fringesD )/2.\n Z = (fringesA - fringesB + fringesC - fringesD )/2.\n\n print 'X =', [round(x,3) for x in [1/np.sqrt(2), 0, -1/np.sqrt(2), 0 ]]\n print 'Y =', [round(x,3) for x in [0,1/np.sqrt(2), 0,-1/np.sqrt(2)]]\n \n ### Sahlmann et al 2008, Eq. 
4 to 6\n __alpha = self.fsu_calib[(fsu.upper(), 'PHAS')][channel,0]\n __beta = self.fsu_calib[(fsu.upper(), 'PHAS')][channel,1]\n __gamma = self.fsu_calib[(fsu.upper(), 'PHAS')][channel,2]\n __delta = self.fsu_calib[(fsu.upper(), 'PHAS')][channel,3] \n Csc = (__beta*__gamma - __alpha*__delta)/2\n Xp = (X*__gamma - Y*__alpha)/Csc\n Yp = (Y*__beta - X*__delta)/Csc\n \n print 'Xp=', [round(x/Csc,3) for x in [__gamma, -__alpha, -__gamma, __alpha]]\n print 'Yp=', [round(x/Csc,3) for x in [-__delta, __beta, __delta, -__beta]]\n \n \n print 'orthogonality:', 2*(__gamma*__delta+__alpha*__beta)/\\\n (np.sqrt(2*__gamma**2+2*__alpha**2)*np.sqrt(2*__beta**2+2*__delta**2))\n \n dec = pca.pca(np.transpose(np.array([fringesA, fringesB,\n fringesC, fringesD])))\n for k in range(4):\n print 'C'+str(k)+'=', [round(x,3) for x in list(dec.base[:,k])],\n print np.sqrt((dec.base[:,k]**2).sum())\n plt.figure(0, figsize=(6,10))\n plt.clf()\n plt.subplots_adjust(wspace=0.05, top=0.95,hspace=0,\n left=0.1,right=0.98,bottom=0.05)\n \n ax1 = plt.subplot(611) # -- RAW DATA\n plt.plot(opd, fringesA, 'r', label='A', linewidth=2, alpha=0.5)\n plt.plot(opd, fringesB, 'g', label='B', linewidth=2, alpha=0.5)\n plt.plot(opd, fringesC, 'b', label='C', linewidth=2, alpha=0.5)\n plt.plot(opd, fringesD, 'm', label='D', linewidth=2, alpha=0.5)\n \n plt.legend(ncol=2, prop={'size':8}, loc='upper left')\n\n ax2 = plt.subplot(612, sharex=ax1) # == traditional ABCD\n plt.plot(opd, N/2, label='N/2=(A+B+C+D)/4',\n linewidth=2, alpha=0.5, color='k')\n plt.plot(opd, X, label='X=(A-C)/$\\sqrt{2}$',\n linewidth=2, alpha=0.5, color='r')\n plt.plot(opd, Y, label='Y=(B-D)/$\\sqrt{2}$',\n linewidth=2, alpha=0.5, color='g')\n plt.plot(opd, Z, label='Z=(A-B+C-D)/2',\n linewidth=2, alpha=0.5, color='b')\n plt.legend(ncol=1, prop={'size':8}, loc='upper left')\n plt.ylim(-1.5,2)\n \n plt.subplot(613, sharex=ax1, sharey=ax2) # == PRIMA corrected ABCD\n cols = ['k', 'r', 'g', 'b']\n plt.plot(opd, Xp, label='$X_p$',\n color=cols[1], linewidth=2, alpha=0.5)\n plt.plot(opd, Yp, label='$Y_p$',\n color=cols[2], linewidth=2, alpha=0.5)\n plt.legend(ncol=1, prop={'size':8}, loc='upper left')\n\n plt.subplot(614, sharex=ax1, sharey=ax2) # == PCA\n for k in range(4):\n plt.plot(opd, dec.coef[:,k]/(-2 if k==0 else 1.),\n label='PCA: $C_'+str(k)+('$/2' if k==0 else '$'),\n linewidth=2, alpha=0.5, color=cols[k])\n plt.ylim(-1.5,2)\n plt.legend(ncol=1, prop={'size':8}, loc='upper left')\n \n plt.subplot(615, sharex=ax1) # == SNR\n SNR1 = (Xp**2+Yp**2)\n SNR1 /= np.median(SNR1) # original\n SNR2 = (Xp**2/(__gamma**2+__alpha**2)+\n Yp**2/(__beta**2+__delta**2))\n SNR2 /= np.median(SNR2) # corrected\n SNR3 = dec.coef[:,1]**2+dec.coef[:,2]**2\n SNR3 /= np.median(SNR3)\n \n plt.plot(opd, SNR1, '-k', label='$X_p^2+Y_p^2$')\n plt.plot(opd, SNR2, '-r', label='$X_p^2+Y_p^2$ corr.')\n \n plt.plot(opd, pdsnr, '-', color='0.2',label='PDSNR',\n linewidth=2, alpha=0.5)\n \n plt.plot(opd, SNR3, '-y', label='$C_1^2+C_2^2$',\n alpha=0.8, linewidth=2)\n plt.legend(prop={'size':8}, loc='upper left')\n \n # ---\n plt.subplot(616, sharex=ax1) # == Phases\n phi_orig = np.arctan2(Yp, Xp) # FSU formula\n phi_orig = np.unwrap(phi_orig)\n phi_orig -= phi_orig.mean()%(2*np.pi) \n plt.plot(opd, phi_orig, 'r', label='arctan2($Y_p$,$X_p$)')\n \n phi = np.arctan2(X, Y) # usual ABCD\n phi = np.unwrap(phi)\n if np.sign(phi[0]-phi[-1]) != np.sign(phi_orig[0]-phi_orig[-1]):\n phi *= -1\n phi -= phi.mean()%(2*np.pi) \n plt.plot(opd, phi, 'k', label='phase from X,Y')\n \n phi = 
np.arctan2(dec.coef[:,1], dec.coef[:,2]) # PCA\n phi = np.unwrap(phi)\n if np.sign(phi[0]-phi[-1]) != np.sign(phi_orig[0]-phi_orig[-1]):\n phi *= -1\n phi -= phi.mean()%(2*np.pi) \n \n plt.plot(opd, phi, 'y', label='phase from PCA')\n \n plt.xlabel('DL feedback ($\\mu m$) - '+\n str(self.raw[self.scan_opdc].data.field(self.scan_DL)[wo].mean())+' (m)')\n plt.legend(prop={'size':8}, loc='upper left')\n \n plt.figure(1)\n plt.clf()\n ax1 = plt.subplot(141)\n ax1.psd(fringesA, NFFT=len(fringesA), color='r', label='A', linewidth=2, alpha=0.5)\n ax1.psd(fringesB, NFFT=len(fringesA), color='g', label='B', linewidth=2, alpha=0.5)\n ax1.psd(fringesC, NFFT=len(fringesA), color='b', label='C', linewidth=2, alpha=0.5)\n ax1.psd(fringesD, NFFT=len(fringesA), color='m', label='D', linewidth=2, alpha=0.5)\n ax1.legend(prop={'size':8})\n ax2 = plt.subplot(142, sharex=ax1, sharey=ax1)\n ax2.psd(N/2, NFFT=len(fringesA), color='k', label='N/2', linewidth=2, alpha=0.5)\n ax2.psd(X, NFFT=len(fringesA), color='r', label='X', linewidth=2, alpha=0.5)\n ax2.psd(Y, NFFT=len(fringesA), color='g', label='Y', linewidth=2, alpha=0.5)\n ax2.psd(Z, NFFT=len(fringesA), color='b', label='Z', linewidth=2, alpha=0.5)\n ax2.legend(prop={'size':8})\n ax2.set_ylabel('')\n ax3 = plt.subplot(143, sharex=ax1, sharey=ax1)\n ax3.psd(Xp, NFFT=len(fringesA), color='r', label='Xp', linewidth=2, alpha=0.5)\n ax3.psd(Yp, NFFT=len(fringesA), color='g', label='Yp', linewidth=2, alpha=0.5)\n ax3.legend(prop={'size':8})\n ax3.set_ylabel('')\n ax4 = plt.subplot(144, sharex=ax1, sharey=ax1)\n for k in range(4):\n plt.psd(dec.coef[:,k]/(-2 if k==0 else 1.),NFFT=len(fringesA),\n label='PCA: $C_'+str(k)+('$/2' if k==0 else '$'),\n linewidth=2, alpha=0.5, color=cols[k])\n ax4.legend(prop={'size':8})\n ax4.set_ylabel('')\n\n ######################################\n else:\n return t, opd, fringesA, fringesB, fringesC, fringesD",
"def test_get_scans(self):\n pass",
"def scan(input, policy, input_encoding=DEFAULT_ENCODING,\n output_encoding=DEFAULT_ENCODING):\n return CleanResults()",
"def test_p2p_device_concurrent_scan(dev, apdev):\n with HWSimRadio(use_p2p_device=True) as (radio, iface):\n wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')\n wpas.interface_add(iface)\n wpas.p2p_find()\n time.sleep(0.1)\n wpas.request(\"SCAN\")\n ev = wpas.wait_event([\"CTRL-EVENT-SCAN-STARTED\"], timeout=15)\n if ev is None:\n raise Exception(\"Station mode scan did not start\")",
"def test_scan_file(self):\n self.run_scan(self.filename, 1)",
"def scan(controller, path):"
] | [
"0.65596026",
"0.62054896",
"0.62054896",
"0.6164332",
"0.5998671",
"0.59204054",
"0.5813969",
"0.5758621",
"0.56847495",
"0.56796247",
"0.56688553",
"0.5576477",
"0.54695153",
"0.5447298",
"0.54414886",
"0.5413166",
"0.5409423",
"0.5366492",
"0.53627986",
"0.52971727",
"0.5278877",
"0.52353245",
"0.52352333",
"0.5234122",
"0.52099663",
"0.52059793",
"0.51952493",
"0.518227",
"0.51715004",
"0.5159538"
] | 0.62964374 | 1 |
Prepend a value to the beginning of the list. | def prepend(self, value):
if self.head is None:
self.head = Node(value)
return
new_node = Node(value)
new_node.next = self.head
self.head = new_node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prepend(self, value):\n pass",
"def prepend(self, item: Any):\n self.insert(0, item)",
"def prepend(self, x):\n self.insert(0, x)",
"def prepend(self, *args):\n return _libsbml.ListWrapperSBase_prepend(self, *args)",
"def prepend(self, in_items):\n\n items = self.list\n in_items.extend(items)\n self.value = self.__class__.SEPARATOR.join(in_items)",
"def prepend(value, iterable):\n yield value\n yield from iterable",
"def add_first(self, value):\n self.__list = [value] + self.__list\n return self.__list",
"def prepend(self, sequence):\n self._add_sequence(0, sequence)",
"def prepend(self, data):\n new_head = DListNode(data=data, next=self.head)\n if self.head:\n self.head.prev = new_head\n self.head = new_head",
"def prepend(self, element):\n temp = Node(element, None, self.head)\n self.size += 1\n if self.size <= 1:\n self.tail = self.head\n self.head = temp",
"def prepend(self ,data):\n new_head = DoublyLLNode(data = data, next = self.head)\n if self.head:\n self.head.prev = new_head\n self.head = new_head",
"def prepend(self, data):\n self.head = SinglyLLNode(data=data , next=self.head)",
"def push_front(self, value):\n new_node = self.Node(value)\n\n # Edge Case : List is empty\n if self._size == 0:\n self._tail = new_node\n self._head = new_node\n self._size += 1\n return\n\n new_node.next = self._head\n self._head.prev = new_node\n self._head = new_node\n self._size += 1",
"def push(self, item):\n self.list.prepend(item)",
"def prepend(self, node):\n if not isinstance(node, Node):\n # If the node parameter is not a Node then update it\n # to refer to one.\n node = Node(node)\n\n # The new node will store the current first_node in it's next attribute.\n node.next = self.first_node\n # Update the first_node reference to the new node.\n self.first_node = node",
"def prepend(self, data):\n new_node = SingleNode(data)\n new_node.next = self.head\n self.head = new_node",
"def addAtHead(self, val):\n self.nums.insert(0, val)",
"def prepend(self, data):\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n return\n new_node.next = self.head\n self.head = new_node",
"def prepend(self, data):\n new_node = Node(data)\n\n new_node.next = self.head\n self.head = new_node",
"def add_first(self, value):\n self.head = Node(value, self.head)",
"def prepend(self, *args):\n return _libsbml.ListWrapperDate_prepend(self, *args)",
"def prepend(self, *args):\n return _libsbml.ListWrapperCVTerm_prepend(self, *args)",
"def addToFront(self, value):\n self._growCheck()\n super().addToFront(value)",
"def prepend_path(path, paths):\n\n if path in paths: paths.remove(path)\n paths.insert(0, path)",
"def prepend_element(self, element):\n\n pass",
"def push(self, item):\n self.linked_list.prepend(item)",
"def prepend(self, *args):\n return _libsbml.ListWrapperModelCreator_prepend(self, *args)",
"def push_front(self, val: Generic[T]) -> None:\n first_node = self.node.next\n\n self.node.next = Node(val)\n latest_first = self.node.next\n\n latest_first.prev = self.node #pushes the node to the front\n latest_first.next = first_node\n first_node.prev = latest_first #rearranges the list",
"def push_front(self, value):\n node = DLLNode(value)\n if self.head is None:\n self.tail = node \n else: \n self.head.prev_node = node \n node.next_node = self.head\n self.head = node",
"def __add_first(self, value):\n node = self.Node(value, self.__head)\n if self.__head == None: # when this is the first element being added,\n self.__last = node # set the last pointer to this new node\n self.__head = node\n self.__length += 1"
] | [
"0.8317461",
"0.7703864",
"0.76706845",
"0.74701613",
"0.7348015",
"0.7225708",
"0.7132274",
"0.689888",
"0.6890011",
"0.687966",
"0.6816932",
"0.6731809",
"0.67170197",
"0.6663008",
"0.6539581",
"0.6505828",
"0.65012324",
"0.6495",
"0.64920115",
"0.6425391",
"0.6424154",
"0.6373845",
"0.6365845",
"0.6357998",
"0.63344073",
"0.6333673",
"0.6322541",
"0.6291678",
"0.62764174",
"0.62690943"
] | 0.77495795 | 1 |
Search the linked list for a node with the requested value and return the node. | def search(self, value):
node = self.head
while node:
if node.value == value:
return node
node = node.next
raise ValueError('Value not found') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_node(self, value):\n cur = self.first\n while cur:\n if cur.value == value:\n return cur\n cur = cur.next\n return None",
"def search(self, val):\n current = self.head\n # import pdb; pdb.set_trace()\n while current is not None:\n if current.data == val:\n return current\n current = current.next_node\n return None",
"def search(self, val):\n if not self.head:\n raise IndexError('Cannot search empty list.')\n\n current_node = self.head\n\n while current_node:\n if current_node.val == val:\n return current_node\n current_node = current_node.next",
"def search(self, val):\n search = self.head\n while search:\n if search.val == val:\n return search\n search = search.next\n return None",
"def search(self, val):\n current = self.head\n found = False\n while current and not found:\n if current.val == val:\n found = True\n return current\n current = current.next\n return None",
"def find_node(self, value):\n for (fun, node) in self.__root.__fast_find:\n if fun(value):\n return node\n return None",
"def find(self, value):\n # initialize node as root\n node = self.root\n\n # find value\n while node != None:\n\n # value found: return node\n if node.value == value:\n return node\n\n # value is smaller than node: search in left sub tree\n elif node.value > value:\n node = node.left\n\n # value is bigger than node: search in right sub tree\n else:\n node = node.right\n\n # value not found: return None\n return None",
"def search(self, key):\n\n current = self.head\n\n while current:\n if current.data == key:\n return current\n else:\n current = current.next_node\n\n return None",
"def search(self, value):\n return self._search(self.head, value)",
"def search(self, val):\n currentNode = self.rootNode\n while True:\n if currentNode is None:\n print(\"Number not found.\")\n return None\n elif currentNode.val == val:\n print(\"Number found.\")\n return currentNode\n elif currentNode.val < val:\n currentNode = currentNode.right\n else:\n currentNode = currentNode.left",
"def _search(cls, node, value):\n if node is None:\n return False\n\n if node.value == value:\n return True\n\n return cls._search(node.next_, value)",
"def __getitem__(self, value) -> Node:\n self.value = value\n self.next_value = None\n if value in map(lambda x: x.value, self.nodes):\n return value\n\n else:\n return False",
"def search(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n current_node = self._root\n while current_node:\n if val == current_node._data:\n return current_node\n if val > current_node._data:\n current_node = current_node._rkid\n else:\n current_node = current_node._lkid\n return",
"def find(self, item):\n current = self.head\n while current.next != None:\n if current.data == item:\n return current\n current = current.next",
"def binary_search(node, value):\n aux_node = None\n while node is not None and node.value != value:\n if value < node.value:\n aux_node = node.left\n node = aux_node\n else:\n aux_node = node.right\n node = aux_node\n return node if node.value == value else None",
"def find(self, key):\n curr_node = self.head\n\n while curr_node is not None: # a normal traversal and checking first match\n if curr_node.data == key:\n return curr_node\n curr_node = curr_node.next\n\n return None",
"def search(self, val):\n search_through = self.head\n while search_through:\n if val == search_through.data:\n return search_through\n else:\n search_through = search_through.next\n return search_through",
"def _find(self, val, cur_node):\n if val == cur_node.data:\n return cur_node\n elif val > cur_node.data:\n if not cur_node.right:\n return None\n return self._find(val, cur_node.right)\n elif val < cur_node.data:\n if not cur_node.left:\n return None\n return self._find(val, cur_node.left)",
"def find(self, val):\n\n\t\tif not self.root:\n\t\t\treturn None\n\n\t\tQ = [self.root]\n\t\twhile Q:\n\t\t\tnode = Q.pop(0)\n\n\t\t\tif node.val == val:\n\t\t\t\treturn node\n\n\t\t\tif node.left:\n\t\t\t\tQ.append(node.left)\n\n\t\t\tif node.right:\n\t\t\t\tQ.append(node.right)\n\n\t\treturn None",
"def find(self, number):\n cur_node = self.head\n while cur_node is not None:\n if number == cur_node.data.number():\n return cur_node.data\n cur_node = cur_node.next\n return -1",
"def findNode(self, target: hash.hash.Hash):\n for bucket in self.buckets:\n if bucket.inRange(nodeID):\n for node in bucket:\n if node.hash == target:\n return node\n \n return None\n return None",
"def look_up(self, val):\n index = 0\n if self.head is None:\n print(\"List is empty\")\n start = self.head\n while start is not None:\n if start.data == val:\n return index\n start = start.next\n index += 1\n return \"No such element\"",
"def get_node(self, value):\n\t\treturn self.adjacency_list[value]",
"def search(self, val):\n if not self.root:\n return None\n else:\n return self._find(val, self.root)",
"def get_child(self, value):\n for node in self.children:\n if node.value == value:\n return node\n\n return None",
"def search(self, key):\n if key in self.key_list:\n return (self.nodes)[key]\n return None",
"def _Node_findNodeByVal(node_ptr, val):\n pybtlib.findNodeByVal.restype = ctypes.POINTER(ctypes.POINTER(Node))\n pybtlib.findNodeByVal.argtypes = [ctypes.POINTER(ctypes.POINTER(Node)),\n ctypes.c_int]\n return pybtlib.findNodeByVal(ctypes.byref(node_ptr), val)",
"def find_node(node, v):\n while node.value != v:\n node = node.right\n return node",
"def find_node(self, node):\r\n for tree_node in self.traverse():\r\n if tree_node.content == node:\r\n return tree_node\r\n raise LookupError(\"Given node does not exist on the tree\")",
"def getItem(self, value):\r\n # If the tree contains no items, return false\r\n if self.empty():\r\n return False\r\n\r\n # If the returned node is False, it wasn't found and an error should be given and False returned\r\n node = self.descend_to_node(value)\r\n if node:\r\n return node.object\r\n else:\r\n print(\"Value\", value, \"not found.\")\r\n return False"
] | [
"0.86444634",
"0.8273496",
"0.8144617",
"0.8143377",
"0.8091542",
"0.8027057",
"0.77055156",
"0.75110245",
"0.7502115",
"0.74791056",
"0.74587744",
"0.7358435",
"0.72910225",
"0.7215687",
"0.71858066",
"0.71661836",
"0.7138784",
"0.7113469",
"0.7101064",
"0.7096889",
"0.7041299",
"0.70312685",
"0.70092255",
"0.69733423",
"0.696865",
"0.6952157",
"0.6938515",
"0.6889003",
"0.68350226",
"0.683027"
] | 0.8764713 | 0 |
Insert value at pos position in the list. If pos is larger than the length of the list, append to the end of the list. | def insert(self, value, pos):
if self.head is None:
self.head = Node(value)
return
if pos == 0:
self.prepend(value)
return
index = 0
node = self.head
while node.next and index <= pos:
if (pos - 1) == index:
new_node = Node(value)
new_node.next = node.next
node.next = new_node
return
index += 1
node = node.next
else:
self.append(value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insert(self, pos, item):\n \n if pos == 0:\n self.add(item)\n \n elif pos >= self.length():\n self.append(item)\n \n else:\n previous = None\n current = self.head\n \n for _ in range(pos):\n previous = current\n current = current.get_next()\n \n n = Node(item)\n previous.set_next(n)\n n.set_next(current)",
"def insert(self, pos, data):\n assert pos >= 0\n if pos >= self.size(): # todo: support to insert node in end of the list\n raise Exception(\"pos:%d is out of index:%d\" % (pos, self.size()-1))\n\n last = None\n current = self.head\n count = -1\n while current is not None:\n count += 1\n if count == pos:\n node = Node(data)\n\n if last is None:\n node.next = self.head\n self.head = node\n else:\n node.next = current\n last.next = node\n\n return\n\n last = current\n current = current.next",
"def insert(self, pos, element):\n if pos <= 0:\n self.add(element)\n elif pos >= self.length():\n self.append(element)\n else:\n node = Node(element)\n cursor = self.head\n for i in range(pos-1):\n cursor = cursor.next\n node.next = cursor.next\n node.prev = cursor\n cursor.next.prev = node\n cursor.next = node",
"def insert(self, pos, length):\n if pos in self.insertions:\n self.insertions[pos] += length\n else:\n self.insertions[pos] = length",
"def insert(self, pos, value):\n\t\titems = self.__dict__.values()\n\t\tif not isinstance(pos, int) or pos < 0:\n\t\t\traise ValueError(\"'pos' value is not positive integer.\")\n\t\telif pos > len(items):\n\t\t\traise ValueError(\"'pos' value is not a position in self.__dict__\")\n\t\titems.insert(pos, value)\n\t\tnew_dict = {}\n\t\tfor x, y in enumerate(items):\n\t\t\tnew_dict.update({x: y})\n\t\tself.__dict__ = new_dict",
"def insert(self, n, pos):\n if pos == 0:\n self.cons(n)\n else:\n prev = self.index(pos-1)\n next = prev.next\n prev.next = n\n n.next = next\n self.len += 1",
"def ListInsert(raw_list,insert_indice,value = None,padding = None):\n length = len(raw_list)\n if insert_indice+1 <= length:\n raw_list[insert_indice] = value\n else:\n for i in range(length,insert_indice):\n raw_list.append(padding)\n raw_list.append(value)",
"def insert(self,x,pos):\n new = ListNode()\n new.value = x\n new.next = pos.next\n pos.next = new",
"def insert(self, index, value):\n if self._can_insert(index, value):\n list.insert(self, index, value)",
"def insert(self, index, value):\n self.__validate_index(index)\n self.__list = self.__list[:index] + [value] + self.__list[index:]\n return self.__list",
"def insert(self, index, value):\n self.list.insert(index, value)",
"def insert(self, val):\n self.data.insert(0,val)\n self.size = self.size + 1",
"def insert(self, val):\n if val not in self.posFind or self.posFind[val] == -1:\n self.nums.append(val)\n self.posFind[val] = len(self.nums) - 1\n return True\n return False",
"def insert(self,\n position,\n add_value=1):\n self.array[position] += add_value\n ibloc = int(position/self.bloc_size)\n self.blocsum[ibloc] += add_value",
"def add_lists(self, key, value, pos):\n if pos == 'r':\n return self.redis.rpush(key, value)\n else:\n return self.redis.lpush(key, value)",
"def do_insert(self, text):\n args = text.split()\n if len(args) == 2:\n try:\n pos = int(args[0])\n value = int(args[1])\n self.list.insert(pos, value)\n print(self.list, sep=', ')\n except ValueError:\n print('Error: invalid literal.')\n except IndexError:\n print('Error: invalid position.')\n else:\n print('Error: insert takes two parameters.')",
"def insert(self, idx, value):\n assert(isinstance(idx, int))\n nidx = self._normalize_idx(idx)\n self.data.append(None)\n for i in range(len(self.data)-1,idx,-1):\n self.data[i] = self.data[i-1]\n self.data[idx] = value",
"def leftaddlistitems(self, items, pos):\n self._leftlist.insert(pos, items)",
"def insert(self, index, value):\n self.__field.validate_element(value)\n return list.insert(self, index, value)",
"def insert_at_position(self, position, data):\n node = Node(data)\n if not self.head:\n self.head = node\n return\n if position == 1:\n node.next = self.head\n self.head = node\n return\n temp = self.head\n for _ in range(1, position - 1):\n if not temp:\n print('Index out of bound')\n return\n temp = temp.next\n node.next = temp.next\n temp.next = node",
"def insert(self, val):\n # if it already exists return error\n if val in self.inds:\n return False\n # record the index and save the number\n self.inds[val] = len(self.nums)\n self.nums.append(val) \n return True",
"def addAtHead(self, val):\n self.nums.insert(0, val)",
"def insert(self, val):\n if val in self.d:\n return False\n self.d[val] = len(self.l)\n self.l.append(val)\n return True",
"def insert(self, val):\r\n if len(self.data) != self.len:\r\n self.data[self.len] = val\r\n else:\r\n self.data.append(val)\r\n if val in self.indices:\r\n self.indices[val].append(self.len)\r\n self.len += 1\r\n return False\r\n else:\r\n self.indices[val] = [self.len]\r\n self.len += 1\r\n return True",
"def insert(self, index, item):\n # type: (int, Any) -> None\n return list.insert(self, index, self.ref(item))",
"def insert(self, new_element, position):\n current = self.head\n count = 1\n \n if position == 1:\n new_element.next = current\n self.head = new_element\n # elif not(isinstance(self.get_pos(pos), str)): # use: type(self.get_pos(pos)) == str\n else:\n while count < position-1:\n current = current.next\n count += 1\n new_element.next = current.next\n current.next = new_element",
"def insert(self, index, value):\n list.insert(self, index, value)\n self.emit('inserted', index, value)\n self.emit('modified')",
"def addAtIndex(self, index, val):\n if 0 <= index < len(self.nums):\n self.nums.insert(index, val)\n elif index == len(self.nums):\n self.nums.append(val)",
"def insert(*, list : Union[List[Any], ConduitVariable], index : int, item : Any) -> None:\n list.insert(index, item)",
"def insert(self, index, value):\n # check the validity of index\n if index < 0 or index > self.n: # larger than no. of items\n print(\"Index Error; please input valid index\")\n return\n # if index==0, same as push_front\n if index==0:\n self.push_front(value)\n return\n # else,\n new_node = Node(value)\n temp_node = self.head\n for _ in range(index-1):\n temp_node = temp_node.next # traverse the list\n new_node.next = temp_node.next # temp_node is index-1 node\n temp_node.next = new_node\n self.n += 1"
] | [
"0.7365507",
"0.71653986",
"0.7131035",
"0.70607984",
"0.7060087",
"0.6903673",
"0.6818579",
"0.67716753",
"0.6679426",
"0.66339636",
"0.6624288",
"0.6520374",
"0.6493936",
"0.6466639",
"0.64303565",
"0.63194513",
"0.6312773",
"0.62931496",
"0.6289725",
"0.6266714",
"0.6262716",
"0.62521863",
"0.6250796",
"0.62277186",
"0.621521",
"0.6199067",
"0.6196287",
"0.61368847",
"0.61355704",
"0.6135295"
] | 0.7602359 | 0 |
Deserialize privilege bytes to Privileges object. | def deserialize(cls, priv_bytes: bytes) -> object:
if len(priv_bytes) not in (0, 1, 3):
raise ValueError("Invalid privilege bytes")
priv_bytes += (3 - len(priv_bytes)) * b'\0'
value = bytes_to_int_big_endian(priv_bytes)
lst = cls()
for key, bits in cls._MAP.items():
if value & bits[0] == bits[0] and value & bits[1] == 0:
lst.append(key)
return lst | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deserialize(self, byte: bytes):\n pass",
"def from_pem(cls, data, password=None):\n p = cls()\n private_from_encoding(data, p, password)\n return p",
"def from_binary(self, d):\n p = MsgEd25519SignatureDepA._parser.parse(d)\n for n in self.__class__.__slots__:\n setattr(self, n, getattr(p, n))",
"def deserializePrivateKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_private_key(string, password = None , backend = bc)",
"def from_binary(self, d):\n p = MsgEd25519SignatureDepB._parser.parse(d)\n for n in self.__class__.__slots__:\n setattr(self, n, getattr(p, n))",
"def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 72\n (_x.health, _x.utcA0, _x.utcA1, _x.utcTOW, _x.utcWNT, _x.utcLS, _x.utcWNF, _x.utcDN, _x.utcLSF, _x.utcSpare, _x.klobA0, _x.klobA1, _x.klobA2, _x.klobA3, _x.klobB0, _x.klobB1, _x.klobB2, _x.klobB3, _x.flags,) = _get_struct_I2di6h8fI().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def loads(cls, data: str) -> \"Serializable\":\n instance = dill.loads(base64.b64decode(data.encode(\"ascii\")))\n if not isinstance(instance, cls):\n raise TypeError(f\"Unpickled object is not of type {cls}\")\n return instance",
"def _unarmor_pem(data, password=None):\n\n object_type, headers, der_bytes = unarmor(data)\n\n type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'\n armor_type = re.match(type_regex, object_type)\n if not armor_type:\n raise ValueError(pretty_message(\n '''\n data does not seem to contain a PEM-encoded certificate, private\n key or public key\n '''\n ))\n\n pem_header = armor_type.group(1)\n\n data = data.strip()\n\n # RSA private keys are encrypted after being DER-encoded, but before base64\n # encoding, so they need to be handled specially\n if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):\n algo = armor_type.group(2).lower()\n return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))\n\n key_type = pem_header.lower()\n algo = None\n if key_type == 'encrypted private key':\n key_type = 'private key'\n elif key_type == 'rsa public key':\n key_type = 'public key'\n algo = 'rsa'\n\n return (key_type, algo, der_bytes)",
"def parse_bytes_to_dict(bytes_to_parse):\n return ast.literal_eval(bytes_to_parse.decode(\"utf-8\"))",
"def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 57\n (_x.decision, _x.distance, _x.oriX, _x.oriY, _x.oriZ, _x.placX, _x.placY, _x.placZ,) = _get_struct_b7d().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def deserialize(self, blob):\n return dill.loads(blob.encode('latin-1'))",
"def deserializer():\n return bytes.decode",
"def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 4\n (self.yaw,) = _struct_f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def decode_packet(self, bytes):\n b64 = False\n if not isinstance(bytes, six.binary_type):\n bytes = bytes.encode('utf-8')\n\n packet_type = six.byte2int(bytes[0:1])\n if packet_type == ord('b'):\n binary = True\n bytes = bytes[1:]\n packet_type = int(chr(six.byte2int(bytes[0:1])))\n b64 = True\n elif packet_type >= ord('0'):\n packet_type = int(chr(packet_type))\n binary = False\n else:\n binary = True\n\n packet_data = None\n if len(bytes) > 1:\n if binary:\n if b64:\n packet_data = base64.b64decode(bytes[1:])\n else:\n packet_data = bytes[1:]\n else:\n packet_data = bytes[1:].decode('utf-8')\n\n return Packet(packet_type, packet_data, binary)",
"def deserialize(self, data):\n assert self._cert_store is not None\n try:\n data = self._deserialize(data)\n signature = b64decode(data[\"signature\"])\n signer = data[\"signer\"]\n data = data[\"data\"]\n self._cert_store[signer].verify(data, signature, self._digest)\n return self._deserialize(data)\n except Exception, exc:\n raise SecurityError(\"Unable to deserialize: %r\" % (exc, ))",
"def from_bytes(buf: bytes) -> 'ProposalInfo':\n proposal_info_in_dict: dict = json_loads(buf.decode())\n proposal_info_in_dict[\"id\"] = bytes.fromhex(proposal_info_in_dict[\"id\"])\n proposal_info_in_dict[\"proposer\"] = Address.from_string(proposal_info_in_dict[\"proposer\"])\n return ProposalInfo(**proposal_info_in_dict)",
"def from_binary(self, d):\n p = MsgEd25519CertificateDep._parser.parse(d)\n for n in self.__class__.__slots__:\n setattr(self, n, getattr(p, n))",
"def decode(binary):\n return json_mod.loads(binary.decode(\"utf-8\"))",
"def FromBytes (cls, data):\n return cls (json.loads (zlib.decompress (data).decode ('utf-8')))",
"def from_binary(self, d):\n p = MsgEcdsaSignatureDepA._parser.parse(d)\n for n in self.__class__.__slots__:\n setattr(self, n, getattr(p, n))",
"def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 24\n (_x.sysid, _x.compid, _x.limits_state, _x.last_trigger, _x.last_action, _x.last_recovery, _x.last_clear, _x.breach_count, _x.mods_enabled, _x.mods_required, _x.mods_triggered,) = _struct_3B4IH3B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def deserialize(serialized: str): \n try:\n decoded = b64decode(serialized)\n return loads(decoded)\n except Exception as e:\n # If we raise an Exception in a try/except, it includes the original exception.\n # So we include from None to prevent this (and only raise a single exception)\n raise Exception(f\"{type(e).__name__}: {e} raised on {serialized[:50]}\") from None",
"def from_binary(self, d):\n p = MsgEcdsaSignature._parser.parse(d)\n for n in self.__class__.__slots__:\n setattr(self, n, getattr(p, n))",
"def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 36\n (_x.mask, _x.dynModel, _x.fixMode, _x.fixedAlt, _x.fixedAltVar, _x.minElev, _x.drLimit, _x.pDop, _x.tDop, _x.pAcc, _x.tAcc, _x.staticHoldThresh, _x.dgpsTimeOut, _x.reserved2, _x.reserved3, _x.reserved4,) = _get_struct_H2BiIbB4H2B3I().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill",
"def from_bytes(cls, bytes):\n construct = _constructs.PreMasterSecret.parse(bytes)\n return cls(\n client_version=ProtocolVersion(\n major=construct.version.major,\n minor=construct.version.minor,\n ),\n random=construct.random_bytes,\n )",
"def from_binary(self, d):\n p = MsgEcdsaSignatureDepB._parser.parse(d)\n for n in self.__class__.__slots__:\n setattr(self, n, getattr(p, n))",
"def deserialize(self, message):\n # Removes the random prefix\n message = message[12:]\n message = message.decode(\"utf-8\")\n\n if self.crypter:\n message = self.crypter.decrypt(message, self.expiry + 10)\n return json.loads(message)",
"def deserialize(self, data):",
"def decode(binary):\n name, fields = JsonCoder.decode(binary)\n dataclass = dataclasses.make_dataclass(\n name, [(key, type(value)) for key, value in fields.items()]\n )\n instance = dataclass(**fields)\n return instance",
"def extract(self, data):\n return ujson.loads(self.cipher.decrypt(data))"
] | [
"0.5320958",
"0.5281915",
"0.52777076",
"0.52333075",
"0.51927114",
"0.51070666",
"0.5058015",
"0.5019592",
"0.50140345",
"0.5010676",
"0.4996001",
"0.49813634",
"0.4941202",
"0.4928008",
"0.4924398",
"0.49172363",
"0.49156976",
"0.48830962",
"0.4876608",
"0.48694396",
"0.48627627",
"0.4851241",
"0.48459193",
"0.48409036",
"0.48332366",
"0.4831099",
"0.48271215",
"0.48170915",
"0.48169586",
"0.4809046"
] | 0.71521014 | 0 |
Serialize Privileges to privilege bytes. | def serialize(self, n_bytes: int = 3) -> bytes:
value = 0
# This masks clears 'security_domain' bit
check_mask = ~(self._MAP['SECURITY_DOMAIN'][0])
for priv in self:
set_bits = self._MAP[priv][0]
if (value & check_mask) & set_bits != 0:
raise ValueError("Incompatible privileges")
value |= set_bits
priv_bytes = int_to_bytes_big_endian(value, 3)
return priv_bytes[:n_bytes] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __bytes__(self):\n byteout = bytearray()\n for index in range(1, 15):\n key = \"d\" + str(index)\n if self._user_data.get(key) is not None:\n byteout.append(self._user_data[key])\n else:\n byteout.append(0x00)\n return bytes(byteout)",
"def serialize(self, password: str = None) -> bytes:\n pass",
"def dump(self):\n\n result = {\n 'verb': self.verb,\n 'whitelist_name': self.whitelist_name,\n 'permissioned_public_keys': self.permissioned_public_keys,\n 'permissioned_addrs': self.permissioned_addrs\n }\n return result",
"def serialize_to_bytes(self):\n return \\\n struct.pack(\n self._format,\n self.cpu_svn.serialize_to_bytes(),\n self.misc_select,\n self._RESERVED_1,\n self.attributes.serialize_to_bytes(),\n self.mr_enclave.serialize_to_bytes(),\n self._RESERVED_2,\n self.mr_signer.serialize_to_bytes(),\n self._RESERVED_3,\n self.isv_prod_id,\n self.isv_svn,\n self._RESERVED_4,\n self.report_data.serialize_to_bytes())",
"def dumps(self) -> str:\n bits = dill.dumps(self)\n return base64.b64encode(bits).decode(\"ascii\")",
"def show_privileges(self):\n print(\"\\nPrivilegios:\")\n for privilege in self.privileges:\n print(\"- \" + privilege)",
"def toBytes(self):\n return self.toJson().encode()",
"def toBytes(self):\n return self.toJson().encode()",
"def toBytes(self):\n return self.toJson().encode()",
"def deserialize(cls, priv_bytes: bytes) -> object:\n if len(priv_bytes) not in (0, 1, 3):\n raise ValueError(\"Invalid privilege bytes\")\n priv_bytes += (3 - len(priv_bytes)) * b'\\0'\n value = bytes_to_int_big_endian(priv_bytes)\n lst = cls()\n for key, bits in cls._MAP.items():\n if value & bits[0] == bits[0] and value & bits[1] == 0:\n lst.append(key)\n return lst",
"def to_bytes(self) -> bytes:\n proposal_info_in_dict = vars(self)\n proposal_info_in_dict[\"id\"] = bytes.hex(proposal_info_in_dict[\"id\"])\n proposal_info_in_dict[\"proposer\"] = str(proposal_info_in_dict[\"proposer\"])\n return json_dumps(proposal_info_in_dict).encode()",
"def dumpprivkey(self, address):\n return self.proxy.dumpprivkey(address)",
"def dumps(self, data: Any) -> bytes:\n out = BytesIO()\n self._write(out, data)\n return out.getvalue()",
"def serialize(self) -> bytes:\n pass",
"def serialize(self) -> bytes:\n pass",
"def to_json(self):\n return PolicySchema().dump(self)",
"def to_bytes(self) -> bytes:",
"def to_bytes(self) -> bytes:\n\n # for extendability\n version = ACCOUNT_DATA_STRUCTURE_VERSION\n\n flags = 0\n if self._locked:\n flags |= AccountFlag.LOCKED\n if self._c_rep:\n flags |= AccountFlag.C_REP\n\n return Account._struct.pack(version, self._type, flags, self._icx.to_bytes(DEFAULT_BYTE_SIZE, DATA_BYTE_ORDER))",
"def to_bytes(self) -> bytes:\n return pack('4B', self.tag, self.flags, self.reserved, self.params_count)",
"def to_dump(self):\n s = []\n for k in self.keys():\n if isinstance(self[k], int) or isinstance(self[k], long):\n s.append(\"%s=%d\" % (k, self[k]))\n elif isinstance(self[k], float):\n s.append(\"%s=%f\" % (k, self[k]))\n else:\n for v2 in self.list(k):\n if isinstance(v2, str):\n s.append(\"%s=%s\" % (k, v2))\n else:\n s.append(\"%s=%s\" % (k, util.encode(v2)))\n s.append(\"~format=%s\" % self.format)\n s.append(\"\")\n return \"\\n\".join(s)",
"def dumps(self, obj, salt=None):\n payload = want_bytes(self.dump_payload(obj))\n rv = self.make_signer(salt).sign(payload)\n if self.is_text_serializer:\n rv = rv.decode(\"utf-8\")\n return rv",
"def to_binary(self):\n c = containerize(exclude_fields(self))\n self.payload = MsgEd25519SignatureDepA._parser.build(c)\n return self.pack()",
"def encrypt_data(self, params):\n from django.core.signing import dumps\n return dumps(params, salt=self.salt_namespace)",
"def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()",
"def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()",
"def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()",
"def __bytes__(self):\n return bytes(bytearray([int(self)]))",
"def persistence_serialize(self):\n raise NotImplementedError",
"def to_bytes(self):\n return bytes(self.data)",
"def show_privileges(self):\n print(\"\\nAdministrator privileges: \")\n for privilege in self.privileges:\n print(\"- \" + privilege)"
] | [
"0.5824203",
"0.57819706",
"0.5508372",
"0.54918075",
"0.5487138",
"0.5451303",
"0.542187",
"0.542187",
"0.542187",
"0.53858984",
"0.53761977",
"0.53647023",
"0.533796",
"0.533434",
"0.533434",
"0.52456355",
"0.52057683",
"0.516433",
"0.51384103",
"0.5122727",
"0.512248",
"0.51070744",
"0.5096256",
"0.50480884",
"0.50480884",
"0.50480884",
"0.5045463",
"0.5042496",
"0.50323135",
"0.50203997"
] | 0.62342376 | 0 |
Set The Title And The Icon Of The Main Window | def set_title_and_icon():
pygame.display.set_caption("Sukodu")
icon = pygame.image.load("assets/icon.png")
pygame.display.set_icon(icon) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def showBasic(self):\n self.setWindowIcon(QIcon(self.icon))\n self.setWindowTitle(self.title)\n self.setGeometry(*self.posXY, *self.windowSize)\n self.show()",
"def __init__(self, title):\n super(TXWindowHeader, self).__init__()\n self.setupUi(self)\n self.window_lbl.setText(title)\n pxm = QtGui.QPixmap(path.Path(__file__).dirname().dirname() / 'resource' / 'tx_x.png').scaled(30, 30, QtCore.Qt.AspectRatioMode.KeepAspectRatio, QtCore.Qt.SmoothTransformation)\n self.tx_logo_lbl.setPixmap(pxm)",
"def set_title(self):\n if self.loaded_filename:\n self.setWindowTitle('Starbound Mapper | {}'.format(self.loaded_filename))\n else:\n self.setWindowTitle('Starbound Mapper')",
"def configure_ui(self):\n self.setWindowIcon(self.MAIN_ICON)\n self.setWindowModality(Qt.ApplicationModal)",
"def set_title(self):\n if self.currentconfig is None:\n self.setWindowTitle(\"(No config)\")\n elif self.cfname is None:\n self.setWindowTitle(\"Working config\")\n else:\n filename = miscutils.removesuffix(os.path.basename(self.cfname)).upper()\n self.setWindowTitle(\"Processing - \" + filename)",
"def set_title( self , winTitle ):\r\n self.rootWin.wm_title( str( winTitle ) )",
"def SetIcons (self):\n self.window.setWindowIcon(QIcon(win_icon))\n self.window.tabs.setTabIcon(0,QIcon(ui_data))\n self.window.tabs.setTabIcon(1,QIcon(ui_field))\n self.window.tabs.setTabIcon(2,QIcon(ui_query))",
"def set_caption(self, title=None, icon=None):\n pygame.display.set_caption(title if title is not None else \"Game\")\n\n if icon is not None:\n pygame.display.set_icon(self.__manager.get_image(icon))",
"def initialise_window(self):\n self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)\n self.imageLabel.setScaledContents(True)\n self.scrollArea.setWidget(self.imageLabel)\n self.setCentralWidget(self.scrollArea)\n self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) # Disable horizontal scrollbar.\n self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) # Disable vertical scrollbar.\n self.setWindowTitle(\"Robot Map\") # Set title.\n self.showFullScreen() # Make fullscreen.",
"def winTitle(self, title):\n winTitle = title\n window = self.window\n window.setWindowTitle(winTitle)",
"def display_gui_window(self, window_title):\r\n cv2.imshow(window_title, self.image)",
"def setWindowTitle(self, title):\n self.__windowTitle = title",
"def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n \n self.setWindowTitle(\"RPI HMI - pH Debug\") # Title creation",
"def main():\n # Makes the icon in the taskbar as well.\n appID = \"opt-id\" # arbitrary string\n if os.name == 'nt':\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(appID)\n elif os.name == 'posix':\n pass\n\n app = App(sys.argv)\n # To add icon:\n # app.setWindowIcon(QIcon(application_path\n # + \"{0}gui{0}misc{0}logo{0}logo3.ico\".format(os.sep)))\n sys.exit(app.exec_())",
"def __setDetails(self):\n self.MainWindow.setWindowTitle(\"{0} {1}\".format(\n const.APP_NAME, const.VERSION))\n return True",
"def set_up_icon(self):\n\t\tself.__menu = QMenu()\n\t\tself.__show = QAction(\"Show\")\n\t\tself.__quit = QAction(\"Quit\")\n\n\t\t# signals --> slots\n\t\tself.__show.triggered.connect(self.show_app_slot)\n\t\tself.__quit.triggered.connect(self.close_app_slot)\n\n\t\tself.__menu.addActions([self.__show, self.__quit])\n\t\tself.setIcon(QIcon('app.png'))\n\t\tself.setContextMenu(self.__menu)",
"def set_window_icon(widget, remove_help=True):\n icon = QIcon(get_resource_path('icons/Ducky.ico'))\n widget.setWindowIcon(icon)\n if remove_help:\n widget.setWindowFlags(Qt.Window |\n Qt.CustomizeWindowHint |\n Qt.WindowTitleHint |\n Qt.WindowCloseButtonHint |\n Qt.WindowStaysOnTopHint)",
"def set_window_title(self, filename=None):\n\n if filename:\n self.setWindowTitle(f\"DataLab {self.version} - Loaded Project: {filename}\")\n else:\n self.setWindowTitle(f\"DataLab {self.version}\")",
"def init(self):\n sg.theme(gui.app_theme)\n self.window = sg.Window(\n gui.app_title,\n gui.create_layout(),\n **gui.window_config,\n )\n gui.after_window_init(self.window)",
"def __init__(self, title, width, height):\n super(BaseApp, self).__init__()\n self.title(title)\n self.__geometry(width, height)",
"def title(self, title: str):\n\n #self.master.title(title)\n self.ax.set_title(title)\n self.canvas.draw()",
"def __init__(self, inWindowTitleStr):\n super(MainWindow, self).__init__()\n self._mainWorkspace = None\n\n self.setWindowTitle(inWindowTitleStr)\n self.setGeometry(500, 100, 700, 900)\n\n self.mainWorkspace = workspace.WorkSpace(parent=self)",
"def initGui(self):\n\n icon_path = ':/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'PacSafe'),\n callback=self.run,\n parent=self.iface.mainWindow())",
"def set_root_title(self, text=None):\n title = \"Faceswap.py\"\n title += \" - {}\".format(text) if text is not None and text else \"\"\n self.root.title(title)",
"def __init__(self, master=None):\r\n \r\n tk.Frame.__init__(self, master)\r\n self.master.title('AQUAMI')\r\n module_path, this_filename = os.path.split(__file__)\r\n try:\r\n self.master.iconbitmap(''.join((module_path, '/icon.ico')))\r\n except:\r\n pass\r\n\r\n self.initGUI()",
"def set_title(self):\n plt.title(label=self.title, fontsize=self.titlesize)",
"def about(self, \n\t\tbackground=os.getenv('IC_SPLASHSCREEN', QtGui.QColor('#111111')), #23282d\n\t\ticon=None, \n\t\tmessage=\"\"):\n\n\t\taboutDialog = about.AboutDialog(parent=self)\n\t\taboutDialog.display(\n\t\t\tbackground=background, \n\t\t\ticon_pixmap=self.iconTint(icon), \n\t\t\tmessage=message)",
"def initGui(self):\n\n icon_path = ':/plugins/Integracion/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u''),\n callback=self.run,\n parent=self.iface.mainWindow())",
"def set_up_menu(self):\n self.app.title = \"work\"\n self.timer.start()",
"def initGui(self):\n\n icon_path = ':/plugins/Hybriddekning/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'Hybriddekning'),\n callback=self.run,\n parent=self.iface.mainWindow())"
] | [
"0.7387999",
"0.72235245",
"0.70776117",
"0.70550996",
"0.69922644",
"0.69855475",
"0.68707806",
"0.6845815",
"0.6811798",
"0.6764755",
"0.6720163",
"0.6626838",
"0.66072255",
"0.6593078",
"0.6582844",
"0.65729517",
"0.6561191",
"0.65342915",
"0.64890605",
"0.6480931",
"0.6453399",
"0.64420396",
"0.64390624",
"0.64386785",
"0.6424706",
"0.64177334",
"0.63959813",
"0.6393473",
"0.63376707",
"0.632907"
] | 0.77098817 | 0 |
Load All The Buttons | def load_buttons(self):
self.playing_buttons.append(Button(20, 40, 100, 40, "New Game")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_all_buttons(self) -> bool:\n raise NotImplementedError",
"def show_main_buttons(self):\n pass",
"def create_buttons(self):\r\n return []",
"def generate_buttons(self):\n # Put up a progress dialog\n self.get_world_progress = QtWidgets.QProgressDialog(self.mainwindow)\n self.get_world_progress.setWindowTitle('Caching World Information for {}'.format(self.player.name))\n label = QtWidgets.QLabel('<b>Caching World Information for {}</b>'.format(self.player.name))\n label.setAlignment(QtCore.Qt.AlignCenter)\n self.get_world_progress.setLabel(label)\n self.get_world_progress.setRange(0, 0)\n self.get_world_progress.setModal(True)\n self.get_world_progress.setMinimumSize(300, 100)\n self.get_world_progress.show()\n # TODO: This is lame - handle cancel events properly instead.\n self.get_world_progress.setCancelButton(None)\n self.mainwindow.app.processEvents()\n\n # Actually do the loading\n buttons = []\n for (mtime, cache_entry, filename) in self.player.get_worlds(self.parent_dialog.mainwindow.data,\n progress_callback=self.update_get_world_progress):\n button = OpenByPlanetName.PlanetNameButton(self, filename, mtime, cache_entry)\n buttons.append((mtime, cache_entry.sort_name, button))\n\n # Clean up and exit\n self.get_world_progress.close()\n self.get_world_progress = None\n return buttons",
"def test_02_AllButtons(self):\n l_button = self.m_api.read_all_buttons_xml(self.m_pyhouse_obj, self.m_xml.button_sect, self.m_version)\n l_xml = self.m_api.write_buttons_xml(self.m_pyhouse_obj)",
"def generate_buttons(self):\n raise Exception('Implement me!')",
"def getbuttons(self):\n return self.buttons",
"def generate_buttons(self):\n buttons = []\n for mtime, player in self.mainwindow.data.get_all_players():\n button = OpenByPlayerName.PlayerNameButton(self, player, mtime)\n buttons.append((mtime, player.name.lower(), button))\n return buttons",
"def initialize_buttons(self):\r\n self.start_button = tk.Button(self.master, text='Start', command = self.startRunning)\r\n self.start_button.grid(row=0, column=0)\r\n\r\n self.pause_button = tk.Button(self.master, text='Pause', command = self.pauseRunning)\r\n self.pause_button.grid(row=0, column=1)\r\n\r\n self.graph_button = tk.Button(self.master, text='Graph', command = self.showGraph)\r\n self.graph_button.grid(row=0, column=2)\r\n \r\n self.plot_button = tk.Button(self.master, text='Plot', command = self.showPlot)\r\n self.plot_button.grid(row=0, column=3)\r\n \r\n self.draw_button = tk.Button(self.master, text='Draw', command = self.drawCells)\r\n self.draw_button.grid(row=0, column=4)\r\n \r\n # Initialize Button States and Actions\r\n self.pause_button['state'] = 'disabled'\r\n # Boolean switch to control flow of placement process\r\n self.running = False\r\n # Boolean switch to plot placement connections and tags, turn off for faster processing\r\n self.plot = False\r\n self.drawing = False\r\n self.graph = False\r\n # Boolean switch to specify first run and allow stop/continue behavior that doesn't initialize program\r\n self.firstRun = True",
"def update_buttons(self):\n # Enable the Add/Remove step buttons if a Generator is loaded\n enable = self.mgr.obj is not None\n self.addButton.setEnabled(enable)\n self.removeButton.setEnabled(enable)\n self.upButton.setEnabled(enable)\n self.downButton.setEnabled(enable)",
"def activate_buts(self):\n\n for b in self.gamebuttons:\n b.activate()",
"def init_buttons(self):\r\n self.btn_encrypt = QtWidgets.QPushButton('Encrypt')\r\n self.btn_encrypt.clicked.connect(self.encrypt)\r\n self.btn_encrypt.setEnabled(False)\r\n\r\n self.btn_decrypt = QtWidgets.QPushButton('Decrypt')\r\n self.btn_decrypt.clicked.connect(self.decrypt)\r\n self.btn_decrypt.setEnabled(False) \r\n\r\n self.layout_buttons = QtWidgets.QGridLayout()\r\n\r\n self.layout_buttons.addWidget(self.btn_encrypt,0,0)\r\n self.layout_buttons.addWidget(self.btn_decrypt,0,1)",
"def loadButtonMethod(self):\n return AbstractAction(\"Load\")",
"def setup_buttons(self):\n confirm = self.centre.findChild(QPushButton, \"confirmBtn\")\n confirm.clicked.connect(partial(self.confirmed))\n cancel = self.centre.findChild(QPushButton, \"cancelBtn\")\n cancel.clicked.connect(partial(self.controller.show_selector_menu))",
"def show_buttons(self):\n for button in self.buttons:\n x = button.starting_x\n y = button.starting_y\n self.screen.fill(button.color, ((x, y), (button.width, button.height)))",
"def get_request_buttons():\r\n return request_buttons",
"def buttons(self):\n return self._buttons",
"def buttons(self):\n return self._buttons",
"def buttons(self):\n return self._buttons",
"def _update_buttons(self):\n for index, piece in enumerate(self.game_board.board):\n self.board_button_list[index].config(\n image=self.image_dict.get(f\"{piece.team_id}{piece.piece_id}\")\n )\n\n self.update()",
"def place_app_buttons(self):\n i = 0\n for app in Settings.roku_apps:# pylint: disable=no-member\n tk.Button(self.parent,\n text=app,\n command=lambda: self.rokucontrol.app_callback(app)).grid(row=i, column=4)\n i += 1",
"def test_get_custom_buttons_list(self):\n pass",
"def create_buttons(self: object) -> None:\n buttons = {\"BWT\": Button(\n self, text=\"BWT\", command=self.bwt_window, width = 15).grid(row=3,column=0, padx=5, pady=6),\n \"DEBWT\": Button(\n self, text=\"reverse BWT\", command=self.debwt_window,width = 15).grid(\n row=6,column=0, padx=5, pady=6),\n \"Huffcode\": Button(\n self, text=\"Huffman coding\", command=self.huffcode_window, width = 15).grid(\n row=3,column=1, padx=5, pady=6),\n \"Huffdecode\": Button(\n self, text=\"Huffman decoding\", command=self.huffdecode_window, width = 15).grid(\n row=6,column=1, padx=5, pady=6),\n \"fullzip\": Button(\n self, text=\"Full zip\", command=self.fullzip_window, width = 15).grid(\n row=3,column=2, padx=5, pady=6),\n \"fullunzip\": Button(\n self, text=\"Full unzip\", command=self.fullunzip_window, width = 15).grid(\n row=6,column=2, padx=5, pady=6),\n \"generate\": Button(\n self, text=\"Generate\", command=self.generate_random, width = 15).grid(\n row=10,column=1, padx=5, pady=6),\n \"save\": Button(\n self, text=\"Save\", command=self.save_random, width = 15).grid(\n row=11,column=1, padx=5, pady=6)}\n\n self.buttons = buttons",
"def make_play_mode_buttons(self):\r\n play_button_list = []\r\n play_button_1a = Button(self._screen,\"1 Atom Random\", 200, 162, 1)\r\n play_button_list.append(play_button_1a)\r\n play_button_2a = Button(self._screen, \"2 Atoms Random\", 500, 162, 2)\r\n play_button_list.append(play_button_2a)\r\n play_button_3a = Button(self._screen, \"3 Atoms Random\", 200, 350, 3)\r\n play_button_list.append(play_button_3a)\r\n play_button_4a = Button(self._screen, \"4 Atoms Random\", 500, 350, 4)\r\n play_button_list.append(play_button_4a)\r\n play_button_5a = Button(self._screen, \"5 Atoms Random\", 200, 537, 5)\r\n play_button_list.append(play_button_5a)\r\n play_button_6a = Button(self._screen, \"Manual 4 Atoms\", 500, 537,\r\n \"4m\")\r\n play_button_list.append(play_button_6a)\r\n\r\n return play_button_list",
"def _add_buttons(self, gui):\n gui.greet_button.pack()\n gui.close_button.pack()\n gui.buttons_on.set(True)",
"def update_buttons(self):\n # Enable the Add/Remove/Up/Down measurements buttons if a Survey is loaded\n enable = self.mgr.obj is not None\n self.addButton.setEnabled(enable)\n self.removeButton.setEnabled(enable)\n self.upButton.setEnabled(enable)\n self.downButton.setEnabled(enable)\n \n # Enable the Add/Remove condition buttons if a Measurement is selected\n #enable = len(list(self.mgr.obj.measurements)) > 0\n enable = self.measurementTableWidget.rowCount() > 0\n self.addConditionButton.setEnabled(enable)\n self.removeConditionButton.setEnabled(enable)",
"def show_control_buttons(self):\n self.settings_button.show()\n self.radio_button.show()\n self.blank_button.show()\n self.close_button.show()",
"def draw_buttons(self):\n for button in self.playing_buttons:\n button.draw(self.screen)",
"def _initResizeButtons(self):\n self._addBasesButton = SVGButton(\":/pathtools/add-bases\", self)\n self._addBasesButton.clicked.connect(self._addBasesClicked)\n self._addBasesButton.hide()\n self._removeBasesButton = SVGButton(\":/pathtools/remove-bases\", self)\n self._removeBasesButton.clicked.connect(self._removeBasesClicked)\n self._removeBasesButton.hide()",
"def populate_buttons(self):\n\n # Figure out which index we'll sort on\n if self.sort_group.checkedButton() == self.button_mtime:\n to_sort = self.sort_mtime_idx\n reverse = True\n else:\n to_sort = self.sort_alpha_idx\n reverse = False\n\n # Now add things. This'll automatically shuffle stuff around without\n # us having to worry about removing things first.\n for row, (_, _, button) in enumerate(\n sorted(self.buttons, reverse=reverse, key=lambda i: i[to_sort])\n ):\n self.grid.addWidget(button, row, 0)"
] | [
"0.7483391",
"0.7030409",
"0.7026208",
"0.689735",
"0.686187",
"0.6792814",
"0.6710763",
"0.670641",
"0.6673981",
"0.6620799",
"0.6557749",
"0.6476816",
"0.6472717",
"0.6458795",
"0.64072716",
"0.64047295",
"0.63530964",
"0.63530964",
"0.63530964",
"0.6310986",
"0.63100123",
"0.62846017",
"0.62407684",
"0.62219226",
"0.62208045",
"0.6164744",
"0.61626416",
"0.6159008",
"0.61448145",
"0.6142279"
] | 0.7794688 | 0 |
It Colors The Selected Cell | def draw_selected(self):
if self.get_selected() is not None and not self.check_if_locked(self.get_selected()):
self.color_cell(pos=self.get_selected(
), color=SELECTED_INVALID if self.get_selected() in self.invalid else SELECTED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def click_color(self, row_i):\n self.bg_reset()\n select_rgb = (191, 243, 228)\n select_color = QtGui.QColor(*select_rgb)\n for col_i in range(self.people_table.columnCount()):\n # when table changes row_i may no longer exist (None)\n if self.people_table.item(row_i, col_i):\n self.people_table.item(row_i, col_i).setBackground(select_color)",
"def highlight_cells(c, c_dict):\n colour= c_dict.get(c)\n return 'background-color: %s' % colour",
"def on_selected(self):\n self.colour = self.selected_colour\n self.is_selected = True\n self.redraw()",
"def change_cell_bgcolor(self, cell: tuple, color: str = \"#fefefe\") -> None:\n self.cells[cell].set_background(color)",
"def selectCanvas(self, row, column, color):\n self.canvasGrid[row][column][\"background\"] = color",
"def select_cell(app, color, cell):\n app_cell = find_cell(app, cell)\n if app_cell is None:\n return 'Cell not found. may be invalid column and row in block or cell'\n if app_cell.get_cell_color() != color:\n return f'Cell not expected color: expected: {color}, actual: {app_cell.get_cell_color()}'\n app_cell.invoke()\n # app_cell.button.invoke()\n return ''",
"def cellSelected(self):\n\n x = self.tableWidget.currentRow()\n y = self.tableWidget.currentColumn()\n if y != self.CAT_COLUMN:\n return\n catText = str(self.tableWidget.item(x, y).text())\n #print(x,y, catText)\n self.tableWidget.item(x, y).setSelected(False)\n for row, code in enumerate(self.codes):\n if code['category'] == catText:\n self.tableWidget.item(row, self.CODE_COLUMN).setSelected(True)",
"def paint_cell(self, col, row, color):\r\n if isinstance(color, Number):\r\n self.A[row, col] = color\r\n else:\r\n self.A[row, col] = self.cdict[color]\r\n self.plot()",
"def choose_color(self, b, name):\n section, option = name\n cur = b.background_color[:3]\n self.subview_open = True\n name = dialogs.list_dialog(\"Choose a color\", COLORS, multiple=False)\n self.subview_open = False\n if name is None:\n return\n _stash.config.set(section, option, repr(name))\n self.table.reload_data()\n self.save()",
"def get_cell_color ( self, object ):\n if self.is_editable( object ):\n if self._is_selected( object ):\n return self.selected_cell_color_\n return self.cell_color_\n return self.read_only_cell_color_",
"def set_green(self):\n self.fill= Cell.FILLED_COLOR_BG\n self.draw()",
"def selectCell(self, data=None):\n for row in range(6):\n for col in range(7):\n itemWidget = self.table.cellWidget(row, col)\n _data = itemWidget.data\n if data == _data:\n self.table.setCurrentCell(row, col, QItemSelectionModel.Select)\n self.cellSelectedList[0] = self.table.selectedIndexes()[0]\n # print('cell selected', itemWidget.dictFlags)\n self.selectedDate()\n return",
"def select_color(self):\n\t\tresult = tkinter.colorchooser.askcolor(self.center['bg'])\n\t\tif result:\n\t\t\t# 2nd part of result is the color object\n\t\t\tself.center['bg'] = result[1]",
"def color(self):\n if self._simplecell:\n self.fetch()\n return self._color",
"def changing_color(self, row_i, res):\n # drop_j = self.person_columns.index('maxdrop')\n drop_j = 6\n drop_colors = {\n \"subject\": QtGui.QColor(249, 179, 139),\n \"visit\": QtGui.QColor(240, 230, 140),\n \"future\": QtGui.QColor(240, 240, 240),\n \"unknown\": QtGui.QColor(203, 233, 109),\n }\n\n for row_i, row in enumerate(res):\n droplevel = row[drop_j]\n # don't do anything if we don't have a color for this drop level\n if droplevel is None or droplevel == \"nodrop\":\n continue\n drop_color = drop_colors.get(droplevel, drop_colors[\"unknown\"])\n # go through each column of the row and color it\n for j in range(self.people_table.columnCount()):\n self.people_table.item(row_i, j).setBackground(drop_color)",
"def setCellColor(self, row, column, color = \"CCCCCC\"):\n\n\t\t\t\tfillObject = openpyxl.styles.PatternFill(start_color = color, end_color = color, fill_type = \"solid\")\n\t\t\t\tcell = self.getCell(row = row, column = column)\n\t\t\t\tcell.fill = fillObject",
"def select_cell(self, event):\n # Get row and symbols.\n row = event.GetRow()\n symbol1 = self.grid_correlations.GetCellValue(row, self.COLUMN_SYMBOL1)\n symbol2 = self.grid_correlations.GetCellValue(row, self.COLUMN_SYMBOL2)\n self.__selected_correlation = [symbol1, symbol2]\n\n self.show_graph(symbol1, symbol2)",
"def red2blue(self):\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n self.cells[x][y] = 2",
"def choose_rgb_color(self, b, name):\n section, option = name\n cur = b.background_color[:3]\n picker = RGBColorPicker(cur)\n self.subview_open = True\n rgb = picker.get_color()\n self.subview_open = False\n _stash.config.set(section, option, str(rgb))\n self.table.reload_data()\n self.save()",
"def cell_selection_changed(self):\n # Enable Edit Cell menu if a single cell is selection else disable it\n self.cells_selected = self.csv_data_table.selectionModel().selectedIndexes()\n if len(self.cells_selected) == 1:\n self.action_edit_data.setEnabled(True)\n else:\n self.action_edit_data.setEnabled(False)\n\n # Enable delete options iff 1 or more cells are selected\n if len(self.cells_selected) >= 1:\n self.action_delete_selected.setEnabled(True)\n self.action_toolbar_delete_selected.setEnabled(True)\n else:\n self.action_delete_selected.setEnabled(False)\n self.action_toolbar_delete_selected.setEnabled(False)\n\n # Add a way to identify all the currently selected columns\n cols = self.csv_data_table.selectionModel().selectedColumns()\n self.selected_columns = []\n for index in sorted(cols):\n col = index.column()\n self.selected_columns.append(col)\n\n rows = self.csv_data_table.selectionModel().selectedRows()\n self.selected_rows = []\n for index in sorted(rows):\n row = index.row()\n self.selected_rows.append(row)\n\n self.set_bottom_toolbar_info()\n\n # Enable plot toolbars iff exactly 2 columns are selected\n if len(self.selected_columns) == 2:\n self.set_plot_options(True)\n else:\n self.set_plot_options(False)",
"def edit_current_cell(self):\n cells = self.csv_data_table.selectionModel().selectedIndexes()\n if len(cells) == 1:\n for cell in sorted(cells):\n r = cell.row()\n c = cell.column()\n self.csv_data_table.editItem(self.csv_data_table.item(r, c))",
"def highlight_area(self, row1, row2, col1, col2, color, sheet_number):\n for i in range(row1, row2 + 1):\n for j in range(col1, col2 + 1):\n self.formats[sheet_number][i][j].set_bg_color(color)\n self.worksheets[sheet_number].write(i, j, self.arrays[sheet_number][i][j], self.formats[sheet_number][i][j])",
"def __selectHexColor(self):\n editor = e5App().getObject(\"ViewManager\").activeWindow()\n if editor is None:\n return\n \n if editor.hasSelectedText():\n currColor = editor.selectedText()\n if not self.__isValidColor(currColor):\n E5MessageBox.critical(\n self.__ui,\n self.tr(\"Color String\"),\n self.tr(\n \"\"\"<p>The selected string <b>{0}</b> is not a\"\"\"\n \"\"\" valid color string. Aborting!</p>\"\"\")\n .format(currColor))\n return\n \n if currColor.startswith(\"#\"):\n withHash = True\n elif self.__isHexString(currColor):\n withHash = False\n currColor = \"#\" + currColor\n else:\n withHash = True\n initColor = QColor(currColor)\n else:\n withHash = True\n currColor = \"\"\n initColor = QColor()\n \n color = QColorDialog.getColor(\n initColor, self.__ui, self.tr(\"Color String\"))\n if color.isValid():\n colorStr = color.name()\n if not withHash:\n colorStr = colorStr[1:]\n editor.beginUndoAction()\n if editor.hasSelectedText():\n editor.replaceSelectedText(colorStr)\n else:\n line, index = editor.getCursorPosition()\n editor.insert(colorStr)\n editor.setCursorPosition(line, index + len(colorStr))\n editor.endUndoAction()",
"def _onEdit(self, event):\n index = self.colorlist.GetSelection()\n icol = self._indexTupleToColor(index)\n icd = wx.ColourData()\n icd.SetColour(icol)\n dialog = wx.ColourDialog(self, icd)\n\n if dialog.ShowModal() == wx.ID_OK:\n tup = _colorDataToTuple(dialog.GetColourData())\n self.graphColors[index] = tup\n self._tupleListToStrings()\n self._updateButtons(None)",
"def get_cell_color ( self, object ):\n if self.is_editable( object ):\n return self.cell_color_\n return self.read_only_cell_color_",
"def _bbTableDoubleClicked(self, row, col):\n it = self.table.item(row, col).text()\n\n try:\n idx = int(it) # decimal\n bb_path = self.ba.cache.bb_paths[idx]\n\n col = QtGui.QColorDialog.getColor()\n if col.isValid():\n # IDA works with BGR (annoying)\n ida_color = misc.pyside_to_ida_color(col.name())\n\n misc.paint_basic_blocks(bb_path, ida_color)\n return\n\n except IndexError:\n # Address value (does not contain [A-F]) is interpreted as index\n return\n\n except ValueError:\n # Address value (containing [A-F]) fucks up int()\n return",
"def highlight_color(self):\n return curses.color_pair(4) if self.cycling else curses.color_pair(2)",
"def set_color(self, c, color, draw=True):\n \n if c == self.maze.get_start_cell() or c == self.maze.get_end_cell():\n return\n self.cvs.itemconfig(self.cvs_cells[c], fill=color)\n\n if draw: self.draw()",
"def cell_entered(self, row: int, _):\n self.selectRow(row)",
"def _onclick(self,event):\r\n if self.NumCells > 0:\r\n ShapeMask = np.shape(self.Mask)\r\n # get coorinates at selected location in image coordinates\r\n if event.xdata == None or event.ydata == None:\r\n return\r\n xcoor = min(max(int(event.xdata),0),ShapeMask[1])\r\n ycoor = min(max(int(event.ydata),0),ShapeMask[0])\r\n \r\n # search for the mask coresponding to the selected cell\r\n for EachCell in range(self.NumCells):\r\n if self.Mask[ycoor,xcoor,EachCell]:\r\n self.SelectedCellIndex = EachCell\r\n break\r\n \r\n # highlight selected cell\r\n if self.SelectedCellIndex not in self.selected_ML_Index:\r\n # Get the selected cell's contour coordinates and mask patch\r\n self.contour_verts, self.Cell_patch = self.get_cell_polygon(self.Mask[:,:,self.SelectedCellIndex])\r\n \r\n self.Matdisplay_Figure_axis.add_patch(self.Cell_patch)\r\n self.Matdisplay_Canvas.draw()\r\n \r\n self.selected_ML_Index.append(self.SelectedCellIndex)\r\n self.selected_cells_infor_dict['cell{}_verts'.format(str(self.SelectedCellIndex))] = self.contour_verts\r\n else:\r\n # If click on the same cell\r\n self.Cell_patch.remove()\r\n self.Matdisplay_Canvas.draw()\r\n self.selected_ML_Index.remove(self.SelectedCellIndex)\r\n self.selected_cells_infor_dict.pop('cell{}_verts'.format(str(self.SelectedCellIndex)))"
] | [
"0.6985751",
"0.6849019",
"0.67640656",
"0.6757863",
"0.6691283",
"0.668399",
"0.66343147",
"0.65790117",
"0.65168613",
"0.639773",
"0.63874525",
"0.6332912",
"0.6331097",
"0.63164014",
"0.62920564",
"0.6288776",
"0.6197164",
"0.61816067",
"0.61125493",
"0.6097194",
"0.60843825",
"0.6069002",
"0.6052284",
"0.6043118",
"0.60144854",
"0.60137576",
"0.59806937",
"0.5977981",
"0.5977866",
"0.5971645"
] | 0.7410787 | 0 |
It Will Draw The Numbers On The Board | def draw_numbers(self):
for i in range(9):
for j in range(9):
pos = self.get_pos_in_grid(i, j)
text = self.grid[i][j]
text = '' if text == 0 else str(text)
self.text_to_screen(text, pos) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_given_numbers(self):\n y = self.step_y/2 \n while y < self.height:\n x = self.step_x/2\n while x < self.width:\n\n # find row and column of the board based on the step sizes\n r, c = round((y-self.step_y/2)/self.step_y), round((x-self.step_x/2)/self.step_x)\n number = self.board[r][c] or ''\n self.text_ids[r][c] = self.canvas.create_text(x, y, text=str(number), **style.numbers)\n sleep(0.05)\n self.canvas.update()\n x += self.step_x\n y += self.step_y",
"def draw(self):\n for x in range(self.numRows):\n print self.grid[x]",
"def drawBoard(self):\r\n \r\n for i in range(8):\r\n for j in range(8):\r\n if (i %2 == 0 and j % 2 == 0) or (i % 2 !=0 and j % 2 != 0):\r\n COLOR = COLOR1\r\n else: COLOR = COLOR2\r\n pygame.draw.rect(screen, COLOR, Rect(i*50, j*50, 50, 50))\r\n\r\n self.drawLabels()\r\n \r\n if not self.piecesDrawn:\r\n self.drawPieces()\r\n self.piecesDrawn = True",
"def steady_numbers(self):\n for y in range(9):\n for x in range(9):\n if not self.grid[y][x] == 0:\n n = self.font.render(str(self.grid[y][x]), 1, 'black')\n self.screen.blit(n, ((self.x_pos + x * 80), (self.y_pos + y * 80)))",
"def updateBoard():\n #Drawing the initial board positions;\n for y in range(1, n+1): #1,2,3\n for x in range(1, n+1):\n val = positions[y][x];\n colorNode((x,y), numColors[val])\n label = Text(Point((x-0.5)*grid_side, (y-0.5)*grid_side),val);\n label.setSize(30)\n label.draw(win)",
"def draw_board(self):\n print(\"\\n\" * 10)\n print(\"-PRINTING BOARD-\")\n for row in self.grid:\n for column in row:\n print(column.character(), end=\"\")\n print() # to create a new line",
"def draw_cell(self, board, x, y, color):\n r = self.rect_area(x, y) # gets rect area for given cell\n pygame.draw.rect(self.screen, color, r, 3)\n e = self.font.render(str(board[y][x]), 1, (0, 0, 0)) # creates number\n self.screen.blit(e, (self.x_pos + x * 80, self.y_pos + y * 80)) # draws number\n pygame.display.update(r) # updates screen to showcase rect",
"def draw(self):\n\t\tfor i in range(0, self.size):\n\t\t\tprint('\\n' + \"----\" * self.size)\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tprint(self.grid[i][j] + ' |', end=\" \")\n\t\tprint('\\n'+ \"----\" * self.size + '\\n')",
"def drawBoard(self):\r\n self.outer.draw(self.surface)\r\n self.background.draw(self.surface)\r\n for point in self.points:\r\n point.draw(self.surface)\r\n point.drawCheckers(self.surface)\r\n self.dice.draw(self.surface)\r\n self.message.draw(self.surface)\r\n self.checkerBox.draw(self.surface)\r\n self.checkerBox.drawCheckers(self.surface)\r\n for bar in self.bar:\r\n bar.draw(self.surface)\r\n bar.drawCheckers(self.surface)\r\n pygame.display.flip()",
"def print_board(board):\n win = GraphWin('N-Rainhas', 850, 650)\n win.setBackground(color_rgb(188, 237, 145))\n title = Text(Point(400, 30), \"N-Rainhas\")\n title.setSize(20)\n title.draw(win)\n\n # Desenha tabuleiro principal\n rect = Rectangle(\n Point(150 - 5, 100 - 5),\n Point(650 + 5, 600 + 5)\n )\n rect.setFill('brown')\n rect.draw(win)\n\n # Desenha as casas no tabuleiro\n square = 500 / N\n for i in range(N):\n for j in range(N):\n if (i + j) % 2 == 0:\n x = 150 + i * square\n y = 100 + j * square\n rect = Rectangle(\n Point(x, y),\n Point(x + square, y + square)\n )\n rect.setFill('gray')\n rect.draw(win)\n\n # Desenha as peças no tabuleiro\n x = 150 + i * square\n y = 100 + board[i] * square\n cir = Circle(\n Point(x + 0.5 * square, y + 0.5 * square), 160 / N\n )\n cir.setFill('blue')\n cir.draw(win)\n\n win.getMouse()\n win.close()",
"def draw_number(self, number):\n\t\tglobal changed_rects\n\n\t\tnumber = number -1\t\t# correct to correct index\n\t\tself.image.fill((255, 255, 255))\n\t\tpygame.draw.lines(\n\t\t\tself.image,\n\t\t\t(0, 0, 0),\t\t# black\n\t\t\tTrue,\t\t\t# connects last to first point\n\t\t\t[(0, 0), (self.size, 0), (self.size, self.size), (0, self.size)],\n\t\t\tself.size//20)\t\t\t# line width\n\t\tfor pip in NUMBERS[number]:\n\t\t\tpygame.draw.circle(\n\t\t\t\tself.image,\n\t\t\t\t(0, 0, 0),\n\t\t\t\t(self.size*pip).astype(int),\n\t\t\t\tself.size//10)\t\t\t\t\t# radius\n\t\t# Add the rect to the changed rects list (doesnt work on init)\n\t\tchanged_rects.append(self.rect)\n\n\t\treturn self.image",
"def draw(self):\n self.drawLine()\n\n for l in range(0, self.height):\n print(\"|\", end='', flush=True)\n for c in range(0, self.width):\n print(\" \" + str(self.grid[l][c]) + \" |\", end='', flush=True)\n print(\"\\n\", end='', flush=True)\n\n self.drawLine()",
"def draw_number(self):\n text_color = (0, 0, 0)\n if self.bombs_around == 1:\n text_color = (0, 0, 150)\n if self.bombs_around == 2:\n text_color = (0, 150, 0)\n if self.bombs_around == 3:\n text_color = (150, 0, 0)\n if self.bombs_around == 4:\n text_color = (133, 39, 138)\n if self.bombs_around == 5:\n text_color = (128, 0, 0)\n if self.bombs_around == 6:\n text_color = (175, 238, 238)\n if self.bombs_around == 7:\n text_color = (0, 0, 0)\n if self.bombs_around == 8:\n text_color = (33, 161, 166)\n\n font = pygame.font.Font(\"fonts/JetBrainsMono-Bold.ttf\", 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(\n str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))",
"def draw_n(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(135)\r\n pen.forward(1.414*40)\r\n pen.left(135)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.back(40)\r\n pen.forward(50)",
"def draw_board(board_state):\n print(\" {} | {} | {} \".format(board_state[6], board_state[7], board_state[8]))\n print(\"-----------\")\n print(\" {} | {} | {} \".format(board_state[3], board_state[4], board_state[5]))\n print(\"-----------\")\n print(\" {} | {} | {} \".format(board_state[0], board_state[1], board_state[2]))",
"def draw_board(self):\n for i in range(0, 800, 80):\n if i == 80:\n pygame.draw.line(self.screen, 'black', (i, 80), (i, 800), width=3)\n pygame.draw.line(self.screen, (0, 0, 128), (0, i), (720, i), width=5)\n continue\n pygame.draw.line(self.screen, 'black', (i, 80), (i, 800), width=3)\n pygame.draw.line(self.screen, 'black', (0, i), (720, i), width=3)\n for j in range(240, 800, 240):\n pygame.draw.line(self.screen, (0, 0, 128), (j, 80), (j, 800), width=5)\n pygame.draw.line(self.screen, (0, 0, 128), (0, j + 80), (720, j + 80), width=5)\n pygame.draw.line(self.screen, (0, 0, 128), (0, 80), (0, 800), width=5)",
"def draw_board(self):\n pygame.draw.rect(background, BLACK, self.outline, 3)\n # Outline is inflated here for future use as a collidebox for the mouse\n self.outline.inflate_ip(20, 20)\n for i in range(self.size-1):\n for j in range(self.size-1):\n rect = pygame.Rect(5+GRID_SIZE+(GRID_SIZE*i), 5+GRID_SIZE+(GRID_SIZE*j), GRID_SIZE, GRID_SIZE)\n pygame.draw.rect(background, COLOR[BLACK], rect, 1)\n if self.size >= 13:\n for i in range(3):\n for j in range(3):\n coords = (5+4*GRID_SIZE+(GRID_SIZE*6*i), 5+4*GRID_SIZE+(GRID_SIZE*6*j))\n pygame.draw.circle(background, COLOR[BLACK], coords, 5, 0)\n screen.blit(background, (0, 0))\n pygame.display.update()",
"def printBoard(self):",
"def draw(self, win):\n for y in range(len(self.board)):\n for x, color in enumerate(self.board[y]):\n pygame.draw.rect(win, color, (self.x+x*self.cell_size, self.y+y*self.cell_size,\n self.cell_size, self.cell_size), 0)\n\n pygame.draw.rect(win, (0, 0, 0), (self.x, self.y, self.width, self.height), BORDER_THICKNESS)",
"def _draw_score(self) -> None:\n score_digits = [int(x) for x in list(str(self.game.score))]\n total_width = 0 # total width of all numbers to be printed\n\n for digit in score_digits:\n total_width += self.images['numbers'][digit].get_width()\n\n x_offset = (self._screen_width - total_width) / 2\n\n for digit in score_digits:\n self.surface.blit(self.images['numbers'][digit],\n (x_offset, self._screen_height * 0.1))\n x_offset += self.images['numbers'][digit].get_width()",
"def drawGrid(self):\n for div in range(NBCELL):\n sec = SSIZE*div\n self.can.create_line(0, sec, GSIZE, sec, width=3, fill=GFILL)\n self.can.create_line(sec, 0, sec, GSIZE, width=3, fill=GFILL)",
"def draw():\r\n\r\n print('\\n+---+---+---+')\r\n for i in range(9):\r\n print('| ' + board[i] + ' ', end='')\r\n if (i + 1) % 3 == 0:\r\n print('|\\n+---+---+---+')",
"def draw( self ):\n\n if self.__drawnGrid == 0:\n draw_grid().draw()\n\n self.__drawnGrid = 1\n\n column = 0\n row = 0\n i = 0\n for mark in self.__grid:\n if row == 0:\n turtle.goto(-60+60*column, 60)\n elif row == 1:\n turtle.goto(-60+60*column, 0)\n elif row == 2:\n turtle.goto(-60+60*column, -60)\n\n if isinstance(mark, str):\n if mark.lower() == 'x': \n drawX(i)\n elif mark.lower() == 'o':\n drawO(i)\n\n column += 1\n\n if column == 3:\n column = 0\n row += 1\n\n i+=1\n\n turtle.goto(-60, 60)",
"def drawBoard():\t\n\t#draw 64 Rectangles from (MARGINH,MARGINV) with CASESIZE sizes\n\tfor i in range(BOARDSIZE):\n\t\tfor j in range(BOARDSIZE):\n\t\t\tpygame.draw.rect(DISPLAYSURF, BLACK, [MARGINH + (i)*CASESIZE, MARGINV + (j)*CASESIZE, CASESIZE, CASESIZE], 1)",
"def display(self):\n width = self.width\n height = self.height\n x = self.x\n y = self.y\n for d_y in range(y):\n print()\n for h in range(height):\n if x != 0:\n print(\" \" * x, end=\"\")\n print(\"#\" * width)",
"def DrawGrid(self, count):\n for i in range(0, self.width, self.incr):\n self.canvas.create_line(i, 100, i, 700, fill = \"#696969\", width = 1)\n for i in range(100, 800, 100):\n self.canvas.create_line(0, i, self.width, i, fill = \"#696969\", width = 1)\n self.canvas.create_rectangle(self.incr * 4, self.height - self.incr * 3.5,\n self.width - self.incr * 4, self.height, fill = \"black\", width = 3)\n for i in range(int(self.height - self.incr * 3.5), self.height, int(self.incr / 4)):\n self.canvas.create_line(self.incr * 4, i, self.width - self.incr * 4,\n i, fill = \"#696969\", width = 1)\n for i in range(self.incr * 4, self.width - self.incr * 4 + 1, int(self.incr / 4)):\n self.canvas.create_line(i, self.height - self.incr * 3.5, i, self.height,\n fill = \"#696969\", width = 1)",
"def draw():",
"def draw_grid(self) -> None:\n grid = self.life.curr_generation\n for row in range(self.cell_height):\n for column in range(self.cell_width):\n if grid[row][column] == 1:\n color = \"green\"\n else:\n color = \"white\"\n pygame.draw.rect(\n self.screen,\n pygame.Color(color),\n (column * self.cell_size, row * self.cell_size, self.cell_size, self.cell_size),\n )",
"def draw_grid(self):\n\n screen.fill(GREY)\n\n for row in self.grid:\n for cell in row:\n if cell.root:\n color = GREEN\n elif cell.goal:\n color = RED\n elif cell.value:\n color = DARK_BLUE\n elif cell.visited:\n color = LIGHT_BLUE\n elif cell.f:\n color = LIGHT_GREEN\n elif cell.wall:\n color = GRAY\n else:\n color = WHITE\n\n pygame.draw.rect(screen, color, cell.rect)\n\n x, y = cell.rect.x, cell.rect.y\n\n if cell.g:\n self.draw_score(x + 2, y + 2, cell.g)\n if cell.h:\n self.draw_score(x + 18, y + 2, cell.h)\n if cell.f:\n self.draw_score(x + 2, y + self.cell_size - 10, cell.f)",
"def _render_board(self):\n for index, row in enumerate(self._board):\n print(index, end=' ') if index < 10 else print(index, end=' ')\n list(map(lambda x: print(x, end=' '), row))\n print()\n print(' ', end='')\n for i in range(len(self._board)):\n print(i, end=' ') if i < 10 else print(i, end=' ')\n print()"
] | [
"0.7678089",
"0.7436017",
"0.7178829",
"0.7080666",
"0.70133114",
"0.6922454",
"0.6885909",
"0.6860841",
"0.6852656",
"0.68270683",
"0.68268335",
"0.6806605",
"0.67897975",
"0.6787238",
"0.6716217",
"0.6704868",
"0.6694162",
"0.66936094",
"0.66898865",
"0.6687331",
"0.66805345",
"0.6654767",
"0.664507",
"0.66170055",
"0.6610125",
"0.6600602",
"0.6598964",
"0.65830696",
"0.6567196",
"0.656463"
] | 0.8320049 | 0 |
This Will Draw All The Buttons Onto The Screen | def draw_buttons(self):
for button in self.playing_buttons:
button.draw(self.screen) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_buttons(self):\n for button in self.buttons:\n x = button.starting_x\n y = button.starting_y\n self.screen.fill(button.color, ((x, y), (button.width, button.height)))",
"def drawButtons(self):\n self.__pausedTitle.draw(self.__screen)\n self.__exitGameButton.draw(self.__screen)\n self.__resumeButton.draw(self.__screen)\n self.__mainMenuButton.draw(self.__screen)",
"def draw_buttons(screen):\r\n for button in start_buttons: # goes through every start button\r\n if button.active:\r\n button.draw(screen) # shows the button\r\n for button in end_buttons: # goes through every start button\r\n if button.active:\r\n button.draw(screen) # shows the button\r\n for onewriting in button_writings_start: # goes through every start writing\r\n if onewriting.active:\r\n onewriting.draw(screen, True) # shows the writing\r\n for onewriting in button_writings_end: # goes through every in game writing\r\n if onewriting.active:\r\n onewriting.draw(screen, True) # shows the writing\r",
"def draw(self, surface, offset=(0,0)):\n for button in self.buttons:\n button.draw(surface, offset)",
"def draw_request_buttons(screen):\r\n for button in request_buttons: # goes through every request button\r\n button.draw(screen) # shows that button\r\n for onewriting in button_writings_request: # goes through every request button's writing\r\n onewriting.draw(screen, True) # shows that writing\r",
"def sprint(self):\n self.buttons = []\n self.screen.blit(self.background_image, (0, 0))\n self.create_button((self.width // 2 - 257, self.height // 8 - 85), 501, 200, Colors.BLACK, \"20L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 3 - 81), 501, 200, Colors.BLACK, \"40L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 5 - 86), 501, 200, Colors.BLACK, \"100L\")\n self.create_button((self.width // 2 - 257, self.height // 8 * 7 - 85), 501, 200, Colors.BLACK, \"1000L\")\n self.show_buttons()\n self.show_text_in_buttons()\n pygame.display.flip()",
"def draw_buttons(self, keys='000000'):\n \n xpos = None\n ypos = 500\n button_width = 60\n button_height = 80\n offset = 7\n\n x_divisor = self.SCREEN_WIDTH / 6\n x_scalar = 0.8\n x_buffer = (1.0 - x_scalar) * self.SCREEN_WIDTH * 0.5\n x_middle = self.SCREEN_WIDTH * 0.07\n\n key_order = [3, 4, 5, 2, 1, 0]\n \n for i in range(len(keys)):\n if keys[key_order[i]] == '1':\n color = self.display_states[self.display_names[self.current_display_state]]['text']\n elif keys[key_order[i]] == '0':\n color = self.display_states[self.display_names[self.current_display_state]]['background']\n \n if i > 2:\n xpos = (i* x_divisor * x_scalar) + x_buffer + x_middle\n else:\n xpos = (i* x_divisor * x_scalar) + x_buffer\n\n position = (xpos, ypos, button_width, button_height)\n position_small = (xpos + offset, ypos + offset, button_width - (2 * offset), button_height - (2 * offset))\n\n self.draw_single_button(self.display_states[self.display_names[self.current_display_state]]['text'], position)\n self.draw_single_button(color, position_small)",
"def draw_button(self):\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(self.msg_img, self.msg_img_rect)",
"def draw_button(self):\n # Draw the button's outline\n pg.draw.rect(self.screen, self.text_color, pg.Rect(self.rect.left - 1, self.rect.top - 1, self.rect.width + 2, self.rect.height + 2))\n\n # Draw the button\n pg.draw.rect(self.screen, self.button_color, self.rect)\n\n # Blit the button's text onto it\n self.screen.blit(self.txt_surface, self.txt_surface_rect)",
"def draw(self, screen: pygame.Surface) -> None:\n page = self.pages[self.current_page]\n # Draw background\n screen.blit(page.background, (0, 0))\n # Draw buttons to screen\n for button in page.buttons:\n if button.image is not None:\n screen.blit(button.image, button.rect)\n screen.blit(button.text, button.rect)\n # Draw highlights if mouse is hovering over button\n if button.tag not in ('display', 'output') and \\\n button.rect.collidepoint(self.mouse_pos):\n surf = create_trans_surf(button.rect.width, button.rect.height, 50, (100, 255, 100))\n screen.blit(surf, button.rect)",
"def redrawButtons(self):\n for self.a in range(self.btnNumber): # btnNumber = maximum number of buttons\n self.btns[self.a].configure(text=self.btnList[self.a])\n self.btns[self.a].grid()\n self.keypad = 'KeyPad'\n self.title(self.keypad)",
"def create_buttons(self):\r\n # The buttons are created in the center of the screen then offset in the x/y directions by a number of button\r\n # widths. E.g. The \"-1, 0\" for the easy button means to shift the button one button width left of center.\r\n self.easy_button = Button(self.ai_game, \"Easy\", -1, 0)\r\n self.normal_button = Button(self.ai_game, \"Normal\", 0, 0)\r\n self.hard_button = Button(self.ai_game, \"Hard\", 1, 0)\r\n self.quit_button = Button(self.ai_game, \"Quit\", 0, 1)\r\n self.buttons = (self.easy_button, self.normal_button,\r\n self.hard_button, self.quit_button)",
"def draw_menu(self, menu):\n for but in menu.get_buttons():\n self.win.blit(but.image, but.rect)",
"def show_text_in_buttons(self):\n for button in self.buttons:\n self.screen.blit(button.rendered_text, button.get_text_position())",
"def draw_button(self):\r\n self.surface.fill(self.button_color, self.rect)\r\n self.surface.blit(self.msg_image, self.msg_image_rect)",
"def draw_placement_buttons(screen):\r\n for button in placement_buttons: # goes through every request button\r\n button.draw(screen) # shows that button\r\n for onewriting in placement_writings: # goes through every request button's writing\r\n onewriting.draw(screen, True) # shows that writing\r",
"def DrawButtons(self, dc, _rect, bmp, bkcolour, button_state):\r\n\r\n rect = wx.Rect(*_rect)\r\n\r\n if button_state == AUI_BUTTON_STATE_PRESSED:\r\n rect.x += 1\r\n rect.y += 1\r\n\r\n if button_state in [AUI_BUTTON_STATE_HOVER, AUI_BUTTON_STATE_PRESSED]:\r\n dc.SetBrush(wx.Brush(StepColour(bkcolour, 120)))\r\n dc.SetPen(wx.Pen(StepColour(bkcolour, 75)))\r\n\r\n # draw the background behind the button\r\n dc.DrawRectangle(rect.x, rect.y, 15, 15)\r\n\r\n # draw the button itself\r\n dc.DrawBitmap(bmp, rect.x, rect.y, True)",
"def draw_buttons(self): \n self.button_frame = Frame(self)\n\n # -- getting images\n prev_image = PhotoImage(file=self.directory + '/images/previous.png')\n prev_image = prev_image.subsample(10, 10) \n\n next_image = PhotoImage(file=self.directory + '/images/next.png')\n next_image = next_image.subsample(10, 10) \n \n # -- adding image to label\n prev_label = ttk.Label(self.button_frame, image = prev_image)\n next_label = ttk.Label(self.button_frame, image = next_image)\n\n prev_label.image = prev_image\n next_label.image = next_image\n\n # -- adding a twitter hide button\n self.twitter_hide = ttk.Button(self.button_frame, text='hide twitter')\n \n # -- adding the buttons to the frame \n prev_label.pack(side=RIGHT, padx=75) \n self.twitter_hide.pack(side=RIGHT, padx=200) \n next_label.pack(side=LEFT, padx=75)\n\n # -- adding bindings and commands\n prev_label.bind('<Button-1>', self.prev_article)\n next_label.bind('<Button-1>', self.next_article) \n self.twitter_hide.config(command=self.hide_twitter) \n\n # -- adding frame to canvas\n self.button_frame.pack(side=BOTTOM, fill=X)",
"def paintButtons(self):\n\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_OK)\n buttonOK = guiobjects.OcempImageButtonTransparent(imgPath, self.buttonTooltips[\"ok\"], self.showTooltip, self.removeTooltip)\n buttonOK.topleft = [770, 30]\n buttonOK.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.changeConfiguration)\n self.window.add_child(buttonOK)\n\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_CANCEL)\n buttonCancel = guiobjects.OcempImageButtonTransparent(imgPath, self.buttonTooltips[\"cancel\"], self.showTooltip, self.removeTooltip)\n buttonCancel.topleft = [890, 30]\n buttonCancel.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeConfiguration)\n self.window.add_child(buttonCancel)",
"def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()",
"def draw_but(self, window):\n # draws the rectangular button\n p1 = graphics.Point(self.cen_point_x - self.width / 2, \n self.cen_point_y - self.height / 2)\n p2 = graphics.Point(self.cen_point_x + self.width / 2, \n self.cen_point_y + self.height / 2)\n self.button = graphics.Rectangle(p1, p2)\n self.button.setOutline(\"Orange\")\n self.button.draw(window)\n \n # draws the text on the button\n self.text.draw(window)",
"def draw(self):\n # static\n surf = self.surf.copy()\n\n # dynamic\n pos = (10+int((self.val-self.mini)/(self.maxi-self.mini)*130), 40)\n self.button_rect = self.button_surf.get_rect(center=pos)\n surf.blit(self.button_surf, self.button_rect)\n # move of button box to correct screen position\n self.button_rect.move_ip(self.xpos, self.ypos)\n\n # screen\n screen.blit(surf, (self.xpos, self.ypos))",
"def draw_menu_buttons():\n global menu_leds\n \n def draw_menu_led(x, y, color):\n \"\"\"Draws menu lights to launchpad and stores them in menu_leds\n \n :param x: x-coord of led\n :param y: y-coord of led\n :param color: color of led\n :return: None\n \"\"\"\n lp.led_ctrl_xy(x, y, *color)\n if y != 0:\n menu_leds[7 + y] = color\n else:\n menu_leds[x] = color\n \n # Clear all circular buttons\n for i in range(8):\n draw_menu_led(i, 0, (0, 0))\n draw_menu_led(8, i + 1, (0, 0))\n \n # Color all menu buttons\n draw_menu_led(*BUT_LEFT, (3, 3)) # Select buttons yellow\n draw_menu_led(*BUT_RIGHT, (3, 3))\n draw_menu_led(*BUT_START, (0, 3)) # Start button green\n draw_menu_led(*BUT_QUIT, (3, 0)) # Quit button red",
"def activate_buts(self):\n\n for b in self.gamebuttons:\n b.activate()",
"def draw(self):\n self.screen.fill((0,51,102))\n # get the new drawables\n self.drawables = (self.game_model.get_background_drawables()\n + self.game_model.get_plane_drawables()\n + self.game_model.get_bullet_drawables()\n + self.game_model.get_enemy_drawables())\n for d in self.drawables:\n rect = d.get_rect()\n surf = d.get_surface()\n surf.set_colorkey((255,255,255))\n self.screen.blit(surf, rect)",
"def __place_main_buttons(self):\n\n #load the images and the locations\n base_x = self.__main_buttons_coords[\"x\"]\n base_y = self.__main_buttons_coords[\"y\"]\n self.__exit_btn_img = ImageTk.PhotoImage(PIL.Image.open(r\"Images server\\exit btn.png\"))\n self.__kick_all_passengers_img = ImageTk.PhotoImage(PIL.Image.open(r\"Images server\\kick people btn.png\"))\n self.__kick_all_buses_btn_img = ImageTk.PhotoImage(PIL.Image.open(r\"Images server\\kick buses btn.png\"))\n #create the button objects and set their settings to match the desgin\n self.__kick_buses_btn = Button(self.__main_window, image=self.__kick_all_buses_btn_img, command=lambda: self.__bus_controller.kick_all_buses(reason=\"kicked all buses by the console\"), borderwidth=0, background = \"#000000\", activebackground = \"#083417\")\n self.__kick__all_passengers_btn = Button(self.__main_window, image=self.__kick_all_passengers_img,command = lambda:self.__telegram_controller.kick_all_passengers(\"kicked all users from console\"), borderwidth=0, background = \"#000000\", activebackground = \"#083417\")\n self.__exit_button = Button(self.__main_window, command=self.__stop, image=self.__exit_btn_img, borderwidth=0, background = \"#000000\", activebackground = \"#B91D1D\")\n #place the buttons on the screen\n self.__kick__all_passengers_btn.place(x=base_x, y=base_y)\n self.__exit_button.place(x=base_x+210, y=base_y+133)\n self.__kick_buses_btn.place(x=base_x + 210, y=base_y)",
"def create_buttons(grid_display, text_color, outline_color, screen_width, screen_height):\n\n all_button = []\n\n # create font used inside the buttons\n button_font = pygame.font.SysFont(\"monospace\", screen_width / 20)\n\n # help button\n temp_font = button_font.render(\"test\", 1, text_color)\n center_text = temp_font.get_rect()\n center_text.centery = screen_height * .75\n center_text.width = screen_width * .5\n center_text.centerx = screen_width * .5\n help_button = Button(center_text.copy(), \"Help\", text_color, outline_color, button_font)\n all_button.append(help_button)\n\n # 1 player button\n center_text.left = screen_width * .27\n center_text.width = screen_width * .12\n center_text.top = screen_height * .5\n center_text.height = screen_height * .1\n one_button = Button(center_text.copy(), \"1\", text_color, outline_color, button_font)\n all_button.append(one_button)\n\n # 2 player button\n center_text.left = screen_width * .44\n two_button = Button(center_text.copy(), \"2\", text_color, outline_color, button_font)\n all_button.append(two_button)\n\n # 3 player button\n center_text.left = screen_width * .6\n three_button = Button(center_text.copy(), \"3\", text_color, outline_color, button_font)\n all_button.append(three_button)\n\n # back button\n center_text.width = screen_width * .25\n center_text.centerx = grid_display.get_rect().centerx\n center_text.centery = screen_height * .8\n back_button = Button(center_text.copy(), \"Back\", text_color, outline_color, button_font)\n all_button.append(back_button)\n\n # continue button\n center_text.centery = screen_height * .62\n center_text.centerx = screen_width * .5\n pause_button = Button(center_text.copy(), \"Continue\", text_color, outline_color, button_font)\n all_button.append(pause_button)\n\n # reset button\n center_text.centery = screen_height * .75\n center_text.centerx = screen_width * .5\n reset_button = Button(center_text.copy(), \"Reset\", text_color, outline_color, button_font)\n all_button.append(reset_button)\n\n # return to menu button\n center_text.centery = screen_height * .88\n center_text.centerx = screen_width * .5\n back_to_menu_button = Button(center_text.copy(), \"Menu\", text_color, outline_color, button_font)\n all_button.append(back_to_menu_button)\n\n return all_button",
"def draw(self, screen):",
"def generate_buttons(self):\n raise Exception('Implement me!')",
"def draw(screen):\n MY.restart_button.draw(screen)\n MY.display_text.draw(screen)"
] | [
"0.85175043",
"0.8169243",
"0.8007272",
"0.7607241",
"0.7395919",
"0.73840696",
"0.7270972",
"0.7066256",
"0.69378734",
"0.6934372",
"0.69018245",
"0.68842477",
"0.6872431",
"0.6840603",
"0.6824653",
"0.67917746",
"0.6725772",
"0.6670006",
"0.6659281",
"0.66530484",
"0.6620136",
"0.65550596",
"0.6544652",
"0.6542965",
"0.65150404",
"0.6503239",
"0.64989835",
"0.6479144",
"0.64764404",
"0.6458003"
] | 0.8405718 | 1 |
It Will Shade All The Locked Cells | def shade_locked_cells(self):
for i in range(9):
for j in range(9):
if self.grid_check[i][j] != 0:
self.color_cell(pos=(i, j), color=LOCKED_CELL) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lock(cell):\n m = cell['metadata']\n m[\"editable\"] = False\n m[\"deletable\"] = False",
"def f_unlock(self):\n self._locked = False",
"def erase(self):\n\tself.state={}\n\tself.display(update_board=0)",
"def unlockMeshes():\n setLockOnMeshes(0)",
"def dec_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain -= 1\r\n cell.yank()",
"def clear_rows(self):\n ### Previous version had a bug, in that it assumed the set of ###\n ### indices of full rows had to be a contiguous sequence! ###\n full_rows = [j for j in range(ROWS) if all(\n (i, j) in self.locked_squares for i in range(COLS))]\n if not full_rows: return\n ### Calculate how for to drop each other row, and do it ###\n drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}\n self.locked_squares = {(i, j+drop[j]): color for (i, j), color in\n self.locked_squares.items() if j not in full_rows}\n ### Now just update score, etc. ###\n d = len(full_rows)\n self.increment_lines(d)\n self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])\n if self.level < self.lines // 10 + 1:\n self.increment_level()",
"def mark_safe(self, cell):\n \n if cell in self.cells:\n self.cells.discard(cell)",
"def initialize(self):\r\n for cell in self.free_cell_list:\r\n cell.unlock()\r\n self.add_cell(cell)\r\n self.free_cell_list.clear()",
"def unlock(*args):",
"def set_cell_to_hole(self):\n self.tick = \"H\"\n self.is_hole = True\n self.is_active = False",
"def lock_table(self):\n\n self.status = 'Locked'",
"def mark_safe(self, cell):\n #if cell in self.cells, else do nothing\n if cell in self.cells:\n #remove the cell since known\n self.cells.discard(cell)",
"def mark_safe(self, cell):\n if cell in self.cells:\n self.cells.remove(cell)",
"def disable_cells(self):\n for index in range(0, 9):\n cell = getattr(self, 'cell_' + str(index))\n cell.config(state=DISABLED)",
"def toggle_fore_mod(self, checked):\n for tile in self.tiles:\n tile.toggle_fore_mod(checked)",
"def reset_board(self):\n cell_list = self.get_cells()\n for current_cell in cell_list:\n current_cell.set_cell_state(0) # remove player ownership of cell",
"def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)",
"def lockMeshes():\n setLockOnMeshes(2)",
"def mark_safe(self, cell):\n if cell in self.cells:\n self.cells.remove(cell)\n #raise NotImplementedError",
"def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()",
"def lock_curr_piece(self):\n for pos in self.curr_piece.occupying():\n self.locked_squares[pos] = self.curr_piece.color\n self.clear_rows()\n self.curr_piece = self.next_piece_display.get_piece()\n if any(pos in self.locked_squares\n for pos in self.curr_piece.occupying()):\n self.game_over()",
"def update_invalid(self):\n self.invalid = []\n for i in range(9):\n for j in range(9):\n if not self.check_if_locked((i, j)) and not self.check_entered((i, j)) and self.grid[i][j] != 0:\n self.invalid.append((i, j))",
"def unlock(self):\n raise NotImplementedError",
"def reset(self) :\n for i in range(len(self.playerCellList)) :\n for j in range(len(self.playerCellList[i])) :\n self.playerCellList[i][j].hasPawn = False",
"def unlock_all(self):\n for store in self.stores.values():\n if store.locked:\n self.log.debug(\"%s: Unlocking storage...\" % store)\n store.unlock()",
"def all_off(self):\n self.fill_off()\n self.update()\n self.fill_off()\n self.update()",
"def reset(self):\r\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \r\n for dummy_row in range(self._grid_height) ]\r\n \r\n \r\n self.new_tile()\r\n self.new_tile()",
"def renewMasking(self, indices, colours_dict):\n for idx in self.abundance_df.index:\n if idx in indices:\n self.abundance_df.loc[idx, 'masked'] = False\n else:\n self.abundance_df.loc[idx, 'masked'] = True\n if idx in colours_dict:\n self.abundance_df.loc[idx, 'colour'] = colours_dict[idx]\n else: \n self.abundance_df.loc[idx, 'colour'] = 'undefined'",
"def unlock(self):\n return _image.image_unlock(self)",
"def clear_blockages(self):\n debug.info(3,\"Clearing all blockages\")\n self.rg.clear_blockages()"
] | [
"0.6175055",
"0.57997423",
"0.5639808",
"0.5596297",
"0.5584451",
"0.5535728",
"0.5517984",
"0.5450767",
"0.5386599",
"0.53715956",
"0.5323088",
"0.5304359",
"0.5285234",
"0.52705973",
"0.52583265",
"0.5231134",
"0.5224701",
"0.5221622",
"0.5218678",
"0.52166355",
"0.5212057",
"0.5196187",
"0.5162183",
"0.51619816",
"0.5128727",
"0.50982916",
"0.5086187",
"0.5082",
"0.50785637",
"0.50594157"
] | 0.7075585 | 0 |
It Will Color The Invalid Entry | def color_invalid(self):
for i in self.invalid:
self.color_cell(i, INVALID) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_invalid():\n print(colored('Invalid input\\n', 'red', attrs=['bold']))",
"def _update_color(self, txt):\n color = self.valid_color\n if not self.hasAcceptableInput():\n color = self.invalid_color\n self.setStyleSheet(\"background-color: %s\" % color)",
"def _confirm_color(self, event = None):\n color = self._entry.get().strip()\n if color != \"\":\n self._color = color\n self._window.destroy()",
"def validate_color(self, field):\n if match(r'^[A-Fa-f0-9]{0,6}$', field.data):\n field.data = field.data.lower()\n else:\n raise ValidationError('Field is not a valid hexadecimal color code.')",
"def validateInput(self):\n palette = QPalette()\n validInput = self.sender().hasAcceptableInput()\n if validInput:\n palette.setColor(QPalette.Text, Qt.black)\n else:\n palette.setColor(QPalette.Text, Qt.blue)\n self.sender().setPalette(palette)\n self.hasValidInput.emit(validInput)",
"def print_illegal_color_format_screen( enteredBGColor, enteredFGColor, convertedBGColor, convertedFGColor ):\n print \"\"\n print \"Error: are the passed in colors valid?\"\n print \" - passed in background-color '\" + enteredBGColor + \"' was converted to '\" + convertedBGColor + \"'.\"\n print \" - passed in foreground-color '\" + enteredFGColor + \"' was converted to '\" + convertedFGColor + \"'.\"\n print \"\"",
"def throwColorError(type, r,g,b):\n\t\tif not (r >= 0): \n\t\t\tError.wrong_color_number(type, r)\n\t\telif not (g >= 0):\n\t\t\tError.wrong_color_number(type, g)\n\t\telse:\n\t\t\tError.wrong_color_number(type, b)",
"def __quickSearchSetEditColors(self, error):\n if error:\n palette = self.quickFindtextCombo.lineEdit().palette()\n palette.setColor(QPalette.Base, QColor(\"red\"))\n palette.setColor(QPalette.Text, QColor(\"white\"))\n self.quickFindtextCombo.lineEdit().setPalette(palette)\n else:\n palette = self.quickFindtextCombo.lineEdit().palette()\n palette.setColor(\n QPalette.Base,\n self.quickFindtextCombo.palette().color(QPalette.Base))\n palette.setColor(\n QPalette.Text,\n self.quickFindtextCombo.palette().color(QPalette.Text))\n self.quickFindtextCombo.lineEdit().setPalette(palette)",
"def clear_entry(event):\n\tbackground = event.widget.config()['background'][-1];\n\tif(background==ERROR_COLOR):\n\t\tevent.widget.delete(0, \"end\")\n\t\tevent.widget.config(bg=WHITE)\n\tfeedback.config(text=\"\", fg=ERROR_COLOR);",
"def error(text):\n return color_str(text, 'RED')",
"def test_color__int_arg_invalid(self):\n with self.assertRaises(ValueError):\n color = pygame.Color(0x1FFFFFFFF)",
"def check_state(self, *args, **kwargs):\n\n # TODO: Implement from\n # http://stackoverflow.com/questions/27159575/pyside-modifying-widget-colour-at-runtime-without-overwriting-stylesheet\n\n sender = self.sender()\n validator = sender.validator()\n state = validator.validate(sender.text(), 0)[0]\n if state == QtGui.QValidator.Acceptable:\n color = 'none' # normal background color\n elif state == QtGui.QValidator.Intermediate:\n color = '#fff79a' # yellow\n else:\n color = '#f6989d' # red\n sender.setStyleSheet('QLineEdit { background-color: %s }' % color)",
"def clean_colors(self):\n err = _(\"Color must be a valid hex triplet.\")\n colors = ['background_color_custom', 'font_color_custom']\n colors2 = colors + ['background_color', 'font_color']\n # If there are custom colors specified in settings, length of\n # self.COLORS will be > 6, so check for validity\n if len(self.COLORS) > 6:\n colors = colors2\n for color in colors:\n c = getattr(self, color)\n l = len(c)\n if l:\n if l != 6:\n raise ValidationError(err)\n else:\n try:\n int(c, 16)\n except ValueError:\n raise ValidationError(err)",
"def on_lineEdit_temperature_textChanged(self, p0):\n try :\n float(self.lineEdit_temperature.text())\n self.lineEdit_temperature.setStyleSheet(\"background-color: white;\")\n \n except ValueError: \n self.lineEdit_temperature.setStyleSheet(\"background-color: red;\")",
"def color_negative_red(val):\n if val == 'k':\n color = 'red' \n else:\n color = 'yellow'\n return ['color: %s' % color]*3",
"def style_error(msg='{}'):\n red_code = '\\033[0;31m'\n return text_color(msg, red_code)",
"def updateColors(self):\n self.negativeColor = (int(self.negativeRedTextField.get(\"1.0\", tk.END)),\n int(self.negativeGreenTextField.get(\"1.0\", tk.END)),\n int(self.negativeBlueTextField.get(\"1.0\", tk.END)))\n self.positiveColor = (int(self.positiveRedTextField.get(\"1.0\", tk.END)),\n int(self.positiveGreenTextField.get(\"1.0\", tk.END)),\n int(self.positiveBlueTextField.get(\"1.0\", tk.END)))\n # Update the positive and negative labels\n self.negativeLabel.config(background=self.negativeColorHex())\n self.positiveLabel.config(background=self.positiveColorHex())\n\n print(f\"Negative: {self.negativeColor}\")\n print(f\"Positive: {self.positiveColor}\")",
"def color_domain_record_cells(val):\n if isinstance(val, int):\n color = \"yellow\" if val < 3 else None\n elif isinstance(val, float):\n color = \"yellow\" if val > 4.30891 or val < 2.72120 else None\n else:\n color = None\n return f\"background-color: {color}\"",
"def test_color__sequence_arg_invalid_value_without_alpha(self):\n cls = pygame.Color\n for seq_type in (tuple, list):\n self.assertRaises(ValueError, cls, seq_type((256, 90, 80)))\n self.assertRaises(ValueError, cls, seq_type((100, 256, 80)))\n self.assertRaises(ValueError, cls, seq_type((100, 90, 256)))",
"def _color(self, args):",
"def test_color__rgba_int_args_invalid_value_without_alpha(self):\n self.assertRaises(ValueError, pygame.Color, 256, 10, 105)\n self.assertRaises(ValueError, pygame.Color, 10, 256, 105)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 256)",
"def test_manual_legend_uneven_colors():\n with pytest.raises(YellowbrickValueError, match=\"same number of colors as labels\"):\n manual_legend(None, (\"a\", \"b\", \"c\"), (\"r\", \"g\"))",
"def invalid(self):\n pass",
"def test_weirdColorFormatting(self):\n self.assertAssembledEqually(\"\\x031kinda valid\", A.fg.black[\"kinda valid\"])\n self.assertAssembledEqually(\n \"\\x03999,999kinda valid\", A.fg.green[\"9,999kinda valid\"]\n )\n self.assertAssembledEqually(\n \"\\x031,2kinda valid\", A.fg.black[A.bg.blue[\"kinda valid\"]]\n )\n self.assertAssembledEqually(\n \"\\x031,999kinda valid\", A.fg.black[A.bg.green[\"9kinda valid\"]]\n )\n self.assertAssembledEqually(\n \"\\x031,242 is a special number\",\n A.fg.black[A.bg.yellow[\"2 is a special number\"]],\n )\n self.assertAssembledEqually(\"\\x03,02oops\\x03\", A.normal[\",02oops\"])\n self.assertAssembledEqually(\"\\x03wrong\", A.normal[\"wrong\"])\n self.assertAssembledEqually(\"\\x031,hello\", A.fg.black[\"hello\"])\n self.assertAssembledEqually(\"\\x03\\x03\", A.normal)",
"def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))",
"def test_colors_fail_uncalibrated(self):\n command = ('{0} -b 100 -e {1} {2} {2} {3}').format(\n os.path.join(self.datadir,\n 'monol_testA_nustar_fpma_ev' + HEN_FILE_EXTENSION),\n 3, 5, 10)\n with pytest.raises(ValueError) as excinfo:\n hen.colors.main(command.split())\n\n assert \"No energy information is present \" in str(excinfo.value)",
"def _onEdit(self, event):\n index = self.colorlist.GetSelection()\n icol = self._indexTupleToColor(index)\n icd = wx.ColourData()\n icd.SetColour(icol)\n dialog = wx.ColourDialog(self, icd)\n\n if dialog.ShowModal() == wx.ID_OK:\n tup = _colorDataToTuple(dialog.GetColourData())\n self.graphColors[index] = tup\n self._tupleListToStrings()\n self._updateButtons(None)",
"def test_color__rgba_int_args_invalid_value(self):\n self.assertRaises(ValueError, pygame.Color, 257, 10, 105, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 257, 105, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 257, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 44, 257)",
"def highlight_unallocated(series):\n one_person_req = (series > 0) & (series <= 0.5)\n two_person_req = series > 0.5\n is_good = series == 0\n\n style = []\n for i in range(len(series)):\n if two_person_req[i]:\n style.append(\"background-color: red\")\n elif one_person_req[i]:\n style.append(\"background-color: orange\")\n elif is_good[i]:\n style.append(\"background-color: lime\")\n else:\n style.append(\"background-color: yellow\")\n\n return style",
"def callback(self, name, mode, index):\n raw, status = self.validate(self._variable.get())\n bg_color_indicator(self, status)"
] | [
"0.6991874",
"0.67031866",
"0.6483819",
"0.64674556",
"0.6457547",
"0.6432279",
"0.61969495",
"0.6153183",
"0.60659254",
"0.6022004",
"0.5955116",
"0.5907345",
"0.5892128",
"0.5884154",
"0.58290666",
"0.5822534",
"0.57697856",
"0.5750057",
"0.57197183",
"0.57189155",
"0.5716524",
"0.5704538",
"0.5703558",
"0.56771684",
"0.56725824",
"0.56670135",
"0.5654898",
"0.56533235",
"0.5650891",
"0.564137"
] | 0.7534199 | 0 |
This Will Update The Button Hover Status | def update_button_hover_status(self):
for button in self.playing_buttons:
button.update(self.mousePos) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mouse_hover(self):\n self.color1 = self.color # Color changes\n position = pygame.mouse.get_pos() # Get mouse position\n if self.rect.collidepoint(position): # If the mouse is inside the button rect\n self.color1 = LIGHT_GREEN # Change color to light green",
"def update(self):\n self.mousePos = pygame.mouse.get_pos()\n self.update_button_hover_status()",
"def on_hover(self) -> None:",
"def get_hovering(self, button_list=None):\n\t\tif button_list:\n\t\t\tbtn_list = button_list\n\t\telse:\n\t\t\tbtn_list = self.button_list\n\t\tfor button in btn_list:\n\t\t\tif button.rect.collidepoint(self.mouse_pos):\n\t\t\t\tbutton.hover = True\n\n\t\t\tif button.hover:\n\t\t\t\tif not button.rect.collidepoint(self.mouse_pos):\n\t\t\t\t\tbutton.hover = False",
"def check_button_hover(self, mouse_pos):\n for button in self.buttons: # type: Button\n if button.is_position_on_button(mouse_pos):\n button.hover()\n else:\n button.un_hover()",
"def update_reset_button(self):\r\n if self.board.hovered_tiles and self.is_left_mouse_down:\r\n self.reset_button.draw_uhoh()\r\n else:\r\n self.reset_button.draw_smiley()",
"def UpdateButtonOnScreen(self, button_ui_part, event):\r\n\r\n hit_test = self.HitTest(*event.GetPosition())\r\n\r\n if not hit_test or not button_ui_part:\r\n return\r\n \r\n state = AUI_BUTTON_STATE_NORMAL\r\n \r\n if hit_test == button_ui_part:\r\n if event.LeftDown():\r\n state = AUI_BUTTON_STATE_PRESSED\r\n else:\r\n state = AUI_BUTTON_STATE_HOVER\r\n else:\r\n if event.LeftDown():\r\n state = AUI_BUTTON_STATE_HOVER\r\n \r\n # now repaint the button with hover state\r\n cdc = wx.ClientDC(self._frame)\r\n\r\n # if the frame has a toolbar, the client area\r\n # origin will not be (0,0).\r\n pt = self._frame.GetClientAreaOrigin()\r\n if pt.x != 0 or pt.y != 0:\r\n cdc.SetDeviceOrigin(pt.x, pt.y)\r\n\r\n if hit_test.pane: \r\n self._art.DrawPaneButton(cdc, self._frame,\r\n button_ui_part.button.button_id,\r\n state,\r\n button_ui_part.rect, hit_test.pane)",
"def mouse_over(self):\n pass",
"def update(self):\n for (x, y) in self.board.fields:\n text = self.board.fields[x, y]\n self.buttons[x, y]['text'] = text\n self.buttons[x, y]['disabledforeground'] = 'black'\n if text == self.board.empty:\n self.buttons[x, y]['state'] = 'normal'\n else:\n self.buttons[x, y]['state'] = 'disabled'\n winning = self.board.won()\n if winning:\n for x, y in winning:\n self.buttons[x, y]['disabledforeground'] = 'red'\n for x, y in self.buttons:\n self.buttons[x, y]['state'] = 'disabled'\n for (x, y) in self.board.fields:\n self.buttons[x, y].update()",
"def check_button_hover(coord, play_button, high_scores_button):\r\n x = coord[0]\r\n y = coord[1]\r\n play_x = (play_button.rect.x <= x <= play_button.rect.x + play_button.width)\r\n play_y = (play_button.rect.y <= y <= play_button.rect.y + play_button.height)\r\n scores_x = (high_scores_button.rect.x <= x <= high_scores_button.rect.x + high_scores_button.width)\r\n scores_y = (high_scores_button.rect.y <= y <= high_scores_button.rect.y + high_scores_button.height)\r\n if play_x and play_y:\r\n play_button.text_color = (0, 255, 0)\r\n else:\r\n play_button.text_color = (255, 255, 255)\r\n\r\n play_button.prep_msg()\r\n play_button.draw_button()\r\n\r\n if scores_x and scores_y:\r\n high_scores_button.text_color = (0, 255, 0)\r\n else:\r\n high_scores_button.text_color = (255, 255, 255)\r\n\r\n high_scores_button.prep_msg()\r\n high_scores_button.draw_button()",
"def buttonStatusChange(self,**kwargs):\n # If the dictionary robot value is 'tb1' then change the button Style\n if kwargs['robot']=='tb1':\n if self.robot_TB1_Viewer.isChecked() is True:\n self.robot_TB1_Status.setStyleSheet(\"background: rgba(25, 27, 33, 0.2);\\n\"\n \"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n else:\n self.robot_TB1_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n # If the dictionary robot value is 'tb2' then change the button Style\n if kwargs['robot']=='tb2':\n if self.robot_TB2_Viewer.isChecked() is True:\n self.robot_TB2_Status.setStyleSheet(\"background: rgba(25, 27, 33, 0.2);\\n\"\n \"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n else:\n self.robot_TB2_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n # If the dictionary robot value is 'tb3' then change the button Style\n if kwargs['robot']=='tb3':\n if self.robot_TB3_Viewer.isChecked() is True:\n self.robot_TB3_Status.setStyleSheet(\"background: rgba(25, 27, 33, 0.2);\\n\"\n \"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n else:\n self.robot_TB3_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n # If the dictionary robot value is 'tb4' then change the button Style\n if kwargs['robot']=='tb4':\n if self.robot_TB4_Viewer.isChecked() is True:\n self.robot_TB4_Status.setStyleSheet(\"background: rgba(25, 27, 33, 0.2);\\n\"\n \"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")\n else:\n self.robot_TB4_Status.setStyleSheet(\"color: rgb(193, 69, 69);\\n\"\n \"font: 7pt \\\"Khmer OS\\\";\")",
"def SetHoverItem(self, pitem):\r\n\r\n former_hover = None\r\n\r\n for item in self._items:\r\n \r\n if item.state & AUI_BUTTON_STATE_HOVER:\r\n former_hover = item\r\n \r\n item.state &= ~AUI_BUTTON_STATE_HOVER\r\n\r\n if pitem:\r\n pitem.state |= AUI_BUTTON_STATE_HOVER\r\n \r\n if former_hover != pitem:\r\n self.Refresh(False)\r\n self.Update()",
"def on_unhover(self) -> None:",
"def OnMotion_Other(self, event):\r\n \r\n part = self.HitTest(*event.GetPosition())\r\n\r\n if part and part.type == AuiDockUIPart.typePaneButton \\\r\n and self.IsPaneButtonVisible(part):\r\n if part != self._hover_button:\r\n \r\n if self._hover_button:\r\n self.RefreshButton(self._hover_button)\r\n\r\n self._hover_button = part\r\n self.RefreshButton(part)\r\n \r\n else:\r\n \r\n if self._hover_button:\r\n self.RefreshButton(self._hover_button)\r\n else:\r\n event.Skip()\r\n\r\n self._hover_button = None",
"def set_active_tool_button(self, active_button):\n\n # button_style = 'font-weight: bold'\n # active_style = \"background-color: blue; color: white\"\n # active_style = \"background-color: rgb(0,49,80); color: white\"\n active_style = \"background-color: rgb(0,112,192); color: white\"\n inactive_style = \"background-color: none; color: none\"\n\n # Reset all button colours\n self.projConfigButton.setStyleSheet(inactive_style)\n self.rawDataButton.setStyleSheet(inactive_style)\n self.dataQualityButton.setStyleSheet(inactive_style)\n self.statsScreeningButton.setStyleSheet(inactive_style)\n self.spectralScreeningButton.setStyleSheet(inactive_style)\n self.histogramsButton.setStyleSheet(inactive_style)\n self.seascatterButton.setStyleSheet(inactive_style)\n self.transFuncsButton.setStyleSheet(inactive_style)\n self.fatigueButton.setStyleSheet(inactive_style)\n\n # Colour active dashboard button\n if active_button == \"config\":\n self.projConfigButton.setStyleSheet(active_style)\n if active_button == \"raw\":\n self.rawDataButton.setStyleSheet(active_style)\n if active_button == \"quality\":\n self.dataQualityButton.setStyleSheet(active_style)\n if active_button == \"stats\":\n self.statsScreeningButton.setStyleSheet(active_style)\n if active_button == \"spectral\":\n self.spectralScreeningButton.setStyleSheet(active_style)\n if active_button == \"histograms\":\n self.histogramsButton.setStyleSheet(active_style)\n if active_button == \"seascatter\":\n self.seascatterButton.setStyleSheet(active_style)\n if active_button == \"tf\":\n self.transFuncsButton.setStyleSheet(active_style)\n if active_button == \"fatigue\":\n self.fatigueButton.setStyleSheet(active_style)",
"def hovered(self, *args, **kwargs): # real signature unknown\n pass",
"def on_hovered(self):\n if not self.is_selected:\n self.colour = self.hover_colour\n self.is_hovered = True\n self.redraw()",
"def colour_press(self):\n global last_button\n if last_button is None:\n # If there is no \"last button press\", set this as the latest one\n last_button = self\n else:\n # Another button has been pressed before. Switch the colours of the two\n last_button.background_color, self.background_color = self.background_color, last_button.background_color\n # Set their states back to normal and reset the last button pressed\n last_button.state = 'normal'\n self.state = 'normal'\n last_button = None\n # Check if the switch removed any blocks\n points = self.screen.check_removal()\n if points == 0:\n # If nothing has been removed, the player gets one step closer to losing\n self.screen.misses += 1\n else:\n # Give the player the points\n self.screen.points += points\n if self.screen.misses > 3:\n # Player has lost, leave the game\n self.screen.leave()",
"def SetHoverBitmap(self, bmp):\r\n \r\n self.hover_bitmap = bmp",
"def OnLeaveWindow(self, event):\r\n \r\n if self._hover_button:\r\n self._hover_button.cur_state = AUI_BUTTON_STATE_NORMAL\r\n self._hover_button = None\r\n self.Refresh()\r\n self.Update()",
"def _update_buttons(self):\n for index, piece in enumerate(self.game_board.board):\n self.board_button_list[index].config(\n image=self.image_dict.get(f\"{piece.team_id}{piece.piece_id}\")\n )\n\n self.update()",
"def update(self, mouse_pos):\n if self.blocked:\n return\n if hasattr(self, 'collide_rect'):\n rect = self.collide_rect\n else:\n rect = self.rect\n hover = rect.collidepoint(mouse_pos)\n if hover:\n self.image = self.hover_image\n else:\n self.image = self.idle_image\n self.hover = hover",
"def hover(mouse):\n nonlocal desc_label\n\n smallest_element = get_element(mouse)\n\n with data_lock:\n if smallest_element:\n output = [f\"{k}={str(v)}\" for k, v in smallest_element.metadata.items() if k != \"text\"]\n desc_label.config(text=\", \".join(output))\n else:\n desc_label.config(text=str(\"{}\"))",
"def _updateButtons(self, event):\n selectedIndex = self.colorlist.GetSelection()\n number = self.colorlist.GetCount()\n try:\n if not 0 <= selectedIndex < number:\n self.buttondown.Enable(False)\n self.buttonup.Enable(False)\n self.buttonremove.Enable(False)\n self.buttonedit.Enable(False)\n elif selectedIndex == 0:\n self.buttondown.Enable(True)\n self.buttonup.Enable(False)\n self.buttonremove.Enable(True)\n self.buttonedit.Enable(True)\n elif selectedIndex == number - 1:\n self.buttondown.Enable(False)\n self.buttonup.Enable(True)\n self.buttonremove.Enable(True)\n self.buttonedit.Enable(True)\n else:\n self.buttondown.Enable(True)\n self.buttonup.Enable(True)\n self.buttonremove.Enable(True)\n self.buttonedit.Enable(True)\n except wx.PyDeadObjectError:\n pass",
"def setButtonToolTip(self, tooltip):\n self.__button.setToolTip(tooltip)",
"def _hover(self, event):\n if self.ignore(event):\n return\n\n if self._active_handle is not None or not self._selection_completed:\n # Do nothing if button is pressed and a handle is active, which may\n # occur with drag_from_anywhere=True.\n # Do nothing if selection is not completed, which occurs when\n # a selector has been cleared\n return\n\n _, e_dist = self._edge_handles.closest(event.x, event.y)\n self._set_cursor(e_dist <= self.grab_range)",
"def handle_attributes_mouseover(self):\n pass",
"def param_changes_button_clicked():\n for label in param_change_labels:\n if label.visible:\n param_changes_button.label = 'Show Parameter Changes'\n label.visible = False\n label.text_alpha = 0 # label.visible does not work, so we use this instead\n else:\n param_changes_button.label = 'Hide Parameter Changes'\n label.visible = True\n label.text_alpha = 1",
"def OnLeaveWindow(self, event):\r\n\r\n if self._hover_button:\r\n self.RefreshButton(self._hover_button)\r\n self._hover_button = None",
"def activate_button(self, e):\n self.serv_but.config(state=\"normal\")"
] | [
"0.7449659",
"0.72782147",
"0.7196526",
"0.69607794",
"0.6892113",
"0.6406873",
"0.63839555",
"0.636601",
"0.634425",
"0.63436794",
"0.6326481",
"0.6225555",
"0.6220948",
"0.60805935",
"0.60604835",
"0.5974381",
"0.5956773",
"0.5947975",
"0.5823839",
"0.5748594",
"0.57011336",
"0.5690154",
"0.56881595",
"0.56847286",
"0.5669269",
"0.56568635",
"0.56508195",
"0.56389934",
"0.56324714",
"0.5631653"
] | 0.8288496 | 0 |
It should return True or False based on motion. | def detectMotion():
global MotionDetected
MotionDetected = False
return MotionDetected | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def require_motion(self) -> bool:\n return self._require_motion",
"def __check_motion(self):\n if self.communications.get_motion_sensor():\n self.communications.set_status(\"Detecting Motion\")\n self.communications.send_motion_data(self.motion_sensors.detect_motion(10))",
"def motion_detection_enabled(self):\n return self._motion_status",
"def is_motion(self, channel=None):\n return bool(self.getBinaryData(\"MOTION\", channel))",
"def is_motion(self, channel=None):\n return bool(self.getBinaryData(\"MOTION\", channel))",
"def _ismoving(self):\n return self.dp.state()==PyTango.DevState.MOVING",
"def is_moving(self):\n is_moving = self.get_raw_status() & self.STATUS_MOVING\n return bool(is_moving)",
"def checkMotion(self):\n res = 0\n while(self.ser.inWaiting() > 0):\n res = self.ser.readline().strip()\n\n try:\n if self.state == 1 and time.time() - self.last_move > self.config[\"keep_on_time\"]:\n self.execOff()\n\n if res == \"1\":\n self.last_move = time.time()\n\n if res == \"1\" and self.state == 0:\n self.execOn()\n except Exception as e:\n self.logger.error(e)",
"def iswalking(self):\r\n return self.model.coord!=self.model.targetcoord",
"def is_moving(self):\n response = self.__send_and_receive(protocol.GET_IS_MOVE)\n value = self.__gen_response_value(response)\n if value:\n # printf(\"\".join(value[1:]))\n if \"\".join(value)[1:] == \"1\":\n return True\n else:\n return False\n else:\n return False",
"def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the y-direction\r\n\r\n # checks to see if the player is still within the bounds\r",
"def is_moving(self):\n return self.steps < self.max_steps",
"def shooting(self):\r\n return not self.stopped",
"def ismoving(self):\n return not self.get_par(\"done_moving\")",
"def is_moving(self):\n return self.gripper_io.get_signal_value(\"is_moving\")",
"def check_movement(self):\n is_clear = True # default return value if no obstacles\n # !!! IR_SENSORS DISABLED\n if self.move_state == MOV_FORWARD:\n if self.l.look_for_obstacle(OBST_FRONT) == True:\n is_clear = False\n return is_clear",
"def __bool__(self) -> bool:\n if self.initial_value == 1 and self.number_of_steps == 0:\n return True\n return False",
"def _detect(self):\n return True",
"def get_is_moving(self):\r\n return self._arm.get_is_moving()",
"def _has_arrived(self, context) -> bool:\n return self._target[0] == context.x and self._target[1] == context.y",
"def Active(self):\n return self.NMove > 0",
"def status(self):\r\n return not self.sendQuery(\"isMoving\",\"isMoving\")",
"def is_on(self) -> bool:\n return self._current_speed != SPEED_OFF",
"def is_onhold(self) -> bool:",
"def motion_detect(motion):\n if (GPIO.input(motion) == 1):\n print \"Motion detected\"\n else:\n print \"No motion detected\"",
"def is_active(self):\n if self.steps > STEPS_MAX or not self.targets[0]:\n return False\n return True",
"def is_jumping(self):\n if(self.going_down or self.going_up or self.mid_air):\n return True\n else:\n return False",
"def can_move(self, next_x, next_y):\n\t\tif self.battery == 0:\n\t\t\tif self.planet.tiles[next_y][next_x].is_shaded():\n\t\t\t\treturn False\n\t\tif self.planet.tiles[next_y][next_x].elevation(self) == \"+\":\n\t\t\treturn False\n\t\tif self.planet.tiles[next_y][next_x].elevation(self) == \"-\":\n\t\t\treturn False\n\t\treturn True",
"def can_move(self):\n return self.movement",
"def is_displacing(self):\r\n e=0.02\r\n for v in self.current_velocity:\r\n if abs(v)>e: return True\r\n return False"
] | [
"0.764494",
"0.73808944",
"0.73020977",
"0.7163485",
"0.7163485",
"0.7149546",
"0.71006066",
"0.70653343",
"0.69570637",
"0.6867434",
"0.6822684",
"0.68209064",
"0.6782755",
"0.6743013",
"0.6732485",
"0.6698545",
"0.6684766",
"0.66791064",
"0.6676883",
"0.66607964",
"0.66529274",
"0.6648524",
"0.6626711",
"0.6565391",
"0.6555816",
"0.6513958",
"0.65104324",
"0.65101224",
"0.65098965",
"0.6464583"
] | 0.78054595 | 0 |
Convert coordinates into pygame coordinates (lowerleft => top left). | def to_pygame(coords):
return (coords[0], HEIGHT - coords[1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_to_pygame(pos):\n return int(pos.x), int(-pos.y+600)",
"def to_pygame_coords(self, coords):\n coords = Vector(coords)\n if Window.FollowPlayer:\n # offset coords by screen center\n coords = coords + (self.size * .5)\n # offset coords by centered_obj\n center_point = game.current_level.player.position + Vector(game.current_level.player.image.get_size()) * .5\n coords = coords - center_point\n \n coords = Vector(0, self.size.y) + Vector(coords.x, -coords.y)\n return coords",
"def to_pygame(point):\n return int(point.x), int(-point.y+500)",
"def to_pygame(p):\n return int(p.x), int(-p.y+600)",
"def convert_coords(x, y, conversion):\n if conversion == \"cartesian\" :\n # convert to cartesian plane coordinates \n x_new = x - (width/2)\n y_new = (height/2) + y \n\n elif conversion == \"pygame\":\n # only needed to place images in pygame\n x_new = x + (width/2)\n y_new = (height/2) - y\n \n return x_new, y_new",
"def pixel2coord(x, y,a,b,xoff,yoff,d,e):\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n return(xp, yp)",
"def to_pygame(self, p):\n return int(p.x), int(-p.y+600)",
"def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4",
"def pixel2coords(self, x, y):\n xoff, a, b, yoff, d, e = self.geotransform()\n\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n return (xp, yp)",
"def to_coords(self, px, py):\n if px not in range(self.SIZE**2) or py not in range(self.SIZE**2):\n raise IndexError\n return (px // self.SIZE, py // self.SIZE,\n px % self.SIZE, py % self.SIZE)",
"def convert_coords(self, coords):\n xPos = int(int(coords[0]) / 8)\n yPos = int(coords[1])\n zPos = int(int(coords[2]) / 8)\n return list(xPos, yPos, zPos)",
"def screen_coordinates(pos):\n\n return [int((pos[0] % screen_width) / px), screen_height - int((pos[1] % screen_height) / px)]",
"def _convert_coordinates(self, coords, units=None):\n xscrolloffset, yscrolloffset = self._window._scrolloffset()\n if units is None:\n units = self.__units\n if self._need_convert_coordinates:\n coords = self._window._convert_coordinates(coords, units = units)\n x = coords[0] + xscrolloffset\n y = coords[1] + yscrolloffset\n if len(coords) == 2:\n return x, y\n else:\n w, h = coords[2:]\n return (x, y, x+w, y+h)",
"def transform_coords(x, y, w, h, nw, nh):\r\n return ((((x / w) - 0.5) * nw), (((h - y) / h) - 0.5) * nh)",
"def makeToCoordinates(fromCoords, Uframe, Vframe, scale):\n\n out = []\n\n for e in fromCoords:\n x = e[0]\n y = e[1]\n toX = Uframe[x][y]\n toY = Vframe[x][y]\n out.append((int(round(x+toX*scale)),int(round(y+toY*scale))))\n\n return out",
"def position_gazebo_to_px4(position_x, position_y, position_z):\n return (position_y, position_x, -position_z)",
"def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)",
"def _posToScreenCoords(self, pos):\n camLim_x = self.camCenter.x - self.camSize / 2\n camLim_y = self.camCenter.y - self.camSize / 2\n\n x = (self.screenSize[0] / self.camSize) * (pos.x - camLim_x)\n y = (self.screenSize[1] / self.camSize) * (pos.y - camLim_y)\n\n # Invert orientation of y\n y = self.screenSize[1] - y\n\n return int(x), int(y)",
"def translateCoord(self,absX,absY):\n rawConvertedX=hex(absX*(16**4-1)/self.maxX).upper()[2:]\n rawConvertedY=hex(absY*(16**4-1)/self.maxY).upper()[2:]\n return {'x':rawConvertedX,'y':rawConvertedY}",
"def event_to_x_y(self, event):\n\t\treturn (round(event.x / self.w_to_px), round((HEIGHT - event.y) / self.h_to_px))",
"def edwin_transform(self, coordinates):\n edwinx = int(3.459 * coordinates[0] - 1805)\n edwiny = int(6.167 * coordinates[1] - 2400)\n edwinz = int(37.50 * coordinates[2] + 2667)\n\n if edwinx > 4000:\n edwinx = 4000\n elif edwinx < 0:\n edwinx = 0\n\n if edwiny > 4000:\n edwiny = 4000\n elif edwiny < -400:\n edwiny = -400\n\n if edwinz > 3500:\n edwinz = 3500\n elif edwinz < -600:\n edwinz = -600\n\n return edwinx, edwiny, edwinz",
"def to_map_pos(self, screen_pos):\n return screen_pos + self.player_pos - SCREEN.size // 2",
"def normalize_coords(xx, yy, width, height):\n xx = (2.0 / (width - 1.0)) * xx.float() - 1.0\n yy = (2.0 / (height - 1.0)) * yy.float() - 1.0\n return xx, yy",
"def screen_to_map(self, coords):\n (x0, y0) = self.origin\n\n (x, y) = coords\n\n # Note that the screen uses left-handed coordinates while the map\n # uses right-handed coordinates, so we flip the y axis when converting\n map_coords = ((x / self.display_scale) + x0,\n -(y / self.display_scale) + y0)\n log.debug(\"screen coords {0} --> map coords {1}\"\n .format(coords, map_coords))\n return map_coords",
"def from_screen_coordinates(pos):\n\n return [float(pos[0]) * px, float(screen_height - pos[1]) * px]",
"def mouse_position(pos):\n x, y = pos\n m = x // SQUARE_SIZE\n n = y // SQUARE_SIZE\n return n, m",
"def convert_hex_coords(hex_coords, unit=1):\n x = (hex_coords[0] - hex_coords[1]/2) * unit\n y = (hex_coords[1] * np.sqrt(3)/2) * unit\n return (x, y)",
"def positions_to_coords(self, positions):\n return [self.to_coords(px, py) for (px, py) in positions]",
"def mapToCoordinates(self, shot):\r\n toks = shot.split(\"-\")\r\n return Coordinates(ord(toks[0]) - ord(\"A\"), int(toks[1]) - 1)",
"def to_position(self, x, y, i, j):\n return (x * self.SIZE + i, y * self.SIZE + j)"
] | [
"0.7254828",
"0.72006845",
"0.682167",
"0.67977166",
"0.66983885",
"0.6682158",
"0.6584505",
"0.6572574",
"0.65490896",
"0.6544315",
"0.6501023",
"0.6490764",
"0.6435862",
"0.64180505",
"0.63830537",
"0.637115",
"0.6364992",
"0.6360416",
"0.6345675",
"0.6342017",
"0.6336103",
"0.6293663",
"0.627436",
"0.6244029",
"0.62276256",
"0.6225994",
"0.6206863",
"0.61869365",
"0.6185366",
"0.61530924"
] | 0.77644044 | 0 |
Process event log of DynamoDB stream and update ANDS handle content path if needed | def raid_table_dynamodb_stream_event(event, context):
try:
# Log AWS Lambda event
logger.info('Event: {}'.format(json.dumps(event, indent=4)))
for record in event['Records']:
# Convert low-level DynamoDB format to Python dictionary
deserializer = TypeDeserializer()
table_keys = {k: deserializer.deserialize(v) for k, v in record['dynamodb']['Keys'].items()}
table_attributes = {k: deserializer.deserialize(v) for k, v in record['dynamodb']['NewImage'].items()}
if record['eventSourceARN'] == os.environ['DEMO_RAID_STREAM_ARN']:
ands_url_path = "{}modifyValueByIndex?handle={}&value={}&index={}".format(
os.environ["DEMO_ANDS_SERVICE"],
table_keys['handle'],
table_attributes['contentPath'],
table_attributes['contentIndex']
)
ands_secret = os.environ["ANDS_DEMO_SECRET"]
elif record['eventSourceARN'] == os.environ['RAID_STREAM_ARN']:
ands_url_path = "{}modifyValueByIndex?handle={}&value={}&index={}".format(
os.environ["ANDS_SERVICE"],
table_keys['handle'],
table_attributes['contentPath'],
table_attributes['contentIndex']
)
ands_secret = os.environ["ANDS_SECRET"]
else:
logger.info('Unknown DynamoDB Stream')
continue
# Process new records
if record['eventName'] == 'INSERT':
# Skip if default Raid
if table_attributes['contentPath'] == settings.RAID_SITE_URL:
logger.info('Not updating content path "{}" on new RAiD as it is the default: {}'.format(
table_attributes['contentPath'], table_keys['handle'])
)
continue
logger.info('Updating content path "{}" on new RAiD: {}'.format(
table_attributes['contentPath'], table_keys['handle'])
)
ands_mint = ands_helpers.ands_handle_request(
ands_url_path,
os.environ["ANDS_APP_ID"],
"raid",
"raid.org.au",
ands_secret,
)
logger.info(json.dumps(ands_mint))
elif record['eventName'] == 'MODIFY':
old_table_attributes = {
k: deserializer.deserialize(v) for k, v in record['dynamodb']['OldImage'].items()
}
# Update handle content Path if it is different
if old_table_attributes['contentPath'] != table_attributes['contentPath']:
logger.info('Updating content path "{}" on existing RAiD: {}'.format(
table_attributes['contentPath'], table_keys['handle'])
)
ands_mint = ands_helpers.ands_handle_request(
ands_url_path,
os.environ["ANDS_APP_ID"],
"raid",
"raid.org.au",
ands_secret,
)
logger.info(json.dumps(ands_mint))
else:
logger.info('Existing RAiD has no changes to content path.')
except Exception as e:
logger.error('Unknown error occurred.')
logger.error(str(e))
logger.info('DynamoDB Stream Processed...') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def processEventLog(log):\n pass",
"def lambda_handler(event, context): # pylint: disable=too-many-locals,too-many-branches,too-many-statements\r\n try: # pylint: disable=too-many-nested-blocks\r\n print(\"Execution started!\")\r\n #print(\"Event: \",event)\r\n # Bucket name and Full path for file - where file will be uploded\r\n source_bucket_name = event[\"detail\"][\"requestParameters\"][\"bucketName\"]\r\n source_key = urllib.parse.unquote_plus(\r\n event[\"detail\"][\"requestParameters\"][\"key\"], encoding='utf-8')\r\n \r\n print(\"file_path: \",source_key)\r\n #Loading master config\r\n print(\"Loading master_config\")\r\n audit_config = {}\r\n config_path = \"./config/\" + \\\r\n os.environ['CCM_ENV'] + \"/master_config.json\"\r\n config_content = open(config_path).read()\r\n config_json = json.loads(config_content)\r\n audit_config = config_json[\"audit_config\"]\r\n snow_params = config_json[\"ERROR_NOTIFICATION_SNOW_PARAMS\"]\r\n athena_query_param = config_json[\"ATHENA_QUERY_PARAMS\"]\r\n athena_table_params = config_json[\"ATHENA_TABLE_PARAMS\"]\r\n\r\n # Audit Parameters Based on the Invoking lambda and its operation involved\r\n audit_config[\"component_type_code\"] = \"ETL\"\r\n audit_config[\"component_name\"] = \"PCP Appflow\"\r\n audit_config[\"source_name\"] = \"Patient Connections Platform\"\r\n audit_config[\"target_name\"] = \"Consumer Consent Management\"\r\n audit_config[\"full_file_path\"] = \"s3://\" + \\\r\n source_bucket_name + \"/\" + source_key\r\n audit_config[\"file_version_id\"] = \"\"\r\n\r\n # Creates Job Entry in ABC Framework\r\n print(\"audit config::\", audit_config)\r\n process_execution_id = audit_helper.\\\r\n invoke_edb_abc_log_process_status_event_job_entry(audit_config)\r\n audit_config[\"process_execution_id\"] = process_execution_id\r\n print(\"process_execution_id ::\", process_execution_id)\r\n #print(\"source_key: \",source_key)\r\n s3_write = boto3.client('s3')\r\n record_dict = {}\r\n file_name = \"\"\r\n final_json = \"\"\r\n # prefix = \"\"\r\n # file_list = []\r\n # client = boto3.client(\"s3\")\r\n # result = client.list_objects(Bucket=source_bucket_name, Prefix=source_key, Delimiter='/')\r\n # #print(result)\r\n # for obj in result.get('CommonPrefixes'):\r\n # prefix = obj.get('Prefix')\r\n # #print(prefix)\r\n # file_list = list_files(client,source_bucket_name,prefix)\r\n # for file in file_list:\r\n # #print(file)\r\n json_read = read_s3_file(source_bucket_name, source_key)\r\n data = json.loads(json_read)\r\n #print(data)\r\n if data != '':\r\n record_dict = {k.lower(): v for k, v in data.items()}\r\n print(\"Record_Dict::\",record_dict)\r\n event_type_param = {}\r\n event_type_list = athena_table_params.keys()\r\n print(\"event_type_list\",event_type_list)\r\n for key in event_type_list:\r\n print(\"key\",key)\r\n if key in source_key:\r\n print(\"key\",key)\r\n event_type_param = athena_table_params[key]\r\n print(event_type_param)\r\n if \"changeeventheader\" in record_dict:\r\n if record_dict[\"changeeventheader\"][\"changeType\"] == \"CREATE\":\r\n #and record_dict[\"dtpc_affiliate__c\"] == 'US':\r\n recordid_create = record_dict[\"changeeventheader\"][\"recordIds\"][0]\r\n print(recordid_create)\r\n if recordid_create != '':\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace(\":\",\".\")\r\n create_json = json.dumps(record_dict)\r\n final_json = create_json\r\n file_name = recordid_create + \"-create-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = 
outbound_path + '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, Key=final_source_key)\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n elif record_dict[\"changeeventheader\"][\"changeType\"] == \"UPDATE\":\r\n record_ids_list = record_dict[\"changeeventheader\"][\"recordIds\"]\r\n if len(record_ids_list) != 0:\r\n for ele in record_ids_list:\r\n print(ele)\r\n element = \"'\" + ele + \"'\"\r\n payload_condition = event_type_param[\"recordid_condition\"]\r\n query = 'SELECT * FROM '+event_type_param[\"athena_create_table\"]+\\\r\n ' WHERE lastmodifieddate IN(SELECT max(lastmodifieddate) from '\\\r\n +event_type_param[\"athena_create_table\"]+\\\r\n ', UNNEST(\"'+payload_condition[0]+'\".\"'+payload_condition[1]+\\\r\n '\") AS ln(jsondata) WHERE jsondata IN ('+element+'));'\r\n print(query)\r\n athena_query_param['athena_query'] = query\r\n query_result_record_id = athena_helper.perform_athena_search\\\r\n (athena_query_param)\r\n print(\"Athena Query Result for Create Path:::\", query_result_record_id)\r\n update_json = create_complete_payload(data,query_result_record_id)\r\n print(\"update_json: \",update_json)\r\n if len(update_json) != 0:\r\n last_modified_date = record_dict[\"lastmodifieddate\"].replace\\\r\n (\":\",\".\")\r\n final_json = json.dumps(update_json)\r\n file_name = ele + \"-update-\" + str(last_modified_date)\r\n print(\"file_name: \",file_name)\r\n outbound_path = event_type_param[\"folder_path\"]\r\n final_source_key = outbound_path + '/' + file_name+\".json\"\r\n print(\"final_source_key :\", final_source_key)\r\n s3_write.put_object(\r\n Body=final_json, Bucket=source_bucket_name, \\\r\n Key=final_source_key)\r\n else:\r\n print(ele,\" does not have a create payload\")\r\n else:\r\n raise Exception(\"RecordId is missing: \", record_dict)\r\n else:\r\n raise Exception(\"ChangeEventHeader is missing: \", record_dict)\r\n else:\r\n raise Exception(\"Invalid Payload: \", record_dict)\r\n\r\n except (Exception) as err: # pylint: disable=line-too-long,broad-except\r\n print(\"Error occured: {0}\".format(str(err)))\r\n audit_type = \"error\"\r\n error_msg = sys.exc_info()\r\n exc_type = error_msg\r\n exc_obj = error_msg\r\n snow_params[\"flag\"] = \"FAIL\"\r\n snow_params[\"error_message\"] = str(exc_obj)\r\n snow_params[\"error_type\"] = str(exc_type)\r\n audit_config[\"exception_message\"] = str(exc_obj)\r\n if audit_config != {}:\r\n logging.exception(sys.exc_info())\r\n audit_helper.invoke_edb_abc_log_process_status_event(\r\n audit_type, audit_config) # pylint: disable=line-too-long\r\n audit_helper.raise_snow_incident(snow_params)",
"def process_log_file(cur, filepath):\n \n # open log file\n df = pd.read_json(filepath,lines=True)\n\n # filter by NextSong action - i.e. get only listening music events from the logs\n df = df[(df.page == \"NextSong\")]\n\n # insert time records\n __insert_time_data(cur, df)\n \n # insert user records\n __insert_user_data(cur, df)\n \n # insert songplay records\n __insert_songplay_data(cur, df)\n \n # erase dataframe\n df = df.iloc[0:0]",
"def handle(event, context):\r\n for record in event.get('Records', []):\r\n payload = json.loads(base64.b64decode(record[\"kinesis\"][\"data\"]))\r\n LOGGER.info('Payload: %s', payload)\r\n key_name = get_key_name(payload)\r\n app_slug, uuid_folder, file_name = get_key_details(key_name)\r\n if not app_slug or not file_name:\r\n return event\r\n\r\n response = DYNAMO.get_item(\r\n TableName='FilesToPipelines',\r\n Key={\r\n 'App': {'S': app_slug},\r\n 'File': {'S': file_name}\r\n },\r\n ProjectionExpression='Pipeline')\r\n if 'Item' not in response:\r\n LOGGER.info('Could not file pipeline for %s', key_name)\r\n return\r\n if not response['Item'].get('Pipeline'):\r\n LOGGER.info('Could not file pipeline for %s: Got %s',\r\n key_name, response)\r\n return\r\n\r\n pipeline = response['Item']['Pipeline'][\"S\"]\r\n response = LAMBDA.invoke_async(\r\n FunctionName=pipeline,\r\n InvokeArgs=json.dumps({\r\n 'trigger': 'evented',\r\n 'uuid': uuid_folder\r\n })\r\n )\r\n LOGGER.debug(response)\r\n LOGGER.info('Executed %s on %s', pipeline, uuid_folder)",
"def update_logs(event, log, action_log, error_log):\n\tif event[\"type\"] == \"error\":\n\t\t#Update the error log file\n\telse:\n\t\t# event[\"type\"] == \"action\"\n\t\t#Update action file",
"def handler(context, event):\n\n if _ensure_str(event.trigger.kind) != 'http' or _invoked_by_cron(event):\n body = event.body.decode('utf-8')\n context.logger.info('Received event body: {0}'.format(body))\n\n # serialized record\n serialized_record = json.dumps({\n 'body': body,\n 'headers': {\n _ensure_str(header): _ensure_str(value)\n for header, value in event.headers.items()\n },\n 'timestamp': datetime.datetime.utcnow().isoformat(),\n })\n\n # store in log file\n with open(events_log_file_path, 'a') as events_log_file:\n events_log_file.write(serialized_record + ', ')\n\n else:\n\n # read the log file\n try:\n with open(events_log_file_path, 'r') as events_log_file:\n events_log_file_contents = events_log_file.read()\n except IOError:\n events_log_file_contents = ''\n\n # make this valid JSON by removing last two chars (, ) and enclosing in [ ]\n encoded_event_log = '[' + events_log_file_contents[:-2] + ']'\n\n context.logger.info('Returning events: {0}'.format(encoded_event_log))\n\n # return json.loads(encoded_event_log)\n return encoded_event_log",
"def lambda_handler(event, context):\n raw_kinesis_records = event['Records']\n\n # Deaggregate all records in one call\n records = deaggregate_records(raw_kinesis_records)\n for record in records:\n # Kinesis data in Python Lambdas is base64 encoded\n payload = base64.b64decode(record['kinesis']['data'])\n # payload is the actual ion binary record published by QLDB to the stream\n ion_record = ion.loads(payload)\n print(\"Ion reocord: \", (ion.dumps(ion_record, binary=False)))\n\n if ((\"recordType\" in ion_record) and (ion_record[\"recordType\"] == \"REVISION_DETAILS\")):\n revision_data, revision_metadata = get_data_metdata_from_revision_record(ion_record)\n print(revision_metadata[\"version\"])\n table_info = get_table_info_from_revision_record(ion_record)\n\n # Check if new wallet is being created or balance update.\n if (revision_metadata[\"version\"] == 0): # a new wallet created\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo DB insertion\n print(\"Proceed to create wallet in dynamo userwallet table\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'Balance': revision_data[\"Balance\"],\n 'last_txn_source': revision_data[\"last_txn_source\"],\n 'last_txn_ref': revision_data[\"last_txn_ref\"],\n 'last_txn_type': revision_data[\"last_txn_type\"],\n 'last_txn_amount': revision_data[\"last_txn_amount\"],\n 'last_txn_date': revision_data[\"last_txn_date\"],\n 'version' : 0\n }\n )\n else: # Balance updates\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo db logic to update the balance\n print(\"Dyanmo update balance\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.update_item(\n Key={\n 'walletid': revision_data[\"walletid\"]\n },\n UpdateExpression=\"set Balance=:a , last_txn_source=:b , last_txn_ref=:c, last_txn_type=:d ,last_txn_amount=:e ,last_txn_date=:f ,version=:g\",\n ExpressionAttributeValues={\n ':a': revision_data[\"Balance\"],\n ':b': revision_data[\"last_txn_source\"],\n ':c': revision_data[\"last_txn_ref\"],\n ':d': revision_data[\"last_txn_type\"],\n ':e': revision_data[\"last_txn_amount\"],\n ':f': revision_data[\"last_txn_date\"] ,\n ':g': revision_metadata[\"version\"],\n },\n ConditionExpression=\"version < :g\",\n ReturnValues=\"UPDATED_NEW\"\n )\n\n # update all transactions to dynamodb except for getfunds\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Transactions')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'updated_balance': revision_data[\"Balance\"],\n 'txn_source': revision_data[\"last_txn_source\"],\n 'txn_ref': revision_data[\"last_txn_ref\"],\n 'txn_type': revision_data[\"last_txn_type\"],\n 'txn_amount': revision_data[\"last_txn_amount\"],\n 'txn_date': revision_data[\"last_txn_date\"],\n 'version' : revision_metadata[\"version\"]\n }\n )\n\n return {\n 'statusCode': 200\n }",
"def handler(event, context):\n if event and \"Records\" in event:\n for record in event[\"Records\"]:\n time_str = time.ctime()\n if \"body\" in record:\n try:\n hasura_request(record[\"body\"])\n except Exception as e:\n print(f\"Start Time: {time_str}\", str(e))\n time_str = time.ctime()\n print(\"Done executing: \", time_str)\n raise_critical_error(\n message=f\"Could not process record: {str(e)}\",\n data=record,\n exception_type=Exception\n )",
"def lambda_handler(event, context):\n\n for record in event['Records']:\n\n bucket = record['s3']['bucket']['name']\n key = unquote_plus(record['s3']['object']['key'])\n\n str_value = s3_utils.download_file_as_string(bucket, key)\n data = json.loads(str_value)\n\n normalized_data = {\n 'meta': {\n 'table': 'parcels',\n 'column_names': [\n 'dataset',\n 'as_of',\n 'apn',\n 'objectid',\n 'city',\n 'x_coordinate',\n 'y_coordinate',\n 'area',\n 'length'\n ]\n }\n }\n\n rows = []\n\n dataset = data['meta']['dataset']\n as_of = data['meta']['datetime']\n\n for r in data['results']:\n\n attr = r['attributes']\n\n temp_dict = {\n 'dataset': dataset,\n 'as_of': as_of,\n 'apn': attr.get('APN_SPACE'),\n 'objectid': attr.get('OBJECTID'),\n 'city': attr.get('CITY'),\n 'x_coordinate': attr.get('X'),\n 'y_coordinate': attr.get('Y'),\n 'area': attr.get('Shape.STArea()'),\n 'length': attr.get('Shape.STLength()')\n }\n\n rows.append(temp_dict)\n\n normalized_data['rows'] = rows\n \n bucket = 'gis-data-normalized'\n file_name = 'normalized_' + key\n s3_utils.upload_json_as_file(normalized_data, bucket, file_name)",
"def lambda_handler(event, context):\n print('Received request')\n item = None\n\n mysql_host = '54.212.197.235'\n mysql_username = 'rts'\n mysql_password = 'SamWangRamsay520-S'\n mysql_dbname = 'rts_kinesis'\n mysql_tablename = 'benchmark_kinesis'\n\n print('Start connection')\n conn = mysql.connector.connect(host=mysql_host,\n user=mysql_username,\n passwd=mysql_password,\n db=mysql_dbname )\n print('End connection')\n '''Write the message to the mysql database'''\n cur = conn.cursor()\n\n #dynamo_db = boto3.resource('dynamodb')\n #table = dynamo_db.Table('benchmark_kinesis')\n _mysql_buffer = [] #ad-hoc message buffering for mysql, equivalent to dynamodb batch-write behavior\n _mysql_buffer_limit = 25\n records = [record for record in event['Records']]\n new_records = deaggregate_records(records)\n #decoded_record_data = [record['kinesis']['data'] for record in new_records]\n #deserialized_data = [decoded_record for decoded_record in records]\n #for data in decoded_record_data:\n for record in new_records:\n\t#d_record = \"%.15g\" % record['kinesis']['partitionKey']\n\t#con_time = \"%.15g\" % time.time()\n\tcreation_time = Decimal(record['kinesis']['partitionKey'])\n\tconsumer_time = Decimal(time.time())\n\tvalue = record['kinesis']['data']\n\t#cur.execute('INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)', (creation_time, consumer_time, value))\n sql = 'INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)'\n _mysql_buffer.append((creation_time, consumer_time, value))\n if len(_mysql_buffer) > _mysql_buffer_limit:\n cur.executemany(sql, _mysql_buffer)\n _mysql_buffer = []\n\t# Add a processed time so we have a rough idea how far behind we are\n #item['processed'] = datetime.datetime.utcnow().isoformat()\n\n conn.commit()\n conn.close()\n cur.close()\n # Print the last item to make it easy to see how we're doing\n #print(json.dumps(item))\n print('Number of records: {}'.format(str(len(new_records))))",
"def process_app_log(event, context):\n pubsub_message = base64.b64decode(event[\"data\"]).decode(\"utf-8\")\n\n log_data = json.loads(pubsub_message)\n print(log_data)\n payload = None\n try:\n if \"protoPayload\" in log_data:\n # If there is a protoPayload, we assume it's an entry from the audit log\n protoPayload = log_data[\"protoPayload\"]\n payload = protoPayload[\"operation\"].copy()\n payload[\"methodName\"] = log_data[\"methodName\"]\n payload[\"timestamp\"] = log_data[\"timestamp\"]\n\n elif \"jsonPayload\" in log_data:\n # Assuming the log entry has the fields we need, we just pass it over\n payload = log_data[\"jsonPayload\"]\n\n if payload:\n time_difference = store_data(payload)\n if time_difference:\n send_metric(time_difference, payload[\"methodName\"])\n except Exception as e:\n print(e)",
"def lambda_handler(event, context):\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # Decode the bytes to base64\n decoded_record_data = []\n for record in event['Records']:\n try:\n decoded_record_data.append(base64.b64decode(record['kinesis']['data']))\n except Exception as e:\n logger.error('%s - %s', \"Error decoding record\", e)\n\n # Deserialize the data\n deserialized_data = []\n for decoded_record in decoded_record_data:\n try:\n deserialized_data.append(json.loads(decoded_record))\n except Exception as e:\n logger.error('%s - %s', \"Error deserializing data\", e)\n\n # Try opening a connection to DynamoDB\n try:\n # Get a handle to the table\n dynamo_db = boto3.resource('dynamodb')\n curr_pos_table = dynamo_db.Table('current_position')\n except Exception as e:\n logger.error('%s - %s', \"Error connecting to DynamoDB\", e)\n return\n\n # Try sending the data\n transmit_data(curr_pos_table, deserialized_data, 0)",
"def lambda_handler(event, context):\n # Environmental Variables\n CATALOG_BRANCHES_TABLE = anejocommon.set_env_var('CATALOG_BRANCHES_TABLE')\n PRODUCT_INFO_TABLE = anejocommon.set_env_var('PRODUCT_INFO_TABLE')\n S3_BUCKET = anejocommon.set_env_var('S3_BUCKET')\n\n # Loop through event records\n try:\n event_records = event['Records']\n except KeyError:\n event_records = [{'body': event}]\n\n for record in event_records:\n try:\n catalog_sync_info = json.loads(record['body'])\n except TypeError:\n catalog_sync_info = record['body']\n\n # Event Variables\n catalog_url = catalog_sync_info['catalog_url']\n\n apple_bucket_catalog_path = anejocommon.get_path_from_url(\n catalog_url,\n 'html',\n append_to_path='.apple'\n )\n \n catalog = anejocommon.retrieve_url(catalog_url)\n try:\n catalog_plist = plistlib.readPlistFromBytes(catalog.data)\n except plistlib.InvalidFileException:\n print(\"ERROR: Cannot read catalog plist\")\n return\n\n # Write our local (filtered) catalogs\n anejocommon.write_local_catalogs(\n apple_bucket_catalog_path,\n catalog_plist,\n S3_BUCKET,\n CATALOG_BRANCHES_TABLE,\n PRODUCT_INFO_TABLE\n )",
"def handle_event(event, context):\n\n try:\n print(\"Received event: \" + json.dumps(event, indent=2))\n\n # grab resources section of event, get task execution ids\n task_execution_arns = event['resources']\n\n # now fetch the input filter info from each task_detail, fire off jobs\n new_files_to_process = []\n for task_execution_arn in task_execution_arns:\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/datasync.html#DataSync.Client.describe_task_execution\n response = datasync_client.describe_task_execution(TaskExecutionArn=task_execution_arn)\n print(\"Task execution details: \" + str(response))\n # this will be the location of the data in configured s3 bucket:\n # 'Includes': [\n # {\n # 'FilterType': 'SIMPLE_PATTERN',\n # 'Value': 'string'\n # },\n # ]\n if len(response['Includes']) > 0:\n file = response['Includes'][0]['Value']\n # files typically start with leading '/', strip that leading '/'\n print(\"Got filename:\" + file)\n if file.startswith('/', 0):\n new_files_to_process.append(file.lstrip('/'))\n else:\n new_files_to_process.append(file)\n else:\n print(\"Response didn't contain Includes files...\")\n\n if len(new_files_to_process) == 0:\n print('No files were parsed from input...exiting')\n return\n\n for new_file_to_process in new_files_to_process:\n state_machine_arn = os.environ['STATE_MACHINE_ARN']\n payload = {\"ObjectName\": new_file_to_process}\n json_payload = json.dumps(payload)\n print('Starting bcl2fastq with payload %s' % json_payload)\n #\n response = step_client.start_execution(stateMachineArn=state_machine_arn, input=json_payload)\n print(response)\n\n except Exception as e:\n print(e)\n print('Error handling event. %s' % e)\n raise e",
"def handler(event, context):\n message = [record['body'] for record in event.get('Records', [])]\n email_record = json.loads(message[0])[\"Records\"][0]\n\n new_email = [(email_record['s3']['bucket']['name'],\n urllib.parse.unquote(email_record['s3']['object']['key']))]\n\n if new_email:\n LOG.info(\"Changed/new object notification received from S3 bucket to the sqs queue\")\n for bucket, s3_key in new_email:\n LOG.info(\"Processing S3 bucket://%s/%s\", bucket, s3_key)\n email_body = S3.Object(bucket, s3_key).get()['Body'].read().decode('utf-8')\n\n # Process PBS job info and push the metadata doc to AWS ES\n _process_pbs_job_info(email_body)\n else:\n LOG.info(\"No new/updated email record found in the S3 bucket\")",
"def __process_table_event(self, item):\n if item is not None:\n if isinstance(item, TableEvent) \\\n and item.table_event_type == TableEventType.INSERT \\\n and item.records is not None \\\n and len(item.records) > 0:\n for index, message in enumerate(item.records):\n print(message)\n elif isinstance(item, TableEvent) \\\n and (item.table_event_type in [TableEventType.DELETE, TableEventType.UPDATE]) \\\n and item.count is not None \\\n and item.count > 0:\n if item.table_event_type == TableEventType.DELETE:\n print(\"Records deleted = %s\" % item.count)\n else:\n print(\"Records updated = %s\" % item.count)",
"def _ProcessLog(self, log_processor, logfile): # pylint: disable=R0201\n for line in open(os.path.join(self.data_directory, logfile)):\n log_processor.ProcessLine(line)",
"def lambda_handler(event, context):\n\n try:\n\n global AWS_REGION\n\n AWS_REGION = event['Records'][0]['awsRegion']\n\n s3objkey = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')\n s3objsize = event['Records'][0]['s3']['object']['size']\n bucket = event['Records'][0]['s3']['bucket']['name']\n \n print(\"\\nDealing with <- {} -> image from album [- {} -]\".format(str(s3objkey).split('/')[-1],\n str(s3objkey).split('/')[-2]))\n\n full_table_name = DDB_TABLE + '_' + bucket\n ddb_table = boto3.resource('dynamodb', region_name=AWS_REGION).Table(full_table_name)\n\n resp = ddb_table.get_item(Key={'FileName': s3objkey})\n okey = resp.get('Item', {'OptimizedSizeKey': 'None'}).get('OptimizedSizeKey', 'No')\n\n if okey == 'Yes': # In DB file marked as optimized\n\n lbls = detect_rk_labels(s3objkey, s3objsize, bucket)\n exiftags = fetch_exif_tags(s3objkey, bucket)\n update_pig_config_ddb(s3objkey, bucket, lbls, exiftags, okey)\n\n elif okey == 'No' or okey == 'None': # file size is not optimized according to DB record\n\n _, optimized_img_path = img_optimizer(s3objkey, bucket)\n update_pig_config_ddb(s3objkey, bucket, ok='Yes', _json=False)\n\n # Now we can PUT optimized file from /tmp to bucket\n s3client = boto3.client('s3', region_name=AWS_REGION)\n with open(optimized_img_path, 'rb') as content: # Upload optimized image to S3 (invokes another Lambda)\n\n ext = s3objkey.split('/')[-1].split('.')[-1]\n for e, m in metadata.items():\n if e == ext:\n s3client.upload_fileobj(content,\n bucket,\n s3objkey,\n ExtraArgs={'ContentType': m})\n\n print(\"Image {} size was optimized and uploaded to bucket as {}\".format(\n str(s3objkey).split('/')[-1], s3objkey))\n\n print(\"\\nTime remaining (MS): {}\\n\".format(context.get_remaining_time_in_millis()))\n\n except Exception as e:\n print('ERROR: ', e)",
"def process_log_event(event, context):\n\n with open('config.json') as fh:\n config = json.load(fh)\n serialized = event['awslogs'].pop('data')\n data = json.loads(zlib.decompress(\n base64.b64decode(serialized), 16+zlib.MAX_WBITS))\n message = [\n \"An error was detected\",\n \"\",\n \"Log Group: %s\" % data['logGroup'],\n \"Log Stream: %s\" % data['logStream'],\n \"Log Owner: %s\" % data['owner'],\n \"\",\n \"Log Contents\",\n \"\"]\n\n for evt in data['logEvents']:\n message.append(message_event(evt))\n message.append(\"\")\n\n params = dict(\n TopicArn=config['topic'],\n Subject=config['subject'],\n Message='\\n'.join(message))\n sns.publish(**params)",
"def ingest(self):\n datetime_retrieved = datetime.now()\n prefix = self.prefix_template.format(**self.feed, year=datetime_retrieved.strftime('%Y'), month=datetime_retrieved.strftime('%m'))\n fp = self.generate_fp(\n template='{feedname}_{datetime_retrieved}',\n feedname=self.feed['feedname'],\n datetime_retrieved=datetime_retrieved\n )\n\n url_to_request = self.url_dict[(self.feed['state'],self.feed['feedname'])]\n try:\n r = requests.get(url_to_request)\n if r.status_code == 200:\n data_to_write = r.content\n self.s3helper.write_bytes(data_to_write, self.bucket, key=prefix+fp)\n self.print_func('Raw data ingested from {} to {} at {} UTC'.format(url_to_request, prefix+fp, datetime_retrieved))\n else:\n self.print_func('Received status code {} from {} feed.'.format(r.status_code,self.feed['feedname']))\n self.print_func('Skip triggering ingestion of {} to sandbox.'.format(self.feed['feedname']))\n self.print_func('Skip triggering ingestion of {} to Socrata.'.format(self.feed['feedname']))\n return\n except BaseException as e:\n data_to_write = f'The feed at {datetime_retrieved.isoformat()}.'.encode('utf-8')\n fp += '__FEED_NOT_RETRIEVED'\n self.s3helper.write_bytes(data_to_write, self.bucket, key=prefix+fp)\n self.print_func('We could not ingest data from {} at {} UTC'.format(url_to_request, datetime_retrieved))\n raise e\n\n # trigger semi-parse ingest\n if self.feed['pipedtosandbox'] == True:\n self.print_func('Trigger {} for {}'.format(self.lambda_to_trigger, self.feed['feedname']))\n lambda_client = self.s3helper.session.client('lambda')\n data_to_send = {'feed': self.feed, 'bucket': self.bucket, 'key': prefix+fp}\n response = lambda_client.invoke(\n FunctionName=self.lambda_to_trigger,\n InvocationType='Event',\n LogType='Tail',\n ClientContext='',\n Payload=json.dumps(data_to_send).encode('utf-8')\n )\n self.print_func(response)\n else:\n self.print_func('Skip triggering ingestion of {} to sandbox.'.format(self.feed['feedname']))\n\n # trigger ingest to socrata\n if self.feed['pipedtosocrata'] == True:\n self.print_func('Trigger {} for {}'.format(self.socrata_lambda_to_trigger, self.feed['feedname']))\n lambda_client = self.s3helper.session.client('lambda')\n data_to_send = {'feed': self.feed, 'bucket': self.bucket, 'key': prefix+fp}\n response = lambda_client.invoke(\n FunctionName=self.socrata_lambda_to_trigger,\n InvocationType='Event',\n LogType='Tail',\n ClientContext='',\n Payload=json.dumps(data_to_send).encode('utf-8')\n )\n self.print_func(response)\n else:\n self.print_func('Skip triggering ingestion of {} to Socrata.'.format(self.feed['feedname']))",
"def process_line(self,\n event_file,\n raw_log_line,\n header_length=29,\n log_filename=\"\"):\n\n event_data = {}\n for filter_name, regex in self._filters_dict.items():\n match = regex.search(raw_log_line)\n if match:\n match_data = match.groups()\n event_data[filter_name] = match_data\n\n if event_data:\n if log_filename:\n event_data[\"log_filename\"] = log_filename\n event_data[\"raw_log_line\"] = raw_log_line.rstrip()[header_length:]\n event_data[\"system_timestamp\"] = raw_log_line[1:27]\n event_data[\"matched_timestamp\"] = \\\n datetime.datetime.now().strftime(TIMESTAMP_FORMAT)\n event_file.write(json.dumps(event_data) + \"\\n\")\n event_file.flush()",
"def lambda_handler(event, context):\r\n print(\"Function triggered\")\r\n if 'local' == environ.get('APP_STAGE'):\r\n dynamodb = boto3.resource('dynamodb', endpoint_url='http://localhost:8000')\r\n table = dynamodb.Table(\"audiobooksDB\")\r\n else:\r\n dynamodb = boto3.resource('dynamodb')\r\n table = dynamodb.Table(environ[\"TABLE_NAME\"])\r\n s3 = boto3.client('s3')\r\n \r\n s3FileName = event['Records'][0]['s3']['object']['key'].replace(\"+\", \" \")\r\n bucketName = event['Records'][0]['s3']['bucket']['name']\r\n # Download file from the S3 bucket\r\n try:\r\n book = s3.get_object(Bucket=bucketName, Key=s3FileName)\r\n print(\"Loading file from S3 bucket\")\r\n bookContent = book[\"Body\"].read().decode(\"utf-8\", errors=\"ignore\").split(\"------ END METADATA --------\")\r\n metadata = json.loads(bookContent[0])\r\n bookContent = bookContent[1]\r\n # Polly accepts 100,000 chars at a time. We make chunks of 99990 because we put the part 1 maker in\r\n bookContent = [bookContent[i:i+99990] for i in range(0, len(bookContent), 99990)]\r\n except Exception as e:\r\n print(\"Error while downloading file \" + s3FileName + \"from the S3 bucket \" + bucketName)\r\n raise\r\n # Add part marker to book\r\n if len(bookContent) > 1:\r\n count = 0\r\n for chunk in bookContent:\r\n chunk += \"Part \" + str(count)\r\n hasShortPart = False\r\n audioURLs = []\r\n pollyClient = boto3.client('polly')\r\n for chunk in bookContent:\r\n try:\r\n chunk = convert_text_to_ssml(chunk)\r\n print(\"Asking Polly to record the current chunk\")\r\n response = pollyClient.start_speech_synthesis_task(\r\n Engine='standard',\r\n LanguageCode='en-GB',\r\n OutputFormat='mp3',\r\n OutputS3BucketName=environ['AUDIO_S3_BUCKET'],\r\n Text=chunk,\r\n TextType='ssml',\r\n VoiceId='Brian',\r\n SnsTopicArn=environ[\"SNS_TOPIC\"],\r\n )\r\n\r\n audioURLs.append(response[\"SynthesisTask\"][\"OutputUri\"].split(\"amazonaws.com/\")[-1])\r\n if len(chunk) <= 2000:\r\n hasShortPart = True\r\n print(response)\r\n print(\"Polly was successfully asked to to record the current chunk\")\r\n except Exception as e:\r\n print(\"Error parsing chunk or requesting Polly to say it\")\r\n raise\r\n try:\r\n randomString = ''.join([random.choice(string.ascii_letters \r\n + string.digits) for n in range(32)]) \r\n audiobook = {\r\n \"id\": randomString,\r\n \"bookName\": metadata[\"bookName\"],\r\n \"imageURL\": metadata[\"imageURL\"],\r\n \"authorName\":metadata[\"authorName\"],\r\n \"genres\": metadata[\"genres\"],\r\n \"audioURLs\": audioURLs,\r\n \"description\": metadata[\"description\"],\r\n \"hidden\": False,\r\n \"hasShortPart\": hasShortPart,\r\n \"addedAt\": Decimal(datetime.now().timestamp())\r\n }\r\n response = table.put_item(\r\n Item=audiobook\r\n )\r\n except Exception as e:\r\n print(\"Exception inserting into database\")\r\n print(audiobook)\r\n print(response)\r\n raise\r\n return {\r\n \"statusCode\": 200,\r\n \"body\": json.dumps({\r\n \"message\": audioURLs\r\n }),\r\n }",
"def events(self):\n for line_num, line in enumerate(self.file_handler):\n if not line:\n break\n # process line input to dictionary\n data = json.loads(line)\n # add id information\n data['id'] = line_num\n # update timestamp history\n timestamp = self._get_timestamp(data)\n self.last_two_timestamps = [self.last_two_timestamps[-1], timestamp]\n self.event_timestamps[line_num] = timestamp\n\n self.alarms.append(0) # add field for alarms\n self.users.append(data['user']) # add field for user\n self.anomalies.append(data.get('is_anomaly', 0)) # add field for anomalies\n if 'is_anomaly' in data:\n del data['is_anomaly'] # remove anomaly information from data for contestants\n\n # return line id and serialized JSON as string representing one event\n str_dump = json.dumps(data)\n logger.info(self._get_inner_time() + ' > ' + str_dump)\n yield line_num, str_dump",
"def handler(event, context):\n if event['Records'][0]['Sns']['Message'] is None:\n _print_info('Unrecognized event, function will not be executed. Enable debug to log the actual event.')\n _print_debug('event: {}'.format(event))\n return\n\n message = event['Records'][0]['Sns']['Message']\n _print_debug('message received: {}'.format(message))\n\n event = json.loads(message)\n _print_info('event: {}'.format(json.dumps(event)))\n\n if event[ACTION] in ALLOWED_ACTIONS:\n\n _print_info('Requested action: {}'.format(event[ACTION]))\n\n _print_info('Initializing.')\n _init_vars_()\n\n # create a hive cursor which can be passed around and then closed when done.\n cursor = _create_hive_cursor()\n\n if event[ACTION] == FULL_SYNC:\n _sync_all(cursor)\n if event[ACTION] == DELTA_SYNC:\n if event[USER] and event[NAMESPACE]:\n _sync_delta(cursor, event[USER], event[NAMESPACE])\n else:\n _print_error(\n 'Invalid request. Expecting both: a valid \\'{}\\' and a valid \\'{}\\''.format(\n USER, NAMESPACE))\n\n # close the hive cursor when done\n _close_hive_cursor(cursor)\n else:\n _print_error(\n 'Unknown action. Expecting one of: \\'{}\\', \\'{}\\''.format(FULL_SYNC,\n DELTA_SYNC))",
"def event(event, context):\n# Sample event:\n #\n # _event = { \"Records\":[\n # {\n # \"eventVersion\":\"2.1\",\n # \"eventSource\":\"aws:s3\",\n # \"awsRegion\":\"us-east-1\",\n # \"eventTime\":\"2021-10-14T07:40:55.113Z\",\n # \"eventName\":\"ObjectCreated:Put\",\n # \"userIdentity\":{\n # \"principalId\":\"AWS:AROA6L2YJX2JCJYHEJ4UI:serverless-image-processing-test-create\"\n # },\n # \"requestParameters\":{\n # \"sourceIPAddress\":\"94.140.8.209\"\n # },\n # \"responseElements\":{\n # \"x-amz-request-id\":\"7CJHSGZ9MZF9995F\",\n # \"x-amz-id-2\":\"X5OtpRb+P9CuYKDHvjT8z9prnqqsH1yatZchN2uw8/158mcRUVhQNSW/z5ffXLqkLhu+4Kc163vZiRgVk3XaGd8H1NhZCu8N\"\n # },\n # \"s3\":{\n # \"s3SchemaVersion\":\"1.0\",\n # \"configurationId\":\"9b8f4135-35d4-4e07-b8a5-7d68cc95870b\",\n # \"bucket\":{\n # \"name\":\"serverless-image-processing-test-serverless-image-processing\",\n # \"ownerIdentity\":{\n # \"principalId\":\"A5IHQSLNTJKZN\"\n # },\n # \"arn\":\"arn:aws:s3:::serverless-image-processing-test-serverless-image-processing\"\n # },\n # \"object\":{\n # \"key\":\"test/6e7ef3f0-dcb6-4db6-9518-3bc6ec0ba492\",\n # \"size\":116716,\n # \"eTag\":\"f04e70e100f653a0e67f32f6098dea1c\",\n # \"sequencer\":\"006167DF06C888A626\"\n # }\n # }\n # }\n # ]\n # }\n\n logger.debug('event: {}'.format(event))\n for record in event['Records']:\n processRecord(record)\n\n return {'statusCode': httplib.ACCEPTED}",
"def run(self, event, context):\n logger.debug('Number of Records: %d', len(event.get('Records', [])))\n\n config = load_config()\n env = load_env(context)\n\n for record in event.get('Records', []):\n payload = StreamPayload(raw_record=record)\n classifier = StreamClassifier(config=config)\n classifier.map_source(payload)\n\n # If the kinesis stream or s3 bucket is not in our config,\n # go onto the next record\n if not payload.valid_source:\n continue\n\n if payload.service == 's3':\n self.s3_process(payload, classifier)\n elif payload.service == 'kinesis':\n self.kinesis_process(payload, classifier)\n elif payload.service == 'sns':\n self.sns_process(payload, classifier)\n else:\n logger.info('Unsupported service: %s', payload.service)\n\n # returns the list of generated alerts\n if self.return_alerts:\n return self.alerts\n # send alerts to SNS\n self.send_alerts(env, payload)",
"def s3_process(self, payload, classifier):\n s3_file_lines = StreamPreParsers.pre_parse_s3(payload.raw_record)\n for line in s3_file_lines:\n data = line.rstrip()\n payload.refresh_record(data)\n self.process_alerts(classifier, payload, data)",
"def process_log_file(cur, filepath):\n \n # open log file\n \n df = pd.read_json(filepath, lines = True)\n \n # filter by NextSong action\n df = df[df['page']=='NextSong']\n # convert timestamp column to datetime\n t = pd.to_datetime(df.ts, unit='ms')\n df.ts = t\n \n # insert time data records\n time_data = [t, t.dt.hour, t.dt.day, t.dt.weekofyear,\n t.dt.month, t.dt.year, t.dt.weekday]\n \n # column_labels = ['timestamp','Hour', \n # 'Day','Month','Year''Weekday']'\n column_labels = ['timestamp','hour','day','weekofyear','month','year','weekday']\n time_df = pd.DataFrame(dict(zip(column_labels, time_data)))\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n \n # load user table\n user_df = df[['userId','firstName', \n 'lastName','gender','level']]\n\n # insert user records\n for i, row in user_df.iterrows(): \n cur.execute(user_table_insert, row)\n \n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist,\n row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (index, row.ts, row.userId, row.level,\n songid, artistid, row.sessionId, \n row.location, row.userAgent)\n \n \n cur.execute(songplay_table_insert, songplay_data)",
"def _ProcessStreamingOutput(line):\n line = line.strip()\n if not line:\n return\n log_record = json.loads(line)\n if 'status' in log_record:\n feedback = log_record['status'].strip()\n if 'progress' in log_record:\n feedback += ': ' + log_record['progress'] + '\\r'\n else:\n feedback += '\\n'\n log.info(feedback)\n elif 'error' in log_record:\n error = log_record['error'].strip()\n log.error(error)\n raise Error('Unable to push the image to the registry: \"%s\"' % error)\n elif 'errorDetail' in log_record:\n error_detail = log_record['errorDetail'] or 'Unknown Error'\n raise Error('Unable to push the image to the registry: \"%s\"'\n % error_detail)",
"def process_log_file(cursor, filepath):\n\n def get_timestamp_data(df):\n # convert timestamp column to datetime\n timestamp = pd.to_datetime(df['ts'], unit='ms')\n\n return (df['ts'].values,\n timestamp.dt.hour.values,\n timestamp.dt.day.values,\n timestamp.dt.week.values,\n timestamp.dt.month.values,\n timestamp.dt.year.values,\n timestamp.dt.weekday.values)\n\n # open log file\n df = pd.read_json(filepath, lines=True)\n\n # filter by NextSong action\n df = df[df['page'] == 'NextSong']\n\n # insert time data records\n time_data = get_timestamp_data(df)\n column_labels = ('timestamp', 'hour', 'day', 'week', 'month', 'year', 'weekday')\n time_df = pd.DataFrame(data=dict(zip(column_labels, time_data)))\n\n for i, row in time_df.iterrows():\n cursor.execute(time_table_insert, list(row))\n\n # load user table\n user_columns = ['userId', 'firstName', 'lastName', 'gender', 'level']\n user_df = df[user_columns]\n\n # insert user records\n for i, row in user_df.iterrows():\n cursor.execute(user_table_insert, row)\n\n # insert songplay records\n for index, row in df.iterrows():\n\n # get song_id and artist_id from song and artist tables\n cursor.execute(song_select, (row.song, row.artist, row.length))\n results = cursor.fetchone()\n\n if results:\n song_id, artist_id = results\n else:\n song_id, artist_id = None, None\n\n # insert songplay record\n songplay_data = (\n row['ts'], row['userId'], row['level'], song_id, artist_id, row['sessionId'], row['location'],\n row['userAgent'])\n cursor.execute(songplay_table_insert, songplay_data)"
] | [
"0.6164194",
"0.5861876",
"0.5760497",
"0.57345635",
"0.57181984",
"0.5717499",
"0.5713318",
"0.5705176",
"0.5685681",
"0.56830186",
"0.56588256",
"0.5655359",
"0.561172",
"0.55994326",
"0.55731946",
"0.5565909",
"0.5533072",
"0.5529683",
"0.546801",
"0.5466014",
"0.5438785",
"0.5425353",
"0.5424664",
"0.54234695",
"0.54231244",
"0.53923154",
"0.53886384",
"0.53876454",
"0.5359892",
"0.53534085"
] | 0.69749135 | 0 |
Strip a malformed YAML directive from the top of a file. Returns the slurped (!) file. | def strip_malformed_directive(yaml_file):
    lines = list(yaml_file)
    first_line = lines[0]
    if first_line.startswith('%') and ":" in first_line:
        return "\n".join(lines[1:])
    else:
        return "\n".join(lines) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_yml_file_without_meta(yml_file):\n with open(yml_file, \"r\") as f:\n data = yaml.load(f, Loader=yaml.FullLoader)\n # get the meta index first\n mi = -1\n for i in range(len(data)):\n if \"meta\" not in data[i]:\n mi = i\n break\n assert mi >= 0\n\n return data[mi:]",
"def strip_yaml_from_docstring(docstring):\n split_lines = trim_docstring(docstring).split('\\n')\n\n cut_off = None\n for index in range(len(split_lines) - 1, -1, -1):\n line = split_lines[index]\n line = line.strip()\n if line == '---':\n cut_off = index\n break\n if cut_off is not None:\n split_lines = split_lines[0:cut_off]\n\n return \"\\n\".join(split_lines)",
"def load_yaml_raw(path: str, indent: int = 0) -> str:\n lines = []\n with open(path, \"r\", encoding=\"utf8\") as fp:\n for line in fp:\n # Remove: empty lines and ---\n if line in (\"---\\n\", \"---\\r\\n\", \"\\n\", \"\\r\\n\"):\n continue\n # Remove: comments\n if line.startswith(\"#\"):\n continue\n lines.append(\" \" * indent + line)\n return \"\".join(lines)",
"def to_normal_yaml(filename):\n\n with open(filename, 'r') as f:\n data = f.read()\n\n out_str = ''\n next_line_break = False\n for line in data.split('\\n'):\n # get_input not supported by run-os-net-config.sh script\n line = line.replace('get_input: ', '')\n # Normal comments\n m = re.match(\" +comment[0-9]+_([0-9]+): '(.*)'.*\", line)\n # Inline comments\n i = re.match(\" +inline_comment[0-9]+: '(.*)'.*\", line)\n if m:\n if next_line_break:\n out_str += '\\n'\n next_line_break = False\n for x in range(0, int(m.group(1))):\n out_str += \" \"\n out_str += \"#%s\\n\" % m.group(2)\n elif i:\n out_str += \" #%s\\n\" % i.group(1)\n next_line_break = False\n else:\n if next_line_break:\n out_str += '\\n'\n out_str += line\n next_line_break = True\n\n if next_line_break:\n out_str += '\\n'\n\n with open(filename, 'w') as f:\n f.write(out_str)\n\n return out_str",
"def read_yaml_file(yaml_file):\n with open(yaml_file, 'r') as yfile:\n loaded_file = yaml.safe_load(yfile)\n return loaded_file",
"def load_yaml_guess_indent(stream, **kw):\n # type: (StreamTextType, Any) -> Any\n from .main import round_trip_load\n\n # load a YAML document, guess the indentation, if you use TABs you're on your own\n def leading_spaces(line):\n # type: (Any) -> int\n idx = 0\n while idx < len(line) and line[idx] == \" \":\n idx += 1\n return idx\n\n if isinstance(stream, text_type):\n yaml_str = stream # type: Any\n elif isinstance(stream, binary_type):\n # most likely, but the Reader checks BOM for this\n yaml_str = stream.decode(\"utf-8\")\n else:\n yaml_str = stream.read()\n map_indent = None\n indent = None # default if not found for some reason\n block_seq_indent = None\n prev_line_key_only = None\n key_indent = 0\n for line in yaml_str.splitlines():\n rline = line.rstrip()\n lline = rline.lstrip()\n if lline.startswith(\"- \"):\n l_s = leading_spaces(line)\n block_seq_indent = l_s - key_indent\n idx = l_s + 1\n while line[idx] == \" \": # this will end as we rstripped\n idx += 1\n if line[idx] == \"#\": # comment after -\n continue\n indent = idx - key_indent\n break\n if map_indent is None and prev_line_key_only is not None and rline:\n idx = 0\n while line[idx] in \" -\":\n idx += 1\n if idx > prev_line_key_only:\n map_indent = idx - prev_line_key_only\n if rline.endswith(\":\"):\n key_indent = leading_spaces(line)\n idx = 0\n while line[idx] == \" \": # this will end on ':'\n idx += 1\n prev_line_key_only = idx\n continue\n prev_line_key_only = None\n if indent is None and map_indent is not None:\n indent = map_indent\n return round_trip_load(yaml_str, **kw), indent, block_seq_indent",
"def stripSignature(file):\n\n while '|9999|' not in file[-1]:\n file.pop(-1)\n\n return file",
"def drop_yaml_node(\n orig_lines: FileLines,\n key: str,\n *,\n filename: str,\n) -> FileLines:\n lines = iter(enumerate(orig_lines))\n where = None\n for n, line in lines:\n if line.startswith(f'{key}:'):\n if where is not None:\n warn(\n f\"Duplicate {key}: setting in {filename}\"\n f\" (lines {where + 1} and {n + 1})\"\n )\n where = n\n if where is None:\n return orig_lines\n\n lines = iter(enumerate(orig_lines[where + 1:], where + 1))\n\n start = where\n end = start + 1\n for n, line in lines:\n if line and line[0] != ' ':\n break\n else:\n end = n + 1\n new_lines = orig_lines[:start] + orig_lines[end:]\n\n return new_lines",
"def UnDumpInfo(self):\n filename = '%s/preprocess_info.yaml' % self.logdir\n f = open(filename,'r')\n self.info = yaml.load(f.read())\n f.close()",
"def read_file_and_maybe_fix_it(filename, fix):\n\twith open(filename, 'r') as istr:\n\t\tlines = istr.readlines()\n\tif fix:\n\t\tlines = list(map(fix_horizontal, fix_vertical(lines)))\n\t\tbackupname = make_backup_file_name(filename)\n\t\tshutil.copy(filename, backupname)\n\t\twith open(filename, 'w') as ostr:\n\t\t\tostr.writelines(lines)\n\treturn lines",
"def re_dump(file_path: Union[Path, str]):\n file_: Path = Path(file_path)\n xyz = YAML(typ=\"safe\", pure=True).load(file_.open().read())\n\n with YAML(output=sys.stdout) as yaml:\n yaml.indent(mapping=2, sequence=4, offset=2)\n yaml.width = PAGE_WIDTH\n yaml.dump(xyz)",
"def clean_file(file_contents):\n commentless_file = _strip_comments(file_contents)\n assembly_code = _remove_whitespace(commentless_file)\n return assembly_code",
"def parse_frontmatter_and_strip(self):\n assert self._raw_content\n raw_content = self._raw_content\n\n if raw_content.startswith('---'):\n raw_content = raw_content[3:]\n\n tridash_re = re.compile('^-{3,5}\\s*$', re.MULTILINE)\n m = tridash_re.search(raw_content)\n if m:\n start, end = m.span()\n # start is the 1st dash index\n # end is the index of '\\n' in the same line\n self.frontmatter = raw_content[:start]\n self.md = raw_content[end+1:]\n else:\n self.frontmatter = None\n self.md = raw_content\n if self.frontmatter:\n # strings in fm is unicode or ascii depending on whether\n # the object is an ascii string or not\n fm = yaml.load(self.frontmatter)\n else:\n fm = {}\n self.set_tags(fm)\n self.set_title(fm)\n self.set_category(fm)",
"def test_load_dangling(self):\n with NamedTemporaryFile(suffix=\".yaml\") as config:\n with open(config.name, \"w\") as write_stream:\n write_stream.write(\n \"\"\"\n pipeline:\n - !LinearController\n low_utilisation: 0.9\n high_utilisation: 1.1\n - !MockPool\n random_things:\n foo: bar\n \"\"\"\n )\n with pytest.raises(ConfigurationError):\n with load(config.name):\n assert False",
"def skip_gutenberg_header(fp):\n for line in fp:\n if line.startswith(''):\n break",
"def read_config_developer_file(cfg_file=None):\n if cfg_file is None:\n cfg_file = Path(__file__).parents[1] / 'config-developer.yml'\n\n with open(cfg_file, 'r') as file:\n cfg = yaml.safe_load(file)\n\n if 'obs4mips' in cfg:\n logger.warning(\n \"Correcting capitalization, project 'obs4mips'\"\n \" should be written as 'obs4MIPs' in %s\", cfg_file)\n cfg['obs4MIPs'] = cfg.pop('obs4mips')\n\n return cfg",
"def strip_attributes(arff_file):\r\n start = arff_file.find('% filename')\r\n new_arff = arff_file[start:]\r\n return new_arff",
"def _LoadTemplate(self,fname):\n f = open(fname, 'r')\n lines = f.readlines()\n data = ''\n for line in lines:\n if not line.startswith('---'):\n data += line\n data = data.replace('\\t',' ')\n if '\\t' in data:\n errstr = \\\n 'Illegal tabs encountered in template file. Use spaces instead.'\n raise ScannerError(errstr)\n proc.LogErrors(errstr)\n tmplt = yaml.load(data)\n f.close()\n return tmplt",
"def read_yaml(fname):\n\n with open(fname, 'r') as stream:\n try:\n return yaml.load(stream)\n except yaml.YAMLError as exc:\n return None",
"def strip_header(book):\n\theader_regex = header_string_regex()\n\theader_match = re.search(header_regex, book)\n\n\theader_end = 0\n\tif header_match:\n\t\theader_end = header_match.end()\n\n\treturn book[header_end:]",
"def stripped_tokens_of_file(filename):\n tokens = list(tokenize.tokenize(BytesIO(bytes(open(filename).read(),\"utf8\")).readline))\n str_tokens = [x.string for x in tokens]\n str_tokens = [x for x in str_tokens if x != '\\n']\n str_tokens = [x for x in str_tokens if x.strip() != '']\n # remove comments\n str_tokens = [x for x in str_tokens if len(x) >= 0 and x[0] != '#']\n return str_tokens[1:] # get rid of utf-8 encoding prefix",
"def strip(self, src):\r\n # single-quoted character\r\n p = \"('.')\"\r\n \r\n # double-quoted string\r\n p += \"|(\\\"(?:[^\\\"\\\\\\\\]|\\\\\\\\.)*\\\")\"\r\n \r\n # single and multi-line comment\r\n p += \"|(//.*?$)|(/\\\\*[^*]*(?:\\\\*(?!/)[^*]*)*\\\\*/)\"\r\n \r\n # pre-processor directive\r\n p += \"|\" + \"(^\\\\s*#.*?$)\"\r\n\r\n regex = re.compile(p, re.MULTILINE)\r\n return regex.sub(' ', src)",
"def load_yaml(filepath):\n with open(filepath, 'r') as stream:\n try:\n return yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)",
"def _load_file(self, f):\n if not os.path.exists(f):\n msg = '%s is a non-existant definition file' % f\n raise ValueError(msg)\n\n with open(f, 'r') as fh:\n return yaml.load(fh.read())",
"def strip_file(f,leave_header=True):\n thefile = open(f)\n if leave_header : n=1\n else : n = 2\n r = map(str.strip,thefile.readlines())\n thefile.close()\n try :\n r = r[r.index(\"\")+n:]\n except :\n print( \"Incorrect headers in %s\" % f)\n \n return(r)",
"def read_yaml(file):\n with open(file, mode='r') as stream:\n out = yaml.load(stream)\n\n return out",
"def invalid_yaml_error():\n\n clowder_output = yaml_file('clowder.yaml')\n return '\\n' + clowder_output + ' appears to be invalid'",
"def missing_yaml_error():\n\n clowder_output = yaml_file('clowder.yaml')\n return clowder_output + ' appears to be missing'",
"def get_yaml(path):\n end = False\n yaml = \"\"\n num = 0\n\n with open(path, 'r') as f:\n\n for line in f.readlines():\n if line.strip() == '---':\n if end:\n break\n else:\n end = True\n continue\n else:\n num += 1\n\n yaml += line\n\n return yaml, num",
"def load_yaml(file: Text):\n with open(file) as fp:\n return yaml.load(fp, yaml.FullLoader)"
] | [
"0.63308746",
"0.62082046",
"0.5571192",
"0.5300692",
"0.522862",
"0.5136453",
"0.51294416",
"0.5127715",
"0.511309",
"0.5069137",
"0.5064061",
"0.50461096",
"0.503939",
"0.50142545",
"0.49752197",
"0.49716073",
"0.49489355",
"0.4923321",
"0.49160346",
"0.49102274",
"0.49076545",
"0.49073148",
"0.49013847",
"0.48903933",
"0.48858318",
"0.48786426",
"0.4873066",
"0.48598558",
"0.48514545",
"0.4831413"
] | 0.67927957 | 0 |
Convert an erroneous custom tag, !!opencvmatrix, to the correct !opencvmatrix, in a stream of YAML events. | def convert_opencvmatrix_tag(yaml_events):
    for event in yaml_events:
        if hasattr(event, "tag") and event.tag == u"tag:yaml.org,2002:opencv-matrix":
            event.tag = u"!opencv-matrix"
        yield event | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_reads_opencv_from_xml() -> None:\n imgsz = (4288, 2848)\n f = {\"fx\": 3.57e03, \"fy\": 3.58e03}\n c = {\"cx\": 2.15e03, \"cy\": 1.43e03}\n coeffs = {\n \"k1\": 1.1e-01,\n \"k2\": -1.2e-01,\n \"p1\": -9.98e-03,\n \"p2\": 9.99e-03,\n \"k3\": 1.0e-02,\n \"k4\": 1.1e-03,\n \"k5\": 1.2e-03,\n \"k6\": 1.3e-03,\n \"s1\": 1.0e-05,\n \"s2\": 1.1e-05,\n \"s3\": 1.2e-05,\n \"s4\": 1.3e-05,\n }\n arrays: Dict[str, Any] = {\n \"cameraMatrix\": [(f[\"fx\"], 0, c[\"cx\"]), (0, f[\"fy\"], c[\"cy\"]), (0, 0, 1)],\n \"distCoeffs\": list(coeffs.values()),\n }\n path = os.path.join(\"tests\", \"opencv.xml\")\n xcam_auto = OpenCV.from_xml(path, imgsz=imgsz)\n xcam_params = OpenCV(imgsz=imgsz, **{**f, **c, **coeffs})\n assert vars(xcam_auto) == vars(xcam_params)\n xcam_arrays = OpenCV.from_arrays(imgsz=imgsz, **arrays)\n assert vars(xcam_auto) == vars(xcam_arrays)",
"def process_cvat_xml(xml_file, image_dir, output_dir):\n KNOWN_TAGS = {'box', 'image', 'attribute'}\n #output_dir = os.path.join(output_dir, \"Annotations\")\n os.makedirs(output_dir, exist_ok=True)\n cvat_xml = etree.parse(xml_file)\n\n basename = os.path.splitext( os.path.basename( xml_file ) )[0]\n\n tracks= cvat_xml.findall( './/track' )\n\n if (tracks is not None) and (len(tracks) > 0):\n frames = {}\n\n for track in tracks:\n trackid = int(track.get(\"id\"))\n label = track.get(\"label\")\n boxes = track.findall( './box' )\n for box in boxes:\n frameid = int(box.get('frame'))\n outside = int(box.get('outside'))\n ## occluded and pose are not tested within tracks\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified'\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n #keyframe = int(box.get('keyframe')) #currently unused\n xtl = float(box.get('xtl'))\n ytl = float(box.get('ytl'))\n xbr = float(box.get('xbr'))\n ybr = float(box.get('ybr'))\n \n frame = frames.get( frameid, {} )\n \n if outside == 0:\n frame[ trackid ] = { 'xtl': xtl, 'ytl': ytl, 'xbr': xbr, 'ybr': ybr, 'label': label,\n 'pose': pose, 'truncated': occluded }\n\n frames[ frameid ] = frame\n\n width = int(cvat_xml.find('.//original_size/width').text)\n height = int(cvat_xml.find('.//original_size/height').text)\n\n # Spit out a list of each object for each frame\n for frameid in sorted(frames.keys()):\n print( frameid )\n\n image_name = \"%s_%08d.jpg\" % (basename, frameid) ## KM: Revisit this for tracks. Hardcoded?\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n frame = frames[frameid]\n\n objids = sorted(frame.keys())\n\n for objid in objids:\n\n box = frame[objid]\n\n label = box.get('label')\n occluded = box.get('occluded')\n pose = box.get('pose')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n writer.save(os.path.join(anno_dir, anno_name))\n\n else:\n for img_tag in cvat_xml.findall('image'):\n ## Discard path component; we expect user to provide path to images directory.\n ## It is probably easier for users to provide full path to images directory\n ## rather than having to figure out how much of the path is embedded in the XML\n ## as a relative or absolute path by CVAT.\n image_name = os.path.basename(img_tag.get('name'))\n width = img_tag.get('width')\n height = img_tag.get('height')\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. 
Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n unknown_tags = {x.tag for x in img_tag.iter()}.difference(KNOWN_TAGS)\n if unknown_tags:\n log.warn('Ignoring tags for image {}: {}'.format(image_path, unknown_tags))\n\n for box in img_tag.findall('box'):\n label = box.get('label')\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified' ## Default if not found\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = output_dir #os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n #print(\"Writing {} (image: {})\".format(anno_name, image_name))\n writer.save(os.path.join(anno_dir, anno_name))",
"def make_opencv_matrix_xml_element(root, mat, name):\n mat_element = etree.SubElement(root, name, attrib={\"type_id\": \"opencv-matrix\"})\n rows_elem = etree.SubElement(mat_element, \"rows\")\n rows_elem.text = str(mat.shape[0])\n cols_elem = etree.SubElement(mat_element, \"cols\")\n cols_elem.text = str(mat.shape[1])\n dt_elem = etree.SubElement(mat_element, \"dt\")\n if mat.dtype == np.dtype('float64'):\n dt_elem.text = \"d\"\n elif mat.dtype == np.dtype(\"float32\"):\n dt_elem.text = \"f\"\n else:\n raise ValueError(\"dtype \" + str(mat.dtype) + \"not supported. Expecting float64 or float32.\")\n\n data_elem = etree.SubElement(mat_element, \"data\")\n data_string = str(mat.flatten()).replace(\"\\n\", \"\").replace(\"[\", \"\").replace(\"]\", \"\")\n data_string = re.sub(\"\\s+\", \" \", data_string)\n data_elem.text = data_string\n return mat_element",
"def test_process_metadata_0(self):\n data = ET.parse(\"data/metadata_0.xml\")\n data_str = ET.tostring(data.getroot())\n\n pre = tesse_ros_bridge.enu_T_unity\n post = tesse_ros_bridge.brh_T_blh\n\n dict = tesse_ros_bridge.utils.parse_metadata(data_str)\n proc = tesse_ros_bridge.utils.process_metadata(dict, dict['time']-2,\n [0,0,0], np.identity(3))\n\n transform = proc['transform']\n transform_R = transform[:3,:3]\n transform_t = transform[:3,3]\n\n # First check the transformation matrix.\n # Right-handed check.\n self.assertEqual(np.linalg.det(transform_R), 1)\n # X and Z axes are switched:\n self.assertEqual(transform_t[0], dict['position'][0])\n self.assertEqual(transform_t[1], dict['position'][2])\n self.assertEqual(transform_t[2], dict['position'][1])\n\n truth_quat = tf.transformations.quaternion_from_matrix((pre.dot(\n post)).dot(tf.transformations.quaternion_matrix(\n dict['quaternion'])))\n self.assertTrue(np.allclose(proc['quaternion'], truth_quat))\n\n self.assertTrue(np.allclose(proc['velocity'],\n post[:3,:3].dot(dict['velocity'])))\n\n # TODO(marcus): this is not correct.\n self.assertTrue(np.allclose(proc['ang_vel'],\n post[:3,:3].dot(dict['ang_vel'])))\n\n # print dict['ang_vel']\n\n self.assertTrue(np.allclose(proc['acceleration'], proc['velocity']*0.5))\n\n self.assertEqual(proc['time'], dict['time'])\n self.assertEqual(proc['collision_status'], dict['collision_status'])",
"def _decode(self, lnestream: np.ndarray) -> np.ndarray:\n symstream = lnestream.copy()\n\n if \"replace\" in self.config.options:\n for label in self.config.options[\"replace\"][\"affected_labels\"]:\n for label_col_idx in self.config.label_columns:\n # Assume the traces starts and ends with the same app...\n if (\n symstream[0, label_col_idx]\n in self.config.options[\"replace\"][\"affected_labels\"]\n and symstream[-1, label_col_idx]\n in self.config.options[\"replace\"][\"affected_labels\"]\n ):\n # Find first label not to replace and set zero/last label\n other_idxes = np.where(symstream[:, label_col_idx] != label)[0]\n symstream[0 : other_idxes[0], label_col_idx] = symstream[\n other_idxes[0], label_col_idx\n ]\n\n symstream[other_idxes[-1] + 1 :, label_col_idx] = symstream[\n other_idxes[-1], label_col_idx\n ]\n elif (\n symstream[0, label_col_idx]\n in self.config.options[\"replace\"][\"affected_labels\"]\n ):\n symstream[0, label_col_idx] = symstream[-1, label_col_idx]\n elif (\n symstream[-1, label_col_idx]\n in self.config.options[\"replace\"][\"affected_labels\"]\n ):\n symstream[-1, label_col_idx] = symstream[0, label_col_idx]\n labels_to_replace = np.where(symstream[:, label_col_idx] == label)[0]\n if len(labels_to_replace) == 0:\n continue\n if self.config.options[\"replace\"][\"with\"] == \"before\":\n\n def replace_function(idx):\n symstream[idx, label_col_idx] = symstream[idx - 1, label_col_idx]\n\n elif self.config.options[\"replace\"][\"with\"] == \"after\":\n\n def replace_function(idx):\n symstream[idx, label_col_idx] = symstream[idx + 1, label_col_idx]\n\n labels_to_replace[::-1].sort()\n vec_replace_function = np.vectorize(replace_function)\n vec_replace_function(labels_to_replace)\n\n if \"smoothen\" in self.config.options:\n for label_col_idx in self.config.label_columns:\n label_changes = np.where(np.diff(symstream[:, label_col_idx], axis=0) != 0)[0]\n for idx in range(len(label_changes[:-1])):\n if (\n symstream[label_changes[idx + 1], 0] - symstream[label_changes[idx], 0]\n < self.config.options[\"smoothen\"][\"threshold_s\"]\n ):\n # print(symstream[label_changes[idx],0] - symstream[label_changes[idx+1],0])\n middle_idx = label_changes[idx] + math.ceil(\n (label_changes[idx + 1] - label_changes[idx]) / 2\n )\n symstream[label_changes[idx] : middle_idx, label_col_idx] = symstream[\n label_changes[idx] - 1, label_col_idx\n ]\n symstream[\n middle_idx : label_changes[idx + 1] + 1, label_col_idx\n ] = symstream[label_changes[idx + 1] + 1, label_col_idx]\n\n if \"extend\" in self.config.options:\n for label_col_idx in self.config.label_columns:\n label_changes = np.where(np.diff(symstream[:, label_col_idx], axis=0) != 0)[0]\n labels_to_change = self.config.options[\"extend\"][\"affected_labels\"]\n front = self.config.options[\"extend\"][\"extension_period_s\"][0]\n back = self.config.options[\"extend\"][\"extension_period_s\"][1]\n\n for idx in label_changes:\n cur_label = symstream[idx, label_col_idx]\n nxt_label = symstream[idx + 1, label_col_idx]\n\n if cur_label in labels_to_change: # and nxt_label not in labels_to_change:\n # Extend cur_label by back - always do this and also overwrite front extension\n extension_idx = symstream.shape[0]\n time_idx = np.where(symstream[idx:, 0] >= symstream[idx, 0] + back)[0]\n if len(time_idx) > 0:\n extension_idx = min([extension_idx, time_idx[0] + idx])\n next_valid = np.where(symstream[idx + 1 :, label_col_idx] != nxt_label)[\n 0\n ]\n if next_valid.size > 0:\n extension_idx = min([extension_idx, next_valid[0] + idx + 
1])\n symstream[idx:extension_idx, label_col_idx] = cur_label\n\n if cur_label not in labels_to_change and nxt_label in labels_to_change:\n # Extend nxt_label by front only if label before is not to be extended\n extension_idx = 0\n time_idx = np.where(symstream[:idx, 0] <= symstream[idx, 0] - front)[0]\n if len(time_idx) > 0:\n extension_idx = max([extension_idx, time_idx[-1]])\n last_valid = np.where(symstream[: idx + 1, label_col_idx] != cur_label)[\n 0\n ]\n if last_valid.size > 0:\n extension_idx = max([extension_idx, last_valid[-1]])\n symstream[extension_idx : idx + 1, label_col_idx] = nxt_label\n\n if \"remove_labels\" in self.config.options:\n for label in self.config.options[\"remove_labels\"]:\n for label_col_idx in self.config.label_columns:\n sampling_period = np.diff(symstream[:, 0]).mean()\n symstream = symstream[symstream[:, label_col_idx] != label[\"int\"], :]\n discontinuities = np.where(\n np.diff(symstream[:, 0]) > (sampling_period * 1.5)\n )[0]\n symstream[:, 0] = np.linspace(\n 0, (symstream.shape[0] - 1) * sampling_period, symstream.shape[0]\n )\n for idx in range(len(discontinuities)):\n if len(symstream[discontinuities[idx] + 1 :, 0]) > 0:\n symstream[discontinuities[idx] + 1 :, 1:-2] = self.augment(\n symstream[discontinuities[idx] + 1 :, 0]\n - symstream[discontinuities[idx], 0],\n symstream[discontinuities[idx] + 1 :, 1:-2],\n symstream[discontinuities[idx], 1:-2],\n )\n\n X_cols = list(range(1, symstream.shape[1]))\n for ycol in self.config.label_columns:\n X_cols.remove(ycol)\n return np.hstack(\n (\n symstream[:, 0].reshape((-1, 1)),\n symstream[:, X_cols].reshape((-1, len(X_cols))),\n symstream[:, self.config.label_columns].reshape(\n (-1, len(self.config.label_columns))\n ),\n )\n )",
"def test_eval_omim(self):\n evaluator = self.engine\n eos = evaluator.eval(\"omim\")\n with open(PREDICTIONS_OMIM_OUT, \"w\") as f:\n yaml.dump(eos.dict(), f)",
"def from_matrix(matrix: types.Matrix) -> \"MatrixLieGroup\":",
"def _process_img_semantic(self, sensor_data):\n sensor_data.convert(self.cc)\n img = np.array(sensor_data.raw_data).reshape((self.img_y, self.img_x, 4))\n img = img[:, :, :3] # sensor is actualy rgba, we dont need alpha values\n self.semantic = img # need to scale rgb values to be {0,1}",
"def process_payload(payload):\n\n # Convertion of payload string to image array for opencv\n ret, img = make_image(payload)#ret is 0 when conversion is successful or 1 when not\n result='Unable to detect'\n if ret == 0:\n cv2.imwrite('received.png', img)\n try:\n roi = extract_roi_2(img)\n \n result = detect(roi) \n \n #write_characters(roi)\n\n except:\n result = \"----------------\"\n # # When roi is extracted its a 2d array \n \n return result",
"def parse_xml_matrix(mat_element):\n rows = int(mat_element.find(\"rows\").text)\n cols = int(mat_element.find(\"cols\").text)\n type_flag = mat_element.find(\"dt\").text\n if type_flag == \"f\":\n dtype = np.float32\n elif type_flag == \"d\":\n dtype = np.float64\n else:\n raise ValueError(\"dtype flag \" + type_flag + \" not supported.\")\n data_string = mat_element.find(\"data\").text\n data = np.array([float(part) for part in data_string.strip().split(\" \") if len(part) > 0])\n return data.reshape((rows, cols)).astype(dtype)",
"def matrixconverter(seqmatrix):\n\n\tdic = {0:\"A\",1:\"C\",2:\"G\",3:\"T\"} # dictionary of indexes of each nucleotide for matrices\n\ta = np.transpose(np.nonzero(np.transpose(seqmatrix))).tolist()\n\tseqstring = \"\"\n\tfor i in a:\n\t\tseqstring += dic[i[1]]\n\treturn seqstring",
"def fix_montage(raw, timestamp):\n # These channels are not recorded during an EEG experiment or are not included in standard 10/20 montage.\n \n non_eeg = ['SaO2 SpO2', 'HR HR','Pulse Plet', 'ExG1', 'ExG2', 'EEG A1', 'EEG A2']\n \n #Check if EOG was recorded. If so, save it so it can later be added to the data.\n EOG_CHANNEL_FOUND = False\n if('ExG1' in raw.ch_names): \n eog_data = raw.copy().pick_channels(['ExG1']).get_data()\n EOG_CHANNEL_FOUND = True\n \n exclude = list(set(non_eeg).intersection(raw.ch_names))\n raw.drop_channels(exclude)\n \n raw.info['ch_names'] = [name.split(' ')[-1] for name in raw.info['ch_names']]\n\n orig_names = raw.ch_names\n montage = mne.channels.read_montage(kind = 'standard_1020', ch_names=raw.info['ch_names'])\n \n data = raw.get_data()\n \n channels_dict = {}\n \n for channel_name, channel_data in zip(orig_names, data):\n channels_dict[channel_name] = channel_data\n \n reordered_data = np.zeros(shape = data.shape) \n \n for idx, channel_name in enumerate(montage.ch_names):\n reordered_data[idx, :] = channels_dict[channel_name]\n \n new_info = mne.create_info(\n ch_names= list(montage.ch_names),\n sfreq = raw.info['sfreq'],\n ch_types = ['eeg'] * len(list(montage.ch_names)),\n #meas_date = [timestamp[0], 0] # Time of the first sample and something else. Not well documented.\n )\n \n # Create new dataset with reordered channels\n new_raw = mne.io.RawArray(reordered_data, new_info)\n # Set electrode localizations using standard 1020 montage\n new_raw.set_montage(montage)\n \n if(EOG_CHANNEL_FOUND): # Add it to other channels\n eog_channel = mne.io.RawArray(eog_data, mne.create_info( ch_names= ['ExG1'], sfreq = raw.info['sfreq'], ch_types = ['eog']))\n new_raw = new_raw.add_channels([eog_channel])\n \n return new_raw",
"def invalid_yaml_error():\n\n clowder_output = yaml_file('clowder.yaml')\n return '\\n' + clowder_output + ' appears to be invalid'",
"def fix_map_to_odom_transform(self, msg):\n (translation, rotation) = convert_pose_inverse_transform(self.robot_pose)\n p = PoseStamped(pose=convert_translation_rotation_to_pose(translation, rotation),\n header=Header(stamp=msg.header.stamp, frame_id=self.base_frame))\n self.odom_to_map = self.tf_listener.transformPose(self.odom_frame, p)\n (self.translation, self.rotation) = convert_pose_inverse_transform(self.odom_to_map.pose)",
"def convert_matrix(infile, names,refdict,nosamples):\n \n if infile.endswith(\".gz\"):\n inf = gzip.open(infile, \"rb\")\n \n else:\n inf = open(infile, \"r\")\n for line in inf:\n line = line.rsplit()\n if line[0] == \"chromosome\":\n pass # header\n else:\n \n\n chrom = line[0]\n start = line[1]\n stop = line[2]\n TE = line[4]\n n_te = str(len(TE.split(\",\")))\n tes=TE.split(\",\")\n tefam=[]\n tesuperfamily=[]\n \n \n for i in xrange(len(tes)):\n \n tefam.append(refdict[tes[i]][0])\n \n tesuperfamily.append(refdict[tes[i]][1])\n \n \n superfamily=list(set(tesuperfamily))\n if 'Unknown' in superfamily:\n superfamily.remove('Unknown')\n if not superfamily:\n superfamily.append('Unknown')\n \n pos = line[5].split(\",\")\n neg = line[6].split(\",\")\n#missing = 305-(len(pos)+len(neg))/305\n te_id = \"\\t\".join([chrom, start, stop])\n status = get_status(pos, neg, names)\n column_ordered = []\n for i in names:\n column_ordered.append(status[i])\n noNA = filter(lambda x: x != \"NA\", status.values()) \n noNA = map(int, noNA)\n pos_count = sum(noNA)\n l = len(noNA)\n neg_count = l - pos_count\n TE_present=pos_count\n TE_absent=neg_count\n if(pos_count < neg_count):\n Minor_allele=\"presence\"\n\n else:\n Minor_allele=\"absence\"\n#print Minor_allele\n q20=int(0.2*nosamples)\n q80=int(0.8*nosamples)\n if (TE_absent < q20):\n Absence_classification=\"True deletion\"\n elif (TE_absent > q80):\n Absence_classification=\"No insertion\"\n else:\n Absence_classification=\"NA\"\n original_call_deletion = 'T'\n MAF=float(min(TE_present, TE_absent))/nosamples\n #print int(min(TE_present, TE_absent)) ,MAF\n if(MAF < 0.025):\n Frequency_classification = \"Rare\"\n else:Frequency_classification =\"Common\"\n print(te_id + \"\\t\" + TE + \"\\t\" + \",\".join(tefam) + \"\\t\" +\",\".join(superfamily) + \"\\t\" +n_te + \"\\t\" + str(pos_count) + \"\\t\" + str(neg_count) + \"\\t\" +str(Minor_allele) + \"\\t\" +original_call_deletion + \"\\t\" +str(Absence_classification) + \"\\t\" +str(MAF) + \"\\t\" +str(Frequency_classification) + \"\\t\"+\"\\t\".join(column_ordered))\n inf.close()",
"def convert_mel_spectrogram_to_linear(self, mel: 'torch.tensor', **kwargs) -> 'torch.tensor':",
"def detect_matrix(self, current_frame):\n self.frame = current_frame[0]\n contours = self.take_contours()\n for cnt, cnt2 in zip(contours, self.contours):\n val = cv2.matchShapes(cnt, cnt2, 1, 0.0)\n if val < 0.2:\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n if self.read_matrix(box):\n cv2.drawContours(self.frame, [box], 0, (0, 0, 255), 2)\n cv2.drawContours(self.frame, cnt, -1, (255, 0, 0), 3)\n if self.detected:\n img = self.info_displayer.display(self.id, self.frame)\n logging.debug(\"Data Matrix detected\")\n current_frame = [self.frame]",
"def process_cvat_xml(xml_file, image_dir, output_dir,username,password,ilabels):\n KNOWN_TAGS = {'box', 'image', 'attribute'}\n\n if (image_dir is None):\n image_dir=os.path.join(output_dir,\"data/obj\")\n os.makedirs(image_dir, exist_ok=True)\n\n os.makedirs(output_dir, exist_ok=True)\n cvat_xml = etree.parse(xml_file)\n basename = os.path.splitext( os.path.basename( xml_file ) )[0]\n current_labels = {}\n traintxt = \"\"\n auto_lbl_count = 0\n\n if (ilabels is not None):\n vlabels=ilabels.split(',')\n for _label in vlabels:\n current_labels[_label]=auto_lbl_count\n auto_lbl_count+=1\n\n tracks= cvat_xml.findall( './/track' )\n\n if (tracks is not None) and (len(tracks) > 0):\n frames = {}\n\n for track in tracks:\n trackid = int(track.get(\"id\"))\n label = track.get(\"label\")\n boxes = track.findall( './box' )\n for box in boxes:\n frameid = int(box.get('frame'))\n outside = int(box.get('outside'))\n #occluded = int(box.get('occluded')) #currently unused\n #keyframe = int(box.get('keyframe')) #currently unused\n xtl = float(box.get('xtl'))\n ytl = float(box.get('ytl'))\n xbr = float(box.get('xbr'))\n ybr = float(box.get('ybr'))\n\n frame = frames.get( frameid, {} )\n\n if outside == 0:\n frame[ trackid ] = { 'xtl': xtl, 'ytl': ytl, 'xbr': xbr, 'ybr': ybr, 'label': label }\n\n frames[ frameid ] = frame\n\n width = int(cvat_xml.find('.//original_size/width').text)\n height = int(cvat_xml.find('.//original_size/height').text)\n\n taskid = int(cvat_xml.find('.//task/id').text)\n\n urlsegment = cvat_xml.find(\".//segments/segment/url\").text\n urlbase = urlsegment.split(\"?\")[0]\n\n httpclient = requests.session()\n httpclient.get(urlbase)\n\n csrftoken = \"none\"\n sessionid = \"none\"\n\n # Spit out a list of each object for each frame\n for frameid in sorted(frames.keys()):\n image_name = \"%s_%08d.jpg\" % (basename, frameid)\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n if username is None:\n log.warn('{} image cannot be found. Is `{}` image directory correct?\\n'.format(image_path, image_dir))\n else:\n log.info('{} image cannot be found. 
Downloading from task ID {}\\n'.format(image_path, taskid))\n\n if sessionid == \"none\":\n if \"csrftoken\" in httpclient.cookies:\n csrftoken = httpclient.cookies[\"csrftoken\"]\n elif \"csrf\" in httpclient.cookies:\n csrftoken = httpclient.cookies[\"csrf\"]\n\n login_data = dict(username=username, password=password,\n csrfmiddlewaretoken=csrftoken, next='/dashboard')\n\n urllogin = urlbase+\"/auth/login\"\n httpclient.post(urllogin, data=login_data,\n headers=dict(Referer=urllogin))\n\n if (\"sessionid\" in httpclient.cookies):\n sessionid = httpclient.cookies[\"sessionid\"]\n\n url = urlbase+\"/api/v1/tasks/\"+str(taskid)+\"/frames/\"+ str(frameid)\n\n req = httpclient.get(url, headers=dict(\n csrftoken=csrftoken, sessionid=sessionid))\n\n with open(image_path, 'wb') as fo:\n fo.write(req.content)\n print('Url saved as %s\\n' % image_path)\n\n\n frame = frames[frameid]\n\n _yoloAnnotationContent=\"\"\n\n objids = sorted(frame.keys())\n\n for objid in objids:\n\n box = frame[objid]\n\n label = box.get('label')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n if not label in current_labels:\n current_labels[label] = auto_lbl_count\n auto_lbl_count+=1\n\n labelid=current_labels[label]\n yolo_x= (xmin + ((xmax-xmin)/2))/width\n yolo_y= (ymin + ((ymax-ymin)/2))/height\n yolo_w = (xmax - xmin) / width\n yolo_h = (ymax - ymin) / height\n\n if len(_yoloAnnotationContent) != 0:\n _yoloAnnotationContent += \"\\n\"\n\n _yoloAnnotationContent+=str(labelid)+\" \"+\"{:.6f}\".format(yolo_x) +\" \"+\"{:.6f}\".format(yolo_y) +\" \"+\"{:.6f}\".format(yolo_w) +\" \"+\"{:.6f}\".format(yolo_h)\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.txt')\n anno_path = os.path.join(image_dir, anno_name)\n\n _yoloFile = open(anno_path, \"w\", newline=\"\\n\")\n _yoloFile.write(_yoloAnnotationContent)\n _yoloFile.close()\n\n if len(traintxt)!=0:\n traintxt+=\"\\n\"\n\n traintxt+=image_path\n\n else:\n for img_tag in cvat_xml.findall('image'):\n image_name = img_tag.get('name')\n width = img_tag.get('width')\n height = img_tag.get('height')\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. 
Is `{}` image directory correct?'.\n format(image_path, image_dir))\n\n unknown_tags = {x.tag for x in img_tag.iter()}.difference(KNOWN_TAGS)\n if unknown_tags:\n log.warn('Ignoring tags for image {}: {}'.format(image_path, unknown_tags))\n\n _yoloAnnotationContent = \"\"\n\n for box in img_tag.findall('box'):\n label = box.get('label')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n if not label in current_labels:\n current_labels[label] = auto_lbl_count\n auto_lbl_count += 1\n\n labelid = current_labels[label]\n yolo_x = (xmin + ((xmax-xmin)/2))/width\n yolo_y = (ymin + ((ymax-ymin)/2))/height\n yolo_w = (xmax - xmin) / width\n yolo_h = (ymax - ymin) / height\n\n if len(_yoloAnnotationContent) != 0:\n _yoloAnnotationContent += \"\\n\"\n\n _yoloAnnotationContent += str(labelid)+\" \"+\"{:.6f}\".format(yolo_x) + \" \"+\"{:.6f}\".format(\n yolo_y) + \" \"+\"{:.6f}\".format(yolo_w) + \" \"+\"{:.6f}\".format(yolo_h)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.txt')\n anno_path = os.path.join(image_dir, anno_name)\n\n _yoloFile = open(anno_path, \"w\", newline=\"\\n\")\n _yoloFile.write(_yoloAnnotationContent)\n _yoloFile.close()\n\n traintxt_file=open(output_dir+\"/train.txt\",\"w\",newline=\"\\n\")\n traintxt_file.write(traintxt)\n traintxt_file.close()",
"def fix_matrix_gauge(emat):\n # fix mean\n for j in range(emat.shape[1]):\n emat[:,j] = emat[:,j] -sp.mean(emat[:,j])\n # fix sum of variances equal to length of matrix\n svar = np.sum(np.var(emat,axis=0))\n emat = sp.sqrt(emat.shape[1])*emat/sp.sqrt(svar)\n return emat",
"def removeMatrixTranslate(matrix):\n\n float_matrix = [matrix(i, j) for i in xrange(4) for j in xrange(4)]\n for idx in range(12, 15):\n float_matrix[idx] = 0.0\n \n outMatrix = OpenMaya.MFloatMatrix()\n OpenMaya.MScriptUtil.createFloatMatrixFromList(float_matrix , outMatrix)\n\n return outMatrix",
"def test_process_yaml_invalid(caplog):\n data = \"\"\"\n text_key: incorrect format\n - listitem\n - listitem\n \"\"\"\n\n with patch(\"builtins.open\", mock_open(read_data=data)):\n result = process_yaml(\"test/file.yaml\")\n\n for record in caplog.records:\n assert (\n \"Incorrect test/file.yaml. Error: while parsing a block mapping\"\n in record.message\n )\n assert record.levelname == \"ERROR\"\n assert result == []",
"def transform_val(self, sample):\n img = sample[\"image\"]\n bboxes = sample[\"bboxes\"]\n\n imgH = img.shape[0]\n imgW = img.shape[1]\n\n if imgW / imgH < 2.5:\n scale_factor = min(self.args.img_size[0] / imgH, self.args.img_size[1] / imgW)\n else:\n scale_factor = 1.0\n\n random_scale = np.random.randint(8, 11) / 10\n\n if bboxes.size == 0:\n bboxes = np.array([[0.1, 0.1, 0.1, 0.1, 0.0, 0.0]]) # this is just a dummy - all values must be inside (0,1)\n\n annotations = {'image': img, 'bboxes': bboxes}\n\n transforms = ([#Resize(height=int(scale_factor * imgH), width=int(scale_factor * imgW),\n # p=1.0),\n # PadIfNeeded(min_height=self.args.img_size[0], min_width=self.args.img_size[1],\n # border_mode=cv2.BORDER_REPLICATE,\n # p=1.0),\n # changing image size - mainting aspect ratio for later resize\n # OneOf([RandomCrop(height=self.args.img_size[0], width=self.args.img_size[1], p=0.5),\n # RandomCrop(height=int(random_scale * self.args.img_size[0]),\n # width=int(random_scale * self.args.img_size[1]), p=0.5)], p=1.0),\n # making sure resize fits with yolo input size\n Resize(height=self.args.img_size[0], width=self.args.img_size[1], p=1.0),\n Normalize(p=1.0)])\n\n preform_augmentation = Compose(transforms, bbox_params=BboxParams(format='yolo',\n min_visibility=0.3))\n augmented_sample = preform_augmentation(**annotations)\n\n augmented_sample[\"bboxes\"] = np.array(augmented_sample[\"bboxes\"])\n\n return augmented_sample",
"def preprocess(self, matrix: np.array) -> typing.Tuple[np.array, np.array]:\n # Impute values for some preprocessors\n matrix = self._impute_values(matrix)\n\n # Apply the preprocessors manually\n processed_features = []\n for index, preprocessor in enumerate(self._preprocessors):\n features = [line[index] for line in matrix]\n if self._is_loaded:\n try:\n current_preprocessed = preprocessor.transform(features)\n\n except ValueError:\n # If there is a difference between features count, pad the\n # vectors\n features = self._impute_values(features,\n preprocessor.n_features_in_)\n current_preprocessed = preprocessor.transform(features)\n else:\n current_preprocessed = preprocessor.fit_transform(features)\n\n processed_features.append(current_preprocessed)\n\n # Transpose the matrix of features to let each line represent a sample\n processed_features = list(map(list, zip(*processed_features)))\n\n # Drop the array and sparse matrix representations\n converted_features = []\n length_already_stored = bool(self._preprocessors_output_lengths)\n for sample_id, _ in enumerate(processed_features):\n current_features = []\n for feature_id in range(len(processed_features[sample_id])):\n feature = processed_features[sample_id][feature_id]\n if isinstance(feature, scipy.sparse.csr.csr_matrix):\n current_features.extend(feature.toarray()[0])\n elif isinstance(feature, list):\n current_features.extend(feature)\n else:\n current_features.append(feature)\n\n # Save the lengths if they are not already set\n if not length_already_stored:\n if isinstance(feature, scipy.sparse.csr.csr_matrix):\n length = feature.shape[1]\n elif isinstance(feature, list):\n length = len(feature)\n else:\n length = 1\n\n self._preprocessors_output_lengths.append(length)\n\n converted_features.append(current_features)\n\n # Apply a scalar\n if self._is_loaded:\n converted_features = self._last_scalar_model.transform(\n converted_features)\n else:\n # If the core is not loaded from dumped models, then create a new\n # scalar, fit it and transform the data\n self._last_scalar_model = MinMaxScaler()\n converted_features = self._last_scalar_model.fit_transform(\n converted_features)\n\n # Create a model if one is not loaded\n if not self._is_loaded:\n if self._reduction_algorithm == ReductionAlgorithm.PCA:\n self._reduction_model = PCA(\n n_components=self._reduction_components_count)\n elif self._reduction_algorithm == ReductionAlgorithm.FAST_ICA:\n self._reduction_model = FastICA(\n n_components=self._reduction_components_count)\n elif self._reduction_algorithm == ReductionAlgorithm.NMF:\n self._reduction_model = NMF(\n n_components=self._reduction_components_count)\n\n reduced_features = self._reduction_model.fit_transform(\n converted_features)\n else:\n reduced_features = self._reduction_model.transform(\n converted_features)\n\n return (converted_features, reduced_features)",
"def test_yaml_serialization(self, molecule):\n serialized = molecule.to_yaml()\n molecule_copy = Molecule.from_yaml(serialized)\n assert molecule == molecule_copy\n assert molecule_copy.n_conformers == molecule.n_conformers\n assert np.allclose(molecule_copy.conformers[0], molecule.conformers[0])",
"def image_transform(im_bytes):\n img = [mx.image.imdecode(bytes.fromhex(im.lstrip('0x'))) for im in im_bytes]\n out = gcv.data.transforms.presets.yolo.transform_test(img)\n return out[0]",
"def parse_emission_matrix(emissions_matrix_path):\r\n f = open(emissions_matrix_path)\r\n f.readline() # remove first line\r\n lines = f.readlines()\r\n k_counter = len(lines)\r\n emissions_mat = np.zeros([k_counter + NOT_MOTIF_STATES, ALPHABET_LEN])\r\n # B start\r\n emissions_mat[0, 0] = 1\r\n # B end\r\n emissions_mat[-1, -1] = 1\r\n # B_1\r\n emissions_mat[1, 1:-1] = UNIFORM_PROB\r\n # B_2\r\n emissions_mat[-2, 1:-1] = UNIFORM_PROB\r\n for k, line in enumerate(lines, 2): # go over every line\r\n emissions = line.split('\t')\r\n for letter in range(len(alphabet)): # create emissions for every S_i\r\n emissions_mat[k, letter + 1] = float(emissions[letter])\r\n return wrap_log(emissions_mat), k_counter",
"def test__parse_emoji__0():\n emoji_0 = BUILTIN_EMOJIS['x']\n emoji_1 = Emoji.precreate(202301010089, name = 'replica', animated = True)\n \n for input_value, expected_output in (\n (emoji_0.as_emoji, emoji_0),\n (emoji_1.as_emoji, emoji_1),\n ):\n output = parse_emoji(input_value)\n vampytest.assert_eq(output, expected_output)",
"def parse_string(self, matrix):\n for idx, row in enumerate(matrix):\n for idy, item in enumerate(row):\n self.o_pos_to_num[(idx, idy)] = item\n self.pos_to_num[(idx, idy)] = item\n self.o_num_to_pos[item] = (idx, idy)\n self.num_to_pos[item] = (idx, idy)",
"def test_right_handed_frame_0(self):\n data = ET.parse(\"data/metadata_0.xml\")\n data_str = ET.tostring(data.getroot())\n\n dict = tesse_ros_bridge.utils.parse_metadata(data_str)\n quat = np.array(dict['quaternion'])\n rot = tf.transformations.quaternion_matrix(quat)\n self.assertEqual(np.linalg.det(rot), 1)",
"def vid2tensor( self, current_frame):"
] | [
"0.5062877",
"0.48402",
"0.4789745",
"0.47699156",
"0.47673038",
"0.47382063",
"0.4707494",
"0.46134594",
"0.46037513",
"0.45340943",
"0.44836348",
"0.447637",
"0.44666317",
"0.44268033",
"0.43910342",
"0.43851766",
"0.4379255",
"0.43756458",
"0.43580797",
"0.43555278",
"0.43542936",
"0.43537503",
"0.43487638",
"0.43421465",
"0.43383014",
"0.4324914",
"0.43088093",
"0.4308236",
"0.42887357",
"0.4283101"
] | 0.8253259 | 0 |
Computes a new Camera Info in the case of mirroring | def new_mirror(self,alongx,alongy):
Knew = self.K.clone()
if alongx:
Knew[0,2] = self.size[0]-Knew[0,2]
if alongy:
Knew[1,2] = self.size[1]-Knew[1,2]
return CameraInfo(self.size,Knew,self.dist) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_camera(self):\n _, frame = self.camera.read()\n return self.mirror(frame)",
"def info_callback(self, info):\n if self.need_cam_info:\n print(\"got camera info\")\n self.camera_model.fromCameraInfo(info)\n self.need_cam_info = False",
"def process_camera():\n\n pic_array = take_picture()\n detections, shapes, descriptors = detect_faces(person_database,pic_array)\n\n names = []\n\n for desc in descriptors:\n name = find_match(person_database, desc)\n names.append(name)\n\n return pic_array, names, detections, shapes, descriptors",
"def query_camera(self):\n ok, orig_pic = self.vs.read() # Read video stream\n if ok: # If no errors\n orig_pic = imutils.rotate(orig_pic, angle=self.camera_rot)\n curr_pic = imutils.resize(orig_pic, width=self.image_width)\n return curr_pic, orig_pic\n else:\n return None, None",
"def _makeCamera(self, *args, **kwargs):\n if not self._cameraCache:\n self._cameraCache = CameraMapper._makeCamera(self, *args, **kwargs)\n return self._cameraCache",
"def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass",
"def _update_camera(self, render=False):\n self._renderer.set_camera(\n # needs fix, distance moves when focal point updates\n distance=self._renderer.plotter.camera.distance * 0.9,\n focalpoint=tuple(self._ras),\n reset_camera=False)",
"def test_generate_camera_info(self):\n data = ET.parse('data/cam_data_0.xml')\n data_str = ET.tostring(data.getroot())\n\n dict = tesse_ros_bridge.utils.parse_cam_data(data_str)\n\n (left, right) = tesse_ros_bridge.utils.generate_camera_info(dict, dict)\n self.assertEqual(left.header.frame_id, \"left_cam\")\n self.assertEqual(right.header.frame_id, \"right_cam\")\n self.assertEqual(left.width, dict['parameters']['width'])\n self.assertEqual(left.height, dict['parameters']['height'])\n self.assertEqual(right.width, dict['parameters']['width'])\n self.assertEqual(right.height, dict['parameters']['height'])\n\n # TODO(marcus): add more checks",
"def new_undistorted(self):\n return CameraInfo(self.size,self.K,None)",
"def test_make_camera_info_msg(self):\n # TODO(marcus): complete\n pass",
"def camera_info_callback(self, ros_data):\n self.last_call_back_time = rospy.get_time()\n\n self.lastCameraInfo = ros_data",
"def camera_callback(self, data):\n self.camera_mutex.acquire()\n self.position = [data.pose.position.x, data.pose.position.z, data.pose.position.y]\n self.rotation = [data.pose.orientation.x, data.pose.orientation.z, data.pose.orientation.y, data.pose.orientation.w]\n self.camera_mutex.release()",
"def copy(self):\n return CameraExtrinsic(self.position, self.direction, self.up)",
"def build_camera_info(attributes):\n camera_info = CameraInfo()\n # store info without header\n camera_info.header.frame_id = \"velodyne\"\n camera_info.width = int(attributes['width'])\n camera_info.height = int(attributes['height'])\n camera_info.distortion_model = 'plumb_bob'\n cx = camera_info.width / 2.0\n cy = camera_info.height / 2.0\n fx = camera_info.width / (\n 2.0 * math.tan(float(attributes['fov']) * math.pi / 360.0))\n fy = fx\n camera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]\n camera_info.D = [0, 0, 0, 0, 0]\n camera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]\n camera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]\n return camera_info",
"def __init__(self, camera):\r\n self.planes = []\r\n self.camera = camera",
"def camera(*args, aspectRatio: Union[float, bool]=0.0, cameraScale: Union[float, bool]=0.0,\n centerOfInterest: Union[float, bool]=0.0, clippingPlanes: bool=True, depthOfField:\n bool=True, displayFieldChart: bool=True, displayFilmGate: bool=True,\n displayFilmOrigin: bool=True, displayFilmPivot: bool=True, displayGateMask:\n bool=True, displayResolution: bool=True, displaySafeAction: bool=True,\n displaySafeTitle: bool=True, fStop: Union[float, bool]=0.0, farClipPlane:\n Union[float, bool]=0.0, farFocusDistance: Union[float, bool]=0.0, filmFit:\n Union[AnyStr, bool]=\"\", filmFitOffset: Union[float, bool]=0.0, filmRollOrder:\n Union[AnyStr, bool]=\"\", filmRollValue: Union[float, bool]=0.0, filmTranslateH:\n Union[float, bool]=0.0, filmTranslateV: Union[float, bool]=0.0, focalLength:\n Union[float, bool]=0.0, focusDistance: Union[float, bool]=0.0, homeCommand:\n Union[AnyStr, bool]=\"\", horizontalFieldOfView: Union[float, bool]=0.0,\n horizontalFilmAperture: Union[float, bool]=0.0, horizontalFilmOffset: Union[float,\n bool]=0.0, horizontalPan: Union[float, bool]=0.0, horizontalRollPivot: Union[float,\n bool]=0.0, horizontalShake: Union[float, bool]=0.0, journalCommand: bool=True,\n lensSqueezeRatio: Union[float, bool]=0.0, lockTransform: bool=True, motionBlur:\n bool=True, name: Union[AnyStr, bool]=\"\", nearClipPlane: Union[float, bool]=0.0,\n nearFocusDistance: Union[float, bool]=0.0, orthographic: bool=True,\n orthographicWidth: Union[float, bool]=0.0, overscan: Union[float, bool]=0.0,\n panZoomEnabled: bool=True, position: Union[List[float, float, float], bool]=None,\n postScale: Union[float, bool]=0.0, preScale: Union[float, bool]=0.0, renderPanZoom:\n bool=True, rotation: Union[List[float, float, float], bool]=None, shakeEnabled:\n bool=True, shakeOverscan: Union[float, bool]=0.0, shakeOverscanEnabled: bool=True,\n shutterAngle: Union[float, bool]=0.0, startupCamera: bool=True,\n stereoHorizontalImageTranslate: Union[float, bool]=0.0,\n stereoHorizontalImageTranslateEnabled: bool=True, verticalFieldOfView: Union[float,\n bool]=0.0, verticalFilmAperture: Union[float, bool]=0.0, verticalFilmOffset:\n Union[float, bool]=0.0, verticalLock: bool=True, verticalPan: Union[float, bool]=0.0,\n verticalRollPivot: Union[float, bool]=0.0, verticalShake: Union[float, bool]=0.0,\n worldCenterOfInterest: Union[List[float, float, float], bool]=None, worldUp:\n Union[List[float, float, float], bool]=None, zoom: Union[float, bool]=0.0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def camera(self):\n self.spectrum = self.spectrum",
"def updateView(self) :\n\t\tbase.camera.setPos(self.avatarNP.getPos())\n\t\t# uncomment the following line for a third-persion view\n\t#\tbase.camera.setY(self.avatarNP.getY()+10)\n\t\tbase.camera.setZ(self.avatarNP.getZ()+1.5)\n\t\tbase.camera.setH(self.avatarNP.getH()+180)\n\n\t\tif Util.AUTO : return\n\n\t\tif self.keyMap[\"view\"] :\n\t\t\tbase.camera.setPos(self.avatarNP,\n\t\t\t\t\t 0,-3,self.avatarNP.getZ()+1.5)\n\t\t\tbase.camera.setH(self.avatarNP.getH())\n\t\t\treturn\n\n\t\t# Update where camera is looking\n\t\tcamHeading = self.avatarNP.getH()+180\n\t\tif self.keyMap[\"cam-left\"] : camHeading += 40\n\t\telif self.keyMap[\"cam-right\"]: camHeading -= 40\n\t\tbase.camera.setH(camHeading)\n\n\t\tcamPitch = self.avatarNP.getP()\n\t\tif self.keyMap[\"cam-up\"] : camPitch = 20\n\t\telif self.keyMap[\"cam-down\"] : camPitch = -20\n\t\tbase.camera.setP(camPitch)\n\n\t\t#hpr = self.avatarNP.getHpr()\n\t\t#pos = self.avatarNP.getPos()\n\t\t#hpr[0] = hpr[0] + 180 + camAngleX\n\t\t#hpr[1] = camAngle\n\n\t\t# update field-of-view in response to zoom controls\n\t\tif self.keyMap[\"zoom-in\"] != 0 : \t\t\t\n\t\t\tif self.fieldAngle > 5 : self.fieldAngle *= .99\n\t\telif self.keyMap[\"zoom-out\"] != 0 :\t\n\t\t\tif self.fieldAngle < 120 : self.fieldAngle *= 1.01\n\t\telif self.keyMap[\"reset-view\"] != 0 :\n\t\t\tself.fieldAngle = 46.8\n\t\tbase.camLens.setFov(self.fieldAngle)",
"def adjust_camera(self):\n pose = deepcopy(self.data['poses']['marker']) # PoseStamped()\n eye_pose = deepcopy(pose)\n eye_pose.pose.position.x += 0.60\n eye_pose.pose.position.z += 0.20\n focus_pose = PoseStamped()\n base_eye_pose = PoseStamped()\n\n try:\n # Convert pose to base frame\n pose.header.stamp = self.tfl. \\\n getLatestCommonTime(self.params['world'], pose.header.frame_id)\n focus_pose = self.tfl.transformPose(self.params['world'], pose)\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" + pose.header.frame_id)\n\n try:\n # Convert pose to base frame\n pose.header.stamp = self.tfl. \\\n getLatestCommonTime(self.params['world'],\n eye_pose.header.frame_id)\n base_eye_pose = self.tfl.transformPose(self.params['world'],\n eye_pose)\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" + pose.header.frame_id)\n\n cam_place = CameraPlacement()\n cam_place.target_frame = self.params['world']\n cam_place.time_from_start = Duration(1)\n # Position of the camera relative to target_frame\n cam_place.eye.header.frame_id = cam_place.target_frame\n cam_place.eye.point = base_eye_pose.pose.position\n # Target_frame-relative point for the focus\n cam_place.focus.header.frame_id = cam_place.target_frame\n cam_place.focus.point = focus_pose.pose.position\n # Target_frame-relative vector that maps to \"up\" in the view plane.\n cam_place.up.header.frame_id = cam_place.target_frame\n cam_place.up.vector.x = 0\n cam_place.up.vector.y = 0\n cam_place.up.vector.z = 1\n self.pub.publish(cam_place)\n return",
"def __init__(self, camera, cameras, settings):\n\n self.cam = None\n self.jpeg_quality = 95 # 0 to 100, higher is better quality, 95 is cv2 default\n # check picamera version\n try:\n picamversion = require('picamera')[0].version\n except:\n picamversion = '0'\n\n if 'threaded_read' in cameras[camera]: # threaded on non-threaded camera reading\n self.threaded_read = cameras[camera]['threaded_read']\n else:\n self.threaded_read = True\n if 'resolution' in cameras[camera]:\n self.resolution = literal_eval(cameras[camera]['resolution'])\n else:\n self.resolution = (320, 240)\n if 'framerate' in cameras[camera]:\n self.framerate = cameras[camera]['framerate']\n else:\n self.framerate = 32\n if 'vflip' in cameras[camera]:\n self.vflip = cameras[camera]['vflip']\n else:\n self.vflip = False\n if 'resize_width' in cameras[camera]:\n # resize_width is a percentage value\n # width in pixels will be computed later after reading a test image\n self.resize_width = cameras[camera]['resize_width']\n else:\n self.resize_width = None\n if 'viewname' in cameras[camera]:\n self.viewname = cameras[camera]['viewname']\n else:\n self.viewname = ' '\n if 'src' in cameras[camera]:\n self.src = cameras[camera]['src']\n else:\n self.src = 0\n if 'exposure_mode' in cameras[camera]:\n self.exposure_mode = cameras[camera]['exposure_mode']\n else:\n self.exposure_mode = None\n if 'iso' in cameras[camera]:\n self.iso = cameras[camera]['iso']\n else:\n self.iso = 0 # default value\n if 'shutter_speed' in cameras[camera]:\n self.shutter_speed = cameras[camera]['shutter_speed']\n else:\n self.shutter_speed = 0 # default value\n if 'sharpness' in cameras[camera]:\n self.sharpness = cameras[camera]['sharpness']\n else:\n self.sharpness = 0 # default value\n if 'contrast' in cameras[camera]:\n self.contrast = cameras[camera]['contrast']\n else:\n self.contrast = 0 # default value\n if 'brightness' in cameras[camera]:\n self.brightness = cameras[camera]['brightness']\n else:\n self.brightness = 50 # default value\n if 'exposure_compensation' in cameras[camera]:\n self.exposure_compensation = cameras[camera]['exposure_compensation']\n else:\n self.exposure_compensation = 0 # 0 default value, integer value between -25 and 25\n if 'awb_mode' in cameras[camera]:\n self.awb_mode = cameras[camera]['awb_mode']\n else:\n self.awb_mode = 'auto' # default value\n\n self.detectors = []\n if 'detectors' in cameras[camera]: # is there at least one detector\n self.setup_detectors(cameras[camera]['detectors'],\n settings.nodename,\n self.viewname)\n if camera[0].lower() == 'p': # this is a picam\n # start PiCamera and warm up; inherits methods from\n # imutils.VideoStream unless threaded_read is False; then uses class\n # PiCameraUnthreadedStream to read the PiCamera in an unthreaded way\n if self.threaded_read:\n self.cam = VideoStream(usePiCamera=True,\n resolution=self.resolution,\n framerate=self.framerate).start()\n else:\n self.cam = PiCameraUnthreadedStream(resolution=self.resolution,\n framerate=self.framerate)\n\n # if an exposure mode has been set in yaml, set it\n if self.exposure_mode:\n self.cam.camera.exposure_mode = self.exposure_mode\n # if an iso has been set in yaml, set it\n if self.iso:\n self.cam.camera.iso = self.iso\n # if an iso has been set in yaml, set it\n if self.shutter_speed:\n self.cam.camera.shutter_speed = self.shutter_speed\n # if an sharpness has been set in yaml, set it\n if self.sharpness:\n self.cam.camera.sharpness = self.sharpness\n # if an contrast has been set in yaml, set it\n if self.contrast:\n 
self.cam.camera.contrast = self.contrast\n # if an brightness has been set in yaml, set it\n if self.brightness:\n self.cam.camera.brightness = self.brightness\n # if an exposure_compensation has been set in yaml, set it\n if self.exposure_compensation:\n self.cam.camera.exposure_compensation = self.exposure_compensation\n # if an awb_mode has been set in yaml, set it\n if self.awb_mode:\n self.cam.camera.awb_mode = self.awb_mode\n self.cam_type = 'PiCamera'\n else: # this is a webcam (not a picam)\n self.cam = VideoStream(src=0).start()\n self.cam_type = 'webcam'\n sleep(3.0) # allow camera sensor to warm up\n\n # self.text is the text label for images from this camera.\n # Each image that is sent is sent with a text label so the hub can\n # file them by nodename, viewname, and send_type\n # example: JeffOffice Window|jpg\n # Nodename and View name are in one field, separated by a space.\n # send_type is in the next field\n # The 2 field names are separaged by the | character\n node_and_view = ' '.join([settings.nodename, self.viewname]).strip()\n self.text = '|'.join([node_and_view, settings.send_type])\n\n # set up camera image queue\n self.cam_q = deque(maxlen=settings.queuemax)",
"def __init__(self):\n self.active = True # Camera activation control\n self.stream = cv2.VideoCapture(0) # Open video stream\n while not self.stream.isOpened():\n pass\n _,self.image = self.stream.read()# Save the first frame\n cv2.waitKey(10)\n self.frame = self.image[196:304,:546,:]# Cropped frame\n self.diff_frame = self.frame\n# self.reference_frame = copy.deepcopy(self.frame)\n# self.abs_diff_frame = copy.deepcopy(self.frame)\n self.reference_frame = self.frame\n self.abs_diff_frame = self.frame\n self.frame_count = 1 # Used for framerate estimation\n self.frame_rate = 0\n self.tic = time()",
"def main():\n cam = Realsense()\n # cam.access_intr_and_extr()\n profile = cam.pipeline.start(cam.config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n objp = np.zeros((3*4,3), np.float32)\n objp[:,:2] = np.mgrid[0:4,0:3].T.reshape(-1,2)\n axis = np.float32([[1,0,0], [0,1,0], [0,0,-1]]).reshape(-1,3)\n # print(objp)\n\n try:\n while (True):\n # detect ArUco markers in RGB images\n frames = cam.pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n color_image = np.asanyarray(color_frame.get_data()) \n frame = color_image\n font = cv2.FONT_HERSHEY_SIMPLEX\n corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)\n \n if np.all(ids != None): # if markers are detected\n for i in range(0, ids.size):\n aruco.drawAxis(frame, cam.newcameramtx, cam.dist, rvecs[i],\n tvecs[i], 0.1) # Draw axis\n aruco.drawDetectedMarkers(frame, corners) # draw square around markers\n\n ###### DRAW ID #####\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,25), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n\t ###### Output marker positions in camera frame ######\n \t # output tvec\n y0 = 60\n dy = 40\n for i in range(0, ids.size):\n y = y0 + i*dy\n cv2.putText(frame, str(tvecs[i][0]), (0, y), font, 1, (0,255,0),\n 2, cv2.LINE_AA)\n\n else:\n ##### DRAW \"NO IDS\" #####\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (4,3), None)\n if ret == True:\n corners2 = cv2.cornerSubPix(gray, corners,(11,11), (-1,-1),\n cam.criteria)\n corners2 = corners2[::-1]\n # print(corners2)\n # print(objp)\n frame = cv2.drawChessboardCorners(frame, (4,3), corners2, ret)\n # Find the rotation and translation vectors.\n _, rvecs, tvecs = cv2.solvePnP(objp, corners2, cam.newcameramtx,\n cam.dist)\n rot, _ = cv2.Rodrigues(rvecs)\n # print(rot)\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs,\n cam.newcameramtx, cam.dist)\n frame = draw(frame, corners2, imgpts)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n cv2.waitKey(5)\n\n # When everything done, release the capture\n cv2.destroyAllWindows()\n\n finally:\n cam.pipeline.stop()",
"def getCameraInfo(self):\n data = BytesIO(self._sendCommand(self.SONY_CMD_DevInfoSender_GetModelInfo))\n plistSize = parse32le(data.read(4))\n plistData = data.read(plistSize)\n data.read(4)\n modelSize = parse8(data.read(1))\n modelName = data.read(modelSize).decode('latin1')\n modelCode = binascii.hexlify(data.read(5)).decode('latin1')\n serial = binascii.hexlify(data.read(4)).decode('latin1')\n return CameraInfo(plistData, modelName, modelCode, serial)",
"def get_calibration_info():\n mjpeg_info_dict = redis_tools.get_dict(db,'mjpeg_info_dict')\n calibration_info = mct_introspection.get_homography_calibration_info()\n for camera in mjpeg_info_dict:\n if not camera in calibration_info:\n calibration_info[camera] = {'modified': ''}\n return calibration_info",
"def setupCamera(self) :\n\t\tbase.disableMouse()\n\t\tbase.camera.setPos(self.avatarNP.getPos())\n\t\tbase.camera.setZ(self.avatarNP.getZ()+1.5)\n\t\tbase.camera.setHpr(self.avatarNP.getHpr()[0],0,0)\t\t\n\t\tself.fieldAngle = 46.8\t# similar to human eye;\n\t\t\t\t\t# change this to zoom in/out\n\t\tbase.camLens.setFov(self.fieldAngle)",
"def get_things1(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_max):\n # Initializing the arrays\n points_3d = []\n points_2d = []\n camera_ind = []\n points_ind = []\n cam_params = []\n\n dst_3d = kp_3d\n dst_2d = kp_2d\n src_3d = map_3d\n src_2d = map_2d\n src_cam = map_cam\n low_bound = []\n up_bound = []\n my_min = 0\n\n # Updating the Camera parameters in map and setting the bounds for the update \n for i in range(my_min,my_max+1):\n cam_param = [map_view[i,0], map_view[i,1], map_view[i,2], map_view[i,3], map_view[i,4], map_view[i,5], f,0,0]\n cam_params.append(cam_param)\n\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n \n # Updating the Camera parameters for frame and setting the bounds for the update\n r = (R.from_matrix((H[0:3, 0:3]))).as_rotvec()\n t = H[:,3]\n cam_param = [r[0], r[1], r[2], t[0], t[1], t[2], f, 0, 0]\n cam_params.append(cam_param)\n \n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n\n new_cam = len(cam_params)-1\n cam_params = np.array(cam_params).reshape(-1,9)\n count = 0\n \n # listing variables to iterate \n l1 = []\n l2 = []\n count = 0\n \n for m in comp_list:\n count+=1\n l1.append(m.queryIdx)\n l2.append(m.trainIdx)\n\n l1 = np.array(l1).reshape(1,-1)\n l2 = np.array(l2).reshape(1,-1)\n l = np.vstack((l1,l2))\n l_fin = l[:,l[1, :].argsort()]\n j = 0\n count = len(points_3d)\n prev = -1\n final_l1 = []\n final_l2 = []\n final_des = []\n\n # Iterating through the list made and making sure no duplicates\n while(j<(len(l_fin[0]))):\n i1 = l_fin[0,j]\n i2 = l_fin[1,j]\n if(i2!=prev):\n # Map points insertion\n \n check = 0\n for ii in range(len(src_2d[i1])):\n m_2d = src_2d[i1][ii]\n check = 1\n ind = int(src_cam[i1][ii])\n points_2d.append([int((m_2d[0]%(2*cx))-cx), int((m_2d[1]%(2*cy))-cy),0])\n\n points_ind.append(count)\n camera_ind.append(ind)\n final_l1.append(i1)\n final_l2.append(0)\n \n # Taking Mean Desciptor if needed un comment 2 lines below\n # x = ((map_des[i1]*len(src_2d[i1]))+des[i2])/(len(src_2d[i1])+1)\n # map_des[i1] = x\n \n if(check==1):\n # Frame points insersion\n points_2d.append([int((dst_2d[i2,0])-cx), int((dst_2d[i2,1])-cy), 0])\n points_ind.append(count)\n camera_ind.append(new_cam)\n final_l1.append(i2)\n final_l2.append(1)\n wld_pt = src_3d[i1]\n points_3d.append([wld_pt[0], wld_pt[1], wld_pt[2]])\n prev = i2\n count = len(points_3d)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n src_2d[i1].append([int((dst_2d[i2,0])), int((dst_2d[i2,1]))])\n j+=1\n \n # Final Output\n cam_params = np.array(cam_params).reshape(-1,9)\n points_3d = np.array(points_3d)\n points_2d = np.array(points_2d)\n camera_ind = 
np.array(camera_ind).reshape(len(camera_ind))\n points_ind = np.array(points_ind).reshape(len(points_ind))\n final_l1 = np.array(final_l1)\n final_l2 = np.array(final_l2)\n return cam_params, points_3d, points_2d, camera_ind, points_ind, final_l1, final_l2, low_bound, up_bound, map_des, src_2d",
"def capture_camera(mirror=True, size=None):\n # カメラをキャプチャする\n cap = cv2.VideoCapture(0) # 0はカメラのデバイス番号\n #HAAR分類器の顔検出用の特徴量\n cascade_path = \"haarcascade_frontalface_alt.xml\"\n color = (255, 255, 255) #白\n #カスケード分類器の特徴量を取得する\n cascade = cv2.CascadeClassifier(cascade_path)\n\n while True:\n count = 0 #参照フレームのカウント\n # retは画像を取得成功フラグ\n ret, frame = cap.read()\n\n # 鏡のように映るか否か\n if mirror is True:\n frame = frame[:,::-1]\n\n # フレームをリサイズ\n # sizeは例えば(800, 600)\n if size is not None and len(size) == 2:\n frame = cv2.resize(frame, size)\n\n k = cv2.waitKey(1) # 1msec待つ\n\n if k == 13: # Enterキーで保存\n cv2.imwrite(\"test.png\", frame)\n\n if k == 27: # ESCキーで終了\n break\n\n\n if count == 10 or count == 0: # 参照フレーム軽減\n #グレースケール変換\n image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #物体認識(顔認識)の実行\n facerect = cascade.detectMultiScale(image_gray, scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))\n count = 1\n else:\n count = count + 1\n #rect = (50,50,50,50)\n image = cv2.imread('lena.jpeg')\n #cv2.rectangle(image), tuple([50,50]), tuple([50,50]), color, thickness=2)\n\n if len(facerect) > 0:\n #if True:\n #検出した顔を囲む矩形の作成\n print (\"face rectangle\")\n print (facerect)\n for rect in facerect:\n cv2.rectangle(image, tuple(rect[0:2]),tuple(rect[0:2]+rect[2:4]), color, thickness=2)\n print('check')\n\n # フレームを表示する\n cv2.imshow('camera capture', frame)\n\n # キャプチャを解放する\n cap.release()\n cv2.destroyAllWindows()",
"def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img",
"def compute_camera_calib_distortion_params():\r\n nx = 9#number of inside corners in x\r\n ny = 6#number of inside corners in y\r\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\n objp = np.zeros((ny*nx,3), np.float32)\r\n objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\r\n\r\n # Arrays to store object points and image points from all the images.\r\n objpoints = [] # 3d points in real world space\r\n imgpoints = [] # 2d points in image plane.\r\n\r\n # Read a particular image just to get image size (all images in the directory are same size)\r\n img = cv2.imread('./camera_cal/calibration3.jpg')\r\n img_size = (img.shape[1], img.shape[0])\r\n # Make a list of calibration images\r\n images = glob.glob('./camera_cal/calibration*.jpg')\r\n # Step through the list and search for chessboard corners\r\n for idx, fname in enumerate(images):\r\n img = cv2.imread(fname)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # Find the chessboard corners\r\n ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)\r\n\r\n # If found, add object points, image points\r\n if ret == True:\r\n objpoints.append(objp)\r\n imgpoints.append(corners)\r\n # # Draw and display the corners\r\n # cv2.drawChessboardCorners(img, (nx,ny), corners, ret)\r\n # #write_name = 'corners_found'+str(idx)+'.jpg'\r\n # #cv2.imwrite(write_name, img)\r\n # cv2.imshow('img', img)\r\n # cv2.waitKey(500)\r\n\r\n # Do camera calibration given object points and image points\r\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\r\n # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)\r\n dist_pickle = {}\r\n dist_pickle[\"mtx\"] = mtx\r\n dist_pickle[\"dist\"] = dist\r\n pickle.dump( dist_pickle, open( \"data/cam_calib_pickle.p\", \"wb\" ) )\r\n print(\"Pickling done\")",
"def retrieve(self, clone: bool = False, *args, **kwargs) -> np.ndarray:\n if not self._frame_in_buffer:\n _, img = super(Camera, self).retrieve(*args, **kwargs)\n self._frame_buffer = cv.remap(img, *self.undistort_rectify_map, cv.INTER_LINEAR)\n self._frame_in_buffer = True\n\n if clone:\n return self._frame_buffer.copy()\n\n return self._frame_buffer"
] | [
"0.65635204",
"0.64050907",
"0.6285134",
"0.6201622",
"0.6198665",
"0.61947",
"0.6176285",
"0.6160507",
"0.6101986",
"0.60852295",
"0.6056886",
"0.6027456",
"0.6011429",
"0.5980591",
"0.59622884",
"0.59386176",
"0.5919779",
"0.5908327",
"0.58563316",
"0.5838457",
"0.5831417",
"0.58259237",
"0.58065003",
"0.5802026",
"0.5795303",
"0.57931185",
"0.57882214",
"0.5758901",
"0.57508516",
"0.5744911"
] | 0.7299222 | 0 |
Removes Undistortion from CameraInfo | def new_undistorted(self):
return CameraInfo(self.size,self.K,None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def undistort(self, image):\n return cv2.undistort(image, self.camera_matrix, self.distortion_coeffs, None, self.camera_matrix)",
"def undistort_image(frame, mtx, dist, display=True):\r\n frame_undistorted = cv2.undistort(frame, mtx, dist, newCameraMatrix=mtx)\r\n\r\n if display:\r\n fig, ax = plt.subplots(nrows=1, ncols=2)\r\n # fig.suptitle('Undistort Image Before & After')\r\n ax[0].set_title('Before calibration')\r\n ax[1].set_title('After calibration')\r\n ax[0].imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\r\n ax[1].imshow(cv2.cvtColor(frame_undistorted, cv2.COLOR_BGR2RGB))\r\n \r\n #for comparing camera undistorted\r\n plt.savefig('../output_images/undistort_image_before_to_after.jpg',dpi=300)\r\n plt.show()\r\n\r\n return frame_undistorted",
"def initialize_undistortion_maps(self):\n\n new_camera_matrix, valid_roi = cv2.getOptimalNewCameraMatrix(\n self.camera_matrix, self.distortion_coefficients, self.image_size,\n 0)\n\n self.map1, self.map2 = cv2.initUndistortRectifyMap(\n self.camera_matrix, self.distortion_coefficients, None,\n new_camera_matrix, self.image_size, cv2.CV_16SC2)",
"def check_extra_cameras___fix():\n cameras = pm.ls(type=\"camera\")\n\n for camera in cameras:\n if camera not in [\"frontShape\", \"perspShape\", \"sideShape\", \"topShape\"]:\n pm.delete(camera.getParent())\n else:\n camera.getParent().setAttr(\"visibility\", 0)",
"def remove_shadow(self):\n #Separate the RGB\n rgb_planes = cv.split(self.frame)\n\n result_norm_planes = []\n #Go through the planes, get a dilated image and a blur image, then get the difference between the two images, then normalize the final image\n for plane in rgb_planes:\n dilated_img = cv.dilate(plane, np.ones((7,7), np.uint8))\n bg_img = cv.medianBlur(dilated_img, 21)\n diff_img = 255 - cv.absdiff(plane, bg_img)\n norm_img = cv.normalize(diff_img,None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8UC1)\n result_norm_planes.append(norm_img)\n\n result_norm = cv.merge(result_norm_planes)\n\n self.frame = result_norm",
"def remove_mass_unsafe(self, *focal_elements):\n for focal in focal_elements:\n if focal[0] in self.focals:\n self.focals[focal[0]] -= focal[1]\n else:\n self.focals[focal[0]] = -focal[1]",
"def __call__(self, img):\n if self.camera_matrix is not None and self.distortion_coef is not None:\n return cv2.undistort(\n img, self.camera_matrix, self.distortion_coef, None, self.camera_matrix)\n else:\n print(\"You should calculate Camera Matrix and Distortion coefficient first!\")\n return img",
"def image_undistort():\n # read test images\n all_test_images = os.listdir('test_images')\n test_images = []\n for name in all_test_images:\n if name.endswith(\".jpg\"):\n test_images.append(name)\n # apply distortion correction on test images\n undistort_images(test_images, './camera_calib_dist_pickle.p')\n print(\"DONE: undistorted test-images saved\")",
"def remove_noise(self):\n kernel = np.ones((5, 5), np.uint8)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_CLOSE, kernel)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_OPEN, kernel)",
"def un_distort_point(point):\n points = np.array([[(point.x, point.y)]], np.float32)\n temp = cv2.undistortPoints(points, _camera_matrix, _camera_distortion)\n fx, fy = _camera_tuned_matrix[0][0], _camera_tuned_matrix[1][1]\n cx, cy = _camera_tuned_matrix[0][2], _camera_tuned_matrix[1][2]\n x = temp[0][0][0] * fx + cx\n y = temp[0][0][1] * fy + cy\n return ge.Point(x, y)",
"def un_distort_image(image):\n global _remap_x, _remap_y\n image = cv2.UMat(image)\n res = cv2.remap(image, _remap_x, _remap_y, cv2.INTER_LINEAR) # 进行remap\n res = res.get()\n return res",
"def cleanupCamera():\n # Delete the preview camera.\n bpy.data.cameras.remove(cache.values[\"camera\"].data, do_unlink=True)\n # Reset the scene camera.\n if cache.values[\"sceneCamera\"]:\n bpy.context.scene.camera = cache.values[\"sceneCamera\"]\n # Reset the view camera.\n view3d = get3dView()\n try:\n if cache.values[\"viewCamera\"]:\n view3d.camera = cache.values[\"viewCamera\"]\n except ReferenceError:\n pass",
"def bs_removeHeadsUpDisplay():\n # remove all headsUpDisplay.\n if pm.windows.headsUpDisplay(lh=True):\n for each in pm.windows.headsUpDisplay(lh=True):\n pm.windows.headsUpDisplay(each, rem=True)\n # remove resolution gates.\n shotCam = pm.PyNode('shot_cam')\n # add resolution gates.\n pm.camera(shotCam, e=True, dsa=False, dfc=False, displayFilmGate=False, displayResolution=False,\n displaySafeTitle=False)\n pm.setAttr(shotCam + '.displayGateMaskOpacity', 0)\n pm.setAttr(shotCam + '.displayGateMaskColor', [0, 0, 0], type='double3')\n pm.setAttr(shotCam + '.displayGateMask', 0)\n # delete expression.\n pm.delete('focalLengthUpdateEXP')\n pm.delete('frameCounterUpdateEXP')",
"def remove_camera(self, camera):\n self._cameras.remove(camera)",
"def stop_aperture(self):\n self.aperture_id = None\n self.mode = \"\"",
"def reset_camera_clipping_range(self):\n self.ResetCameraClippingRange()",
"def _scrub_auth_info(param_info, auth_param_name):\n info = param_info.copy()\n info[auth_param_name] = {key: '*' * len(str(value))\n for key, value in info[auth_param_name].items()}\n\n return info",
"def remove_mass(self, *focal_elements):\n for focal in focal_elements:\n if focal[0] in self.focals:\n self.focals[focal[0]] -= focal[1]\n else:\n self.focals[focal[0]] = -focal[1]",
"def remove(self) -> None:\n self.map.cameras.remove(self)\n if self.is_active():\n self.set_inactive_all()",
"def clear_transforms(self): # -> None:\n ...",
"def undistort(basedir, img_extension, output_dir, output_prefix, calibration, distortion, output_image_shape=(640, 480), scaling_param=1):\n search = os.path.join(basedir, '*'+img_extension)\n img_paths = glob.glob(search)\n img_paths.sort()\n print(\"Number of Images: \", len(img_paths))\n maxlen = len(img_paths)\n if maxlen == 0:\n raise IOError(\n 'No images were found (maybe wrong \\'image extension\\' parameter?)')\n\n if not os.path.exists(os.path.dirname(output_dir)):\n os.makedirs(os.path.dirname(output_dir))\n\n for img_idx, img_path in enumerate(img_paths):\n img = cv2.imread(img_path, 1)\n height, width, _ = img.shape\n new_camera_matrix = calibration\n\n # scaling parameter between 0 (when all the pixels in the undistorted image are valid)\n # and 1 (when all the source image pixels are retained in the undistorted image)\n new_camera_mtx, roi = cv2.getOptimalNewCameraMatrix(\n calibration, distortion, (width, height), scaling_param, output_image_shape)\n print(\"calibration\", calibration)\n print(\"new_camera_matrix\", new_camera_matrix)\n\n # undistort\n mapx, mapy = cv2.initUndistortRectifyMap(\n calibration, distortion, None, new_camera_mtx, output_image_shape, 5)\n dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)\n\n # crop the image\n x, y, w, h = roi\n dst = dst[y:y+h, x:x+w]\n\n output_path = output_dir+output_prefix+'_%d' % img_idx+img_extension\n print(output_path)\n cv2.imwrite(output_path, dst)\n return True",
"def filter_out_contour_from_marker_result(marker_result):\n return {k: v for k, v in marker_result.iteritems() if k != \"contour\"}",
"def undistort_image(self, img, calibration_dict: dict):\n if img is None:\n return None\n\n if 'mtx' not in calibration_dict or 'dist' not in calibration_dict:\n raise ValueError('Missing mtx or dist in calibration dictionary.')\n\n return cv2.undistort(img, calibration_dict['mtx'], calibration_dict['dist'], None, calibration_dict['mtx'])",
"def remove_some_extraneous_information(variant):\n for key in ['xpos','xstop','vep_annotations',]: variant.pop(key, None)",
"def drop_rotors(self):\n\n self.rotors = []",
"def drop_reflector(self):\n\n self.reflector = None",
"def filter_rotation(frame):\n return frame[frame['direction'] != 'none'].copy()",
"def _handle_forgotten_camera_cap(self, image):\n # type: (np.array) -> None\n # Calc the mean brightness of the image to detect a forgotten camera cap\n mean = cv2.mean(image)\n\n # Notify if there is a camera cap detected\n if sum(mean) < self._blind_threshold:\n rospy.logerr(\"Image is too dark! Camera cap not removed?\", logger_name=\"vision\")\n ros_utils.speak(\"Hey! Remove my camera cap!\", self._pub_audio)",
"def undistort_img(img, mtx, dist, debug=False):\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n if (debug):\n window_name = \"Undistorted Image\"\n cv2.imshow('Undistorted Image', undist)\n cv2.moveWindow(\"Undistorted Image\", 10, 50);\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n return undist",
"def handleCleanMetadataRecon(self):\n logging.debug(\"Removing compromising personal info and remaking the file...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n fileType = self.filesList.getFileObj(filePath).type\n self.printPdfPersonalData(filePath, \n fileType,\n AddedFile.changeBase(filePath, self.outputPath))\n self.tabArea.setCurrentIndex(1)\n self.changeCursor()\n self.filesList.getFileObj(filePath).reconMetaCleaned = True"
] | [
"0.58126134",
"0.55463165",
"0.55052596",
"0.5433217",
"0.5391852",
"0.5368674",
"0.5345672",
"0.53102624",
"0.5289878",
"0.5272877",
"0.52263516",
"0.5208682",
"0.51604915",
"0.51445305",
"0.5133489",
"0.51326793",
"0.5113074",
"0.50990415",
"0.5032956",
"0.5016939",
"0.4976452",
"0.49494162",
"0.49481755",
"0.49411216",
"0.49047658",
"0.49041995",
"0.48916492",
"0.48744985",
"0.48673767",
"0.48663652"
] | 0.61903244 | 0 |
computes the optimal transformation for the undistortion toward another or similar size. Wraps cv2.getOptimalNewCameraMatrix | def new_makeOptimal(self,alpha,otherSize=None,centerPrincipalPoint=False):
if otherSize is None:
otherSize = self.size
OK,validROI = cv2.getOptimalNewCameraMatrix(self.K,self.dist,self.size,alpha,otherSize,centerPrincipalPoint)
return CameraInfo(otherSize,OK,None),validROI | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_",
"def compute_camera_calib_distortion_params():\r\n nx = 9#number of inside corners in x\r\n ny = 6#number of inside corners in y\r\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\n objp = np.zeros((ny*nx,3), np.float32)\r\n objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\r\n\r\n # Arrays to store object points and image points from all the images.\r\n objpoints = [] # 3d points in real world space\r\n imgpoints = [] # 2d points in image plane.\r\n\r\n # Read a particular image just to get image size (all images in the directory are same size)\r\n img = cv2.imread('./camera_cal/calibration3.jpg')\r\n img_size = (img.shape[1], img.shape[0])\r\n # Make a list of calibration images\r\n images = glob.glob('./camera_cal/calibration*.jpg')\r\n # Step through the list and search for chessboard corners\r\n for idx, fname in enumerate(images):\r\n img = cv2.imread(fname)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # Find the chessboard corners\r\n ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)\r\n\r\n # If found, add object points, image points\r\n if ret == True:\r\n objpoints.append(objp)\r\n imgpoints.append(corners)\r\n # # Draw and display the corners\r\n # cv2.drawChessboardCorners(img, (nx,ny), corners, ret)\r\n # #write_name = 'corners_found'+str(idx)+'.jpg'\r\n # #cv2.imwrite(write_name, img)\r\n # cv2.imshow('img', img)\r\n # cv2.waitKey(500)\r\n\r\n # Do camera calibration given object points and image points\r\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\r\n # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)\r\n dist_pickle = {}\r\n dist_pickle[\"mtx\"] = mtx\r\n dist_pickle[\"dist\"] = dist\r\n pickle.dump( dist_pickle, open( \"data/cam_calib_pickle.p\", \"wb\" ) )\r\n print(\"Pickling done\")",
"def get_best_transform(x,y):\n # test a simple translation\n if False:\n x = np.array([[0,0],[0,1],[1,0],[1,1]])\n y = np.array([[1,1],[1,2],[2,1],[2,2]]) \n M_correct = np.array([[1,0,1],[0,1,1],[0,0,1]])\n \n x = np.float32(x)\n y = np.float32(y)\n all_idx = [i for i in range(0,len(x))]\n combos = tuple(combinations(all_idx,4))\n min_err = np.inf\n bestM = 0\n for comb in combos:\n M = cv2.getPerspectiveTransform(x[comb,:],y[comb,:])\n xtf = transform_pt_array(x,M)\n err = avg_transform_error(xtf,y)\n if err < min_err:\n min_err = err\n bestM = M\n bestComb = comb\n return bestM",
"def perspective_transform():\n src = np.float32([(220,720), (1110, 720), (570, 470), (722, 470)]) # Manually get these numbers from plot\n dst = np.float32([[320, 720], [920, 720], [320, 1], [920, 1]])\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n\n return M, Minv",
"def correct_foreshortening(self, degrees):\n \n self.new_height = math.floor(self.orig_height * math.cos(math.radians(degrees)))\n self.new_width = math.floor(self.orig_width * math.cos(math.radians(degrees)))\n # Corners, clockwise from top left\n pts1 = np.float32([[0,0],\n [self.orig_width,0],\n [self.orig_width, self.orig_height],\n [0,self.orig_height]])\n self.bottom_x_offset = math.floor((self.orig_width - self.new_width) / 2)\n pts2 = np.float32([[0,0],\n [self.orig_width, 0],\n [self.orig_width - self.bottom_x_offset, self.new_height],\n [self.bottom_x_offset, self.new_height]])\n M = cv2.getPerspectiveTransform(pts1,pts2)\n dst = cv2.warpPerspective(self.img,M,(self.orig_width,self.orig_height))\n self.alt_img = dst\n return dst",
"def perspective_transform(self, undistorted, direction='forward'):\n\t\t# Source image points\n\t\tsrc = np.float32([[255, 695], [585, 455], [700, 455], [1060, 690]])\n\t\t# Destination image points\n\t\tdst = np.float32([[305, 695], [305, 0], [1010, 0], [1010, 690]])\n\t\t# Perform forward or inverse perspective transform\n\t\tif direction == 'forward':\n\t\t\t# Compute the perspective transform, M\n\t\t\tM = cv2.getPerspectiveTransform(src, dst)\n\t\t\t# Create warped image - uses linear interpolation\n\t\t\treturn cv2.warpPerspective(undistorted, M, (undistorted.shape[1], undistorted.shape[0]), flags=cv2.INTER_LINEAR)\n\t\telif direction == 'inverse':\n\t\t\t# Compute the inverse also by swapping the input parameters\n\t\t\tMinv = cv2.getPerspectiveTransform(dst, src)\n\t\t\treturn cv2.warpPerspective(undistorted, Minv, (undistorted.shape[1], undistorted.shape[0]), flags=cv2.INTER_LINEAR)",
"def distort_img(input_img, d_limit=4):\n if d_limit == 0:\n return input_img\n rows, cols, ch = input_img.shape\n pts2 = np.float32([[0, 0], [rows - 1, 0], [0, cols - 1], [rows - 1, cols - 1]])\n pts1 = np.float32(pts2 + np.random.uniform(-d_limit, d_limit, pts2.shape))\n M = cv2.getPerspectiveTransform(pts1, pts2)\n dst = cv2.warpPerspective(input_img, M, (cols, rows), borderMode=1)\n return dst",
"def apply_perspective_correction(image, M, width, height):\n warped = cv.warpPerspective(image, M, (width, height))\n return warped",
"def perspectiveTransform(img, pts, newPts, size=None):\n\targs = len(img.shape)\n\ttmp = img.copy()\n\tif args is 3:\n\t\trows, cols, ch = img.shape\n\telse:\n\t\trows, cols = img.shape\n\tpts1 = np.float32(pts)\n\tpts2 = np.float32(newPts)\n\tif size is None:\n\t\txy = zip(*pts)\n\t\tpt = map(max, xy)\n\t\tsize =(pt[0], pt[1])\n\tM = cv2.getPerspectiveTransform(pts1, pts2)\n\tdst = cv2.warpPerspective(tmp, M, size)\n\treturn dst",
"def resize_img(input_img, scale=1.1):\n if scale == 1.0:\n return input_img\n rows, cols, ch = input_img.shape\n d = rows * (scale - 1) # overall image size change from rows, cols, to rows - 2d, cols - 2d\n pts1 = np.float32([[d, d], [rows - 1 - d, d], [d, cols - 1 - d], [rows - 1 - d, cols - 1 - d]])\n pts2 = np.float32([[0, 0], [rows - 1, 0], [0, cols - 1], [rows - 1, cols - 1]])\n M = cv2.getPerspectiveTransform(pts1, pts2)\n dst = cv2.warpPerspective(input_img, M, (cols, rows), borderMode=1)\n return dst",
"def warp(img1, img2, M):\n\n # Get width and height of input images \n w1,h1 = img1.shape[:2]\n w2,h2 = img2.shape[:2]\n\n # Get the canvas dimesions\n img2_dims = np.float32([ [0,0], [0,w2], [h2, w2], [h2,0] ]).reshape(-1,1,2)\n img1_dims_temp = np.float32([ [0,0], [0,w1], [h1, w1], [h1,0] ]).reshape(-1,1,2)\n\n # Find out the boundary of img1 after projected onto the coord. system of img2\n img1_dims = myPerspectiveTransform(img1_dims_temp, M)\n\n # Resulting dimensions\n result_dims = np.concatenate( (img1_dims, img2_dims), axis = 0)\n \n # Getting images together\n # Calculate dimensions of match points\n x_min, y_min = np.int32(result_dims.min(axis=0).ravel() - 0.5)\n x_max, y_max = np.int32(result_dims.max(axis=0).ravel() + 0.5)\n\n # Create output array after affine transformation \n transform_dist = [-x_min,-y_min]\n transform_array = np.array([[1, 0, transform_dist[0]], \n [0, 1, transform_dist[1]], \n [0,0,1]]) \n \n # Warp images to get the resulting image\n result_img = myWarpPerspective(img1, transform_array.dot(M),\n (x_max-x_min, y_max-y_min))\n alpha = 0.1\n #result_img[transform_dist[1]:w1+transform_dist[1], \n # transform_dist[0]:h1+transform_dist[0]] = img2 \n print(transform_dist)\n #result_img[transform_dist[1]:w1+transform_dist[1], transform_dist[0]:transform_dist[0]+h1] = img1[transform_dist[1]:w1+transform_dist[1], transform_dist[0]:transform_dist[0]+h1] \n #result_img[transform_dist[1]:w1+transform_dist[1], \n # transform_dist[0]:transform_dist[0]+50] = img2[0:w1 , 0 : 50] \n alpha = 0.5\n img1_rest = x_max-x_min - h1\n print(img1_rest)\n #print(h1)\n for j in range(0 , h1):\n for i in range(0 , w1):\n alpha = 0.02 * j\n if alpha > 1:\n alpha = 1\n \n result_img[i + transform_dist[1], j + transform_dist[0]] = img2[i , j] * alpha + result_img[i + transform_dist[1] , j + transform_dist[0]] *(1 - alpha)\n #result_img[i + transform_dist[1], j + transform_dist[0]] = img2[i , j] * alpha \n return result_img",
"def warp_to_overhead(self, undistorted_img):\n return cv2.warpPerspective(undistorted_img, self.overhead_transform, dsize=(self.img_width, self.img_height))",
"def testCalculateTransform(self):\n # Create some points in the first frame.\n z = 4.0\n self.evaluator.camera_height = z\n first_points = numpy.array(\n [[0, 0, z], [2, 0, z], [2, 5, z], [0, 5, z]], dtype=numpy.float32)\n # Create a transformation that will move the camera\n R = numpy.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n t = numpy.array([[3.0], [-5.0], [0.0]])\n expected_result = numpy.eye(4)\n expected_result[0:3, 0:3] = R\n expected_result[0:3, 3:] = t\n # Determine where the second points would be given that.\n second_points = (numpy.matmul(\n R, first_points.transpose()) + t).transpose()\n # Create a simple intrinsic matrix to project onto a fictional camera\n intrinsic = numpy.array(\n [[1.0, 0.0, 20.0], [0.0, 1.0, 20.0], [0.0, 0.0, 1.0]])\n # Use no distortion or transformations\n rvec = numpy.zeros((3, 1))\n tvec = rvec\n distortion = numpy.zeros((5, 1))\n # Project the points into the camera\n (camera_first_points, _) = cv2.projectPoints(\n first_points, rvec, tvec, intrinsic, distortion)\n camera_first_points = camera_first_points.squeeze()\n (camera_second_points, _) = cv2.projectPoints(\n second_points, rvec, tvec, intrinsic, distortion)\n camera_second_points = camera_second_points.squeeze()\n # Using these projected points, can the object recover the correct initial transform\n result = self.evaluator._calculateTransform(\n camera_first_points, camera_second_points, intrinsic)\n # The matrix comparisions aren't reliable near zero, so check elements manually.\n for i in range(expected_result.shape[0]):\n for j in range(expected_result.shape[1]):\n result_element = result[i, j]\n expected_element = expected_result[i, j]\n self.assertAlmostEqual(result_element, expected_element, 6,\n 'Matrix element ({0:d}, {1:d}) is incorrect.'.format(i, j))",
"def image_warp(img, mtx, dist, M):\n\n # 1) Undistort using mtx and dist\n img = cv2.undistort(img, mtx, dist, None, mtx)\n\n img_size = (img.shape[1], img.shape[0]) # note switch of x,y order\n\n # 2) warp image to top-down view\n warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)\n\n return warped",
"def myWarpPerspective(img, H, output_shapes):\n c, r = output_shapes\n \n # Create an output canvas according to the parameter \"output_shapes\"\n if len(img.shape) == 3:\n output = np.zeros((r, c, 3))\n else:\n output = np.zeros((r, c, 1))\n\n # List of pixel coordinates in canvas\n inverse_map = [[i, j] for i in range(c) for j in range(r)]\n\n # Covert the coordinates in the system of img2 back to the system of img1 \n # to find out the reference points\n inverse_map = np.asarray(inverse_map)\n inverse_map = myPerspectiveTransform(inverse_map, np.linalg.inv(H))\n \n \n for i in range(c):\n for j in range(r):\n index = i*r + j\n ix, iy = inverse_map[index]\n \n # Because the converted coords. are float, \n # we need to find out four ref. points to do bilinear interpolation\n tix, bix = np.ceil(ix), np.floor(ix)\n tiy, biy = np.ceil(iy), np.floor(iy)\n\n x_ratio = ix - bix\n y_ratio = iy - biy\n\n # Indexing does not allow float indices\n tix, bix, tiy, biy = np.int32(tix), np.int32(bix), np.int32(tiy), np.int32(biy)\n \n # Boundary checking: each ref point should locate within the input image\n if bix < 0 or biy < 0 or tix >= img.shape[1] or tiy >= img.shape[0]:\n continue\n else:\n # Bilinear interpolation\n output[j, i] = x_ratio*y_ratio*img[tiy, tix] \\\n + x_ratio*(1-y_ratio)*img[biy, tix] \\\n + (1-x_ratio)*y_ratio*img[tiy, bix] \\\n + (1-x_ratio)*(1-y_ratio)*img[biy, bix]\n output[j, i] = np.round(output[j, i])\n\n # Cast back to uint8 because of displaying and return results\n return np.uint8(output)",
"def __call__(self, img):\n if self.camera_matrix is not None and self.distortion_coef is not None:\n return cv2.undistort(\n img, self.camera_matrix, self.distortion_coef, None, self.camera_matrix)\n else:\n print(\"You should calculate Camera Matrix and Distortion coefficient first!\")\n return img",
"def __set_perspective(self):\n\n src = np.float32([[(.42 * self.img_shape[1],.65 * self.img_shape[0] ),\n (.58 * self.img_shape[1], .65 * self.img_shape[0]),\n (0 * self.img_shape[1],self.img_shape[0]),\n (1 * self.img_shape[1], self.img_shape[0])]])\n\n dst = np.float32([[0,0],\n [self.img_shape[1],0],\n [0,self.img_shape[0]],\n [self.img_shape[1],self.img_shape[0]]])\n\n self.M = cv2.getPerspectiveTransform(src, dst)\n self.M_inv = cv2.getPerspectiveTransform(dst, src)",
"def get_opt_translate(obj_img,\n back_img,\n back_center_x,\n back_center_y,\n obj_center_x,\n obj_center_y,\n prev_row_trans=0,\n prev_col_trans=0,\n is_erosion=False):\n width = obj_img.shape[0]\n obj_center_x = int(obj_center_x)\n obj_center_y = int(obj_center_y)\n curr_row_trans, curr_col_trans = prev_row_trans, prev_col_trans\n induce_x = int(back_center_x - obj_center_x + curr_col_trans)\n induce_y = int(back_center_y - obj_center_y + curr_row_trans)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= obj_img\n neg_count = len(np.argwhere(combine_img < 0))\n if is_erosion:\n trans_amount = 4\n else:\n trans_amount = 8\n while trans_amount > 1:\n trans_amount = trans_amount / 2\n neg_count_1 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=trans_amount,\n trans_col=0)\n neg_count_2 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=(-trans_amount),\n trans_col=0)\n if neg_count_1 < neg_count_2:\n if neg_count_1 < neg_count:\n neg_count = neg_count_1\n curr_row_trans += trans_amount\n else:\n if neg_count_2 < neg_count:\n neg_count = neg_count_2\n curr_row_trans -= trans_amount\n\n induce_y = back_center_y - obj_center_y + curr_row_trans\n if is_erosion:\n trans_amount = 4\n else:\n trans_amount = 16\n while trans_amount > 1:\n trans_amount = trans_amount / 2\n neg_count_1 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=0,\n trans_col=trans_amount)\n neg_count_2 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=0,\n trans_col=(-trans_amount))\n if neg_count_1 < neg_count_2:\n if neg_count_1 < neg_count:\n neg_count = neg_count_1\n curr_col_trans += trans_amount\n else:\n if neg_count_2 < neg_count:\n neg_count = neg_count_2\n curr_col_trans -= trans_amount\n # print('Negative Pix Count Translation: %d.' % neg_count)\n # print(curr_row_trans, curr_col_trans)\n return curr_row_trans, curr_col_trans, neg_count",
"def perspective_transform(image, src, sizex, sizey, rotate=True):\n src = np.float32(src)\n \n if rotate and np.sum((src[0] - src[2])**2) > np.sum((src[0] - src[1])**2):\n dst = np.float32([(0, sizey), (0, 0), (sizex, sizey), (sizex, 0)])\n else:\n dst = np.float32([(0, 0), (sizex, 0), (0, sizey), (sizex, sizey)])\n #if np.sum((src[0] - src[2])**2) <= np.sum((src[0] - src[1])**2):\n # dst = np.float32([(0, 0), (sizex, 0), (0, sizey), (sizex, sizey)])\n #else:\n \n M = cv.getPerspectiveTransform(src, dst)\n\n warped = cv.warpPerspective(image, M, (sizex, sizey))\n\n return warped",
"def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals",
"def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)",
"def initialize_undistortion_maps(self):\n\n new_camera_matrix, valid_roi = cv2.getOptimalNewCameraMatrix(\n self.camera_matrix, self.distortion_coefficients, self.image_size,\n 0)\n\n self.map1, self.map2 = cv2.initUndistortRectifyMap(\n self.camera_matrix, self.distortion_coefficients, None,\n new_camera_matrix, self.image_size, cv2.CV_16SC2)",
"def stereoWarpK_noMotion_singleSided(curImageInfo, conversionParam, globalParam): \n h, w, u = curImageInfo.originalImageResized.shape # shape after resize\n K = 1\n N = h * w * K\n gr = np.mean(curImageInfo.originalImageResized, 2) # not 3 as it is zero based :3\n grs = cv2.GaussianBlur(gr, (5, 5), 1)\n \n # One heuristic for converting depth to disparity\n disparity0 = imnormalize(1/(1+imnormalize(curImageInfo.depthResized)))*conversionParam.maxDisp - conversionParam.maxDisp/2;\n \n if conversionParam.spatialSmoothnessSwitch == True:\n # Smoothing the depth spatially according to adjacent pixels by using Gx, Gy gradients\n # Vertical and Horizontal Edges\n dx = cv2.filter2D(grs, -1, np.transpose(np.array([[-1, 1, 0]])))\n dy = cv2.filter2D(grs, -1, np.array([[-1, 1, 0]]))\n \n W = ( imnormalize(disparity0) + sigmoid(np.sqrt(np.power(dx, 2) + np.power(dy, 2)), 0.01, 500) ) / 2 \n \n A = np.transpose(spdiags(np.transpose(W).flatten(), 0, N, N, \"csc\") \\\n + (conversionParam.spatialSmoothCoeff_x * globalParam.Gx.transpose() * globalParam.Gx) \\\n + (conversionParam.spatialSmoothCoeff_y * globalParam.Gy.transpose() * globalParam.Gy))\n \n b = np.transpose(W).flatten() * np.transpose(disparity0).flatten()\n \n [x, flag] = cg(A, b, np.transpose(disparity0).flatten(), 5e-1, 50)\n \n disparity = np.transpose(np.reshape(x, (w, h))) # remove (h, w, 1, K)\n else:\n disparity = disparity0\n \n curImageInfo.leftImage = curImageInfo.originalImage\n \n # The -ve sign to convert the white to black and black to white \n warpright = -disparity\n \n # only the warping interp2 is done on the original size image with no resizing to have good estimation\n warpright = cv2.resize(warpright, (curImageInfo.originalImage.shape[1], curImageInfo.originalImage.shape[0]), \n interpolation=cv2.INTER_LINEAR)\n \n curImageInfo.rightImage = (clip(warpImage_v2((curImageInfo.originalImage), (warpright), \n conversionParam.resizeFactor, globalParam.xx, globalParam.yy, globalParam.YY)))\n \n return disparity",
"def get_transformation(markers):\r\n # get bounds of markers\r\n q1 = markers[1][1]\r\n q2 = markers[2][0]\r\n q3 = markers[2][3]\r\n q4 = markers[1][2]\r\n src_rect = np.array([q1, q2, q3, q4], np.float32)\r\n \r\n # get bounds of destination markers\r\n box_ratio = KEYBOARD_DIM[0] / MARKER_DIM\r\n box_h = math.hypot(q3[0] - q2[0], q3[1] - q2[1])\r\n box_w = box_ratio * box_h\r\n \r\n r1 = [0, 0]\r\n r2 = [box_w, 0]\r\n r3 = [box_w, box_h]\r\n r4 = [0, box_h]\r\n dest_rect = np.array([r1, r2, r3, r4], np.float32)\r\n \r\n # get expected height of keyboard + box height\r\n keyboardbox_ratio = (KEYBOARD_DIM[1] + MARKER_DIM)/ KEYBOARD_DIM[0]\r\n expected_h = keyboardbox_ratio * box_w\r\n \r\n # get perspective transformation matrix\r\n M = cv2.getPerspectiveTransform(src_rect, dest_rect)\r\n # apply y shift\r\n for j in range(3):\r\n M[1][j] += M[2][j] * -box_h\r\n \r\n return M, (math.ceil(box_w), math.ceil(expected_h - box_h))",
"def get_perspective_transform(src, dst):\n if not isinstance(src, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(src)))\n\n if not isinstance(dst, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(dst)))\n\n if not src.shape[-2:] == (4, 2):\n raise ValueError(\"Inputs must be a Bx4x2 tensor. Got {}\".format(src.shape))\n\n if not src.shape == dst.shape:\n raise ValueError(\"Inputs must have the same shape. Got {}\".format(dst.shape))\n\n if not (src.shape[0] == dst.shape[0]):\n raise ValueError(\n \"Inputs must have same batch size dimension. Expect {} but got {}\".format(src.shape, dst.shape)\n )\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n p = []\n for i in [0, 1, 2, 3]:\n p.append(_build_perspective_param(src[:, i], dst[:, i], 'x'))\n p.append(_build_perspective_param(src[:, i], dst[:, i], 'y'))\n\n # A is Bx8x8\n A = torch.stack(p, dim=1)\n\n # b is a Bx8x1\n b = torch.stack(\n [\n dst[:, 0:1, 0],\n dst[:, 0:1, 1],\n dst[:, 1:2, 0],\n dst[:, 1:2, 1],\n dst[:, 2:3, 0],\n dst[:, 2:3, 1],\n dst[:, 3:4, 0],\n dst[:, 3:4, 1],\n ],\n dim=1,\n )\n\n # solve the system Ax = b\n X, LU = _torch_solve_cast(b, A)\n\n # create variable to return\n batch_size = src.shape[0]\n M = torch.ones(batch_size, 9, device=src.device, dtype=src.dtype)\n M[..., :8] = torch.squeeze(X, dim=-1)\n\n return M.view(-1, 3, 3) # Bx3x3",
"def _transform_warp_impl3d(\n src: Tensor,\n dst_pix_trans_src_pix: Tensor,\n dsize_src: tuple[int, int, int],\n dsize_dst: tuple[int, int, int],\n grid_mode: str,\n padding_mode: str,\n align_corners: bool,\n) -> Tensor:\n dst_norm_trans_src_norm: Tensor = normalize_homography3d(dst_pix_trans_src_pix, dsize_src, dsize_dst)\n\n src_norm_trans_dst_norm = torch.inverse(dst_norm_trans_src_norm)\n return homography_warp3d(src, src_norm_trans_dst_norm, dsize_dst, grid_mode, padding_mode, align_corners, True)",
"def myWarpPerspectiveSparse(src, H, out_size):\n output = np.zeros(out_size)\n \n # Get all indices from the src matrix\n row, col = np.indices(src.shape[:2])\n \n # Store as x,y,1\n indices = [(c, r, 1) for r, c in zip(row.ravel(), col.ravel())]\n \n for idx in indices:\n new_idx = np.matmul(H, idx)\n new_idx = new_idx / new_idx[2]\n c = int(round(new_idx[0]))\n r = int(round(new_idx[1]))\n output[r, c,0] = src[idx[1], idx[0],0]\n output[r, c,1] = src[idx[1], idx[0],1]\n output[r, c,2] = src[idx[1], idx[0],2]\n \n return np.uint8(output[:out_size[0], :out_size[1]])",
"def get_perspective_transform3d(src: Tensor, dst: Tensor) -> Tensor:\n if not isinstance(src, (Tensor)):\n raise TypeError(f\"Input type is not a Tensor. Got {type(src)}\")\n\n if not isinstance(dst, (Tensor)):\n raise TypeError(f\"Input type is not a Tensor. Got {type(dst)}\")\n\n if not src.shape[-2:] == (8, 3):\n raise ValueError(f\"Inputs must be a Bx8x3 tensor. Got {src.shape}\")\n\n if not src.shape == dst.shape:\n raise ValueError(f\"Inputs must have the same shape. Got {dst.shape}\")\n\n if not (src.shape[0] == dst.shape[0]):\n raise ValueError(f\"Inputs must have same batch size dimension. Expect {src.shape} but got {dst.shape}\")\n\n if not (src.device == dst.device and src.dtype == dst.dtype):\n raise AssertionError(\n f\"Expect `src` and `dst` to be in the same device (Got {src.dtype}, {dst.dtype}) \"\n f\"with the same dtype (Got {src.dtype}, {dst.dtype}).\"\n )\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n p = []\n\n # 000, 100, 110, 101, 011\n for i in [0, 1, 2, 5, 7]:\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'x'))\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'y'))\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'z'))\n\n # A is Bx15x15\n A = stack(p, 1)\n\n # b is a Bx15x1\n b = stack(\n [\n dst[:, 0:1, 0],\n dst[:, 0:1, 1],\n dst[:, 0:1, 2],\n dst[:, 1:2, 0],\n dst[:, 1:2, 1],\n dst[:, 1:2, 2],\n dst[:, 2:3, 0],\n dst[:, 2:3, 1],\n dst[:, 2:3, 2],\n # dst[:, 3:4, 0], dst[:, 3:4, 1], dst[:, 3:4, 2],\n # dst[:, 4:5, 0], dst[:, 4:5, 1], dst[:, 4:5, 2],\n dst[:, 5:6, 0],\n dst[:, 5:6, 1],\n dst[:, 5:6, 2],\n # dst[:, 6:7, 0], dst[:, 6:7, 1], dst[:, 6:7, 2],\n dst[:, 7:8, 0],\n dst[:, 7:8, 1],\n dst[:, 7:8, 2],\n ],\n 1,\n )\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return\n batch_size: int = src.shape[0]\n M = torch.empty(batch_size, 16, device=src.device, dtype=src.dtype)\n M[..., :15] = X[..., 0]\n M[..., -1].fill_(1)\n\n return M.view(-1, 4, 4) # Bx4x4",
"def transform(self, image):\n # e) use cv2.warpPerspective() to warp your image to a top-down view\n # Warp the image using OpenCV warpPerspective()\n w, h = image.shape[1], image.shape[0]\n return cv2.warpPerspective(image, self.p_mat, (w, h))",
"def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M"
] | [
"0.6404395",
"0.60454637",
"0.5960749",
"0.58915275",
"0.5875036",
"0.57737654",
"0.57172453",
"0.56888026",
"0.5688326",
"0.5684905",
"0.5589449",
"0.5587895",
"0.5575258",
"0.55513036",
"0.55342984",
"0.5509691",
"0.5499112",
"0.5479626",
"0.5454087",
"0.541782",
"0.5413724",
"0.53886884",
"0.5374953",
"0.5365407",
"0.53300536",
"0.5303518",
"0.52995884",
"0.5243978",
"0.52394515",
"0.5221106"
] | 0.6066039 | 1 |
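Editor's aside (not a dataset row): several of the negatives above, such as `get_perspective_transform` and `get_transformation`, build a 3x3 homography from four point correspondences. A minimal, hypothetical sketch of how such a matrix is usually produced and applied with OpenCV follows; the file name and corner coordinates are invented for illustration.

```python
import cv2
import numpy as np

# Four detected corners in the source image and where they should map to.
# All coordinates here are made-up placeholder values.
src = np.float32([[50, 60], [400, 55], [420, 300], [45, 310]])
dst = np.float32([[0, 0], [400, 0], [400, 300], [0, 300]])

M = cv2.getPerspectiveTransform(src, dst)          # 3x3 homography matrix
img = cv2.imread("page.jpg")                       # hypothetical input image
warped = cv2.warpPerspective(img, M, (400, 300))   # top-down (rectified) view
```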
Calls OpenCV initUndistortRectifyMap using CameraInfo data; R is optional, and otherK is used for stereo rectification. | def initUndistortRectifyMap(self,m1type,R=None,otherK=None):
return cv2.initUndistortRectifyMap(self.K, self.dist, R, self.K if otherK is None else otherK, self.size, m1type) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize_undistortion_maps(self):\n\n new_camera_matrix, valid_roi = cv2.getOptimalNewCameraMatrix(\n self.camera_matrix, self.distortion_coefficients, self.image_size,\n 0)\n\n self.map1, self.map2 = cv2.initUndistortRectifyMap(\n self.camera_matrix, self.distortion_coefficients, None,\n new_camera_matrix, self.image_size, cv2.CV_16SC2)",
"def undistort_rectify_map(self):\n return cv.initUndistortRectifyMap(self._k, self._dist, np.eye(3), self._k, self.frame_size[::-1], cv.CV_16SC2)",
"def initialize_reconstruction(keypoints, matches, K, img_idx1, img_idx2):\n kpts_i, kpts_j, kpts_i_idxs, kpts_j_idxs = get_aligned_kpts(img_idx1, img_idx2, keypoints, matches)\n E, _ = cv2.findEssentialMat(kpts_i, kpts_j, K, cv2.FM_RANSAC, 0.999, 1.0)\n points, R1, t1, mask = cv2.recoverPose(E, kpts_i, kpts_j, K)\n assert abs(np.linalg.det(R1)) - 1 < 1e-7\n\n R0 = np.eye(3, 3)\n t0 = np.zeros((3, 1))\n\n points3d_with_views = []\n points3d_with_views = triangulate_points_and_reproject(\n R0, t0, R1, t1, K, points3d_with_views, img_idx1, img_idx2, kpts_i, kpts_j, kpts_i_idxs, kpts_j_idxs, reproject=False)\n\n return R0, t0, R1, t1, points3d_with_views",
"def compute_camera_calib_distortion_params():\r\n nx = 9#number of inside corners in x\r\n ny = 6#number of inside corners in y\r\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\n objp = np.zeros((ny*nx,3), np.float32)\r\n objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\r\n\r\n # Arrays to store object points and image points from all the images.\r\n objpoints = [] # 3d points in real world space\r\n imgpoints = [] # 2d points in image plane.\r\n\r\n # Read a particular image just to get image size (all images in the directory are same size)\r\n img = cv2.imread('./camera_cal/calibration3.jpg')\r\n img_size = (img.shape[1], img.shape[0])\r\n # Make a list of calibration images\r\n images = glob.glob('./camera_cal/calibration*.jpg')\r\n # Step through the list and search for chessboard corners\r\n for idx, fname in enumerate(images):\r\n img = cv2.imread(fname)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # Find the chessboard corners\r\n ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)\r\n\r\n # If found, add object points, image points\r\n if ret == True:\r\n objpoints.append(objp)\r\n imgpoints.append(corners)\r\n # # Draw and display the corners\r\n # cv2.drawChessboardCorners(img, (nx,ny), corners, ret)\r\n # #write_name = 'corners_found'+str(idx)+'.jpg'\r\n # #cv2.imwrite(write_name, img)\r\n # cv2.imshow('img', img)\r\n # cv2.waitKey(500)\r\n\r\n # Do camera calibration given object points and image points\r\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\r\n # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)\r\n dist_pickle = {}\r\n dist_pickle[\"mtx\"] = mtx\r\n dist_pickle[\"dist\"] = dist\r\n pickle.dump( dist_pickle, open( \"data/cam_calib_pickle.p\", \"wb\" ) )\r\n print(\"Pickling done\")",
"def get_things1(kp_3d, kp_2d, des, comp_list, H, map_3d, map_2d, map_des, map_cam, map_view, my_max):\n # Initializing the arrays\n points_3d = []\n points_2d = []\n camera_ind = []\n points_ind = []\n cam_params = []\n\n dst_3d = kp_3d\n dst_2d = kp_2d\n src_3d = map_3d\n src_2d = map_2d\n src_cam = map_cam\n low_bound = []\n up_bound = []\n my_min = 0\n\n # Updating the Camera parameters in map and setting the bounds for the update \n for i in range(my_min,my_max+1):\n cam_param = [map_view[i,0], map_view[i,1], map_view[i,2], map_view[i,3], map_view[i,4], map_view[i,5], f,0,0]\n cam_params.append(cam_param)\n\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n \n # Updating the Camera parameters for frame and setting the bounds for the update\n r = (R.from_matrix((H[0:3, 0:3]))).as_rotvec()\n t = H[:,3]\n cam_param = [r[0], r[1], r[2], t[0], t[1], t[2], f, 0, 0]\n cam_params.append(cam_param)\n \n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-np.pi)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n low_bound.append(f-1)\n low_bound.append(-1)\n low_bound.append(-1)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(np.pi)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n up_bound.append(f)\n up_bound.append(0)\n up_bound.append(0)\n\n new_cam = len(cam_params)-1\n cam_params = np.array(cam_params).reshape(-1,9)\n count = 0\n \n # listing variables to iterate \n l1 = []\n l2 = []\n count = 0\n \n for m in comp_list:\n count+=1\n l1.append(m.queryIdx)\n l2.append(m.trainIdx)\n\n l1 = np.array(l1).reshape(1,-1)\n l2 = np.array(l2).reshape(1,-1)\n l = np.vstack((l1,l2))\n l_fin = l[:,l[1, :].argsort()]\n j = 0\n count = len(points_3d)\n prev = -1\n final_l1 = []\n final_l2 = []\n final_des = []\n\n # Iterating through the list made and making sure no duplicates\n while(j<(len(l_fin[0]))):\n i1 = l_fin[0,j]\n i2 = l_fin[1,j]\n if(i2!=prev):\n # Map points insertion\n \n check = 0\n for ii in range(len(src_2d[i1])):\n m_2d = src_2d[i1][ii]\n check = 1\n ind = int(src_cam[i1][ii])\n points_2d.append([int((m_2d[0]%(2*cx))-cx), int((m_2d[1]%(2*cy))-cy),0])\n\n points_ind.append(count)\n camera_ind.append(ind)\n final_l1.append(i1)\n final_l2.append(0)\n \n # Taking Mean Desciptor if needed un comment 2 lines below\n # x = ((map_des[i1]*len(src_2d[i1]))+des[i2])/(len(src_2d[i1])+1)\n # map_des[i1] = x\n \n if(check==1):\n # Frame points insersion\n points_2d.append([int((dst_2d[i2,0])-cx), int((dst_2d[i2,1])-cy), 0])\n points_ind.append(count)\n camera_ind.append(new_cam)\n final_l1.append(i2)\n final_l2.append(1)\n wld_pt = src_3d[i1]\n points_3d.append([wld_pt[0], wld_pt[1], wld_pt[2]])\n prev = i2\n count = len(points_3d)\n low_bound.append(-20)\n low_bound.append(-np.inf)\n low_bound.append(-20)\n up_bound.append(20)\n up_bound.append(np.inf)\n up_bound.append(20)\n src_2d[i1].append([int((dst_2d[i2,0])), int((dst_2d[i2,1]))])\n j+=1\n \n # Final Output\n cam_params = np.array(cam_params).reshape(-1,9)\n points_3d = np.array(points_3d)\n points_2d = np.array(points_2d)\n camera_ind = 
np.array(camera_ind).reshape(len(camera_ind))\n points_ind = np.array(points_ind).reshape(len(points_ind))\n final_l1 = np.array(final_l1)\n final_l2 = np.array(final_l2)\n return cam_params, points_3d, points_2d, camera_ind, points_ind, final_l1, final_l2, low_bound, up_bound, map_des, src_2d",
"def main():\n cam = Realsense()\n # cam.access_intr_and_extr()\n profile = cam.pipeline.start(cam.config)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_scale = depth_sensor.get_depth_scale()\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n objp = np.zeros((3*4,3), np.float32)\n objp[:,:2] = np.mgrid[0:4,0:3].T.reshape(-1,2)\n axis = np.float32([[1,0,0], [0,1,0], [0,0,-1]]).reshape(-1,3)\n # print(objp)\n\n try:\n while (True):\n # detect ArUco markers in RGB images\n frames = cam.pipeline.wait_for_frames()\n aligned_frames = align.process(frames)\n color_frame = aligned_frames.get_color_frame()\n color_image = np.asanyarray(color_frame.get_data()) \n frame = color_image\n font = cv2.FONT_HERSHEY_SIMPLEX\n corners, ids, rvecs, tvecs = cam.detect_markers_realsense(frame)\n \n if np.all(ids != None): # if markers are detected\n for i in range(0, ids.size):\n aruco.drawAxis(frame, cam.newcameramtx, cam.dist, rvecs[i],\n tvecs[i], 0.1) # Draw axis\n aruco.drawDetectedMarkers(frame, corners) # draw square around markers\n\n ###### DRAW ID #####\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,25), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n\t ###### Output marker positions in camera frame ######\n \t # output tvec\n y0 = 60\n dy = 40\n for i in range(0, ids.size):\n y = y0 + i*dy\n cv2.putText(frame, str(tvecs[i][0]), (0, y), font, 1, (0,255,0),\n 2, cv2.LINE_AA)\n\n else:\n ##### DRAW \"NO IDS\" #####\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0), 2,\n cv2.LINE_AA)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (4,3), None)\n if ret == True:\n corners2 = cv2.cornerSubPix(gray, corners,(11,11), (-1,-1),\n cam.criteria)\n corners2 = corners2[::-1]\n # print(corners2)\n # print(objp)\n frame = cv2.drawChessboardCorners(frame, (4,3), corners2, ret)\n # Find the rotation and translation vectors.\n _, rvecs, tvecs = cv2.solvePnP(objp, corners2, cam.newcameramtx,\n cam.dist)\n rot, _ = cv2.Rodrigues(rvecs)\n # print(rot)\n # project 3D points to image plane\n imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs,\n cam.newcameramtx, cam.dist)\n frame = draw(frame, corners2, imgpts)\n\n # Display the resulting frame\n cv2.imshow('frame',frame)\n cv2.waitKey(5)\n\n # When everything done, release the capture\n cv2.destroyAllWindows()\n\n finally:\n cam.pipeline.stop()",
"def new_camera_intrinsics_callback(self, new_camera_info):\n self.camera_intrinsics = new_camera_info\n self.k_mat = np.matrix(\n np.array(self.camera_intrinsics.K).reshape((3, 3))\n )\n\n self.k_inv = self.k_mat.I",
"def new_undistorted(self):\n return CameraInfo(self.size,self.K,None)",
"def reconstruction(args):\n\n print('Loading 2D keypoints ...')\n keypoints, scores, _, _ = load_json(args.keypoints_file)\n\n # Loading only one person's keypoints\n if len(keypoints.shape) == 4:\n keypoints = keypoints[0]\n assert len(keypoints.shape) == 3\n\n # Transform the keypoints format from different dataset (MSCOCO, MPII) to h36m format\n if args.kpts_format == 'coco':\n keypoints, valid_frames = coco_h36m(keypoints)\n elif args.kpts_format == 'mpii':\n keypoints, valid_frames = mpii_h36m(keypoints)\n elif args.kpts_format == 'openpose':\n # Convert 'Openpose' format to MSCOCO\n order_coco = [i for i in range(18) if i != 1]\n keypoints = keypoints[:, order_coco]\n keypoints, valid_frames = coco_h36m(keypoints)\n else:\n valid_frames = np.where(np.sum(keypoints.reshape(-1, 34), axis=1) != 0)[0]\n assert args.kpts_format == 'h36m'\n\n # Get the width and height of video\n cap = cv2.VideoCapture(args.video_path)\n width = int(round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))\n height = int(round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\n # normalize keypoints\n input_keypoints = normalize_screen_coordinates(keypoints[..., :2], w=width, h=height)\n\n if args.frames == 27:\n filter_widths = [3, 3, 3]\n channels = 128\n elif args.frames == 81:\n filter_widths = [3, 3, 3, 3]\n channels = 64\n else:\n filter_widths = [3, 3, 3, 3, 3]\n channels = 32\n\n model_pos = SpatioTemporalModel(adj, 17, 2, 17, filter_widths=filter_widths, channels=channels, dropout=0.05)\n\n if torch.cuda.is_available():\n model_pos = model_pos.cuda()\n\n # load trained model\n print('Loading checkpoint', args.weight)\n chk_file = os.path.join('./checkpoint/gastnet', args.weight)\n checkpoint = torch.load(chk_file, map_location=lambda storage, loc: storage)\n model_pos.load_state_dict(checkpoint['model_pos'])\n\n receptive_field = model_pos.receptive_field()\n pad = (receptive_field - 1) // 2 # Padding on each side\n causal_shift = 0\n\n print('Reconstructing ...')\n gen = UnchunkedGenerator(None, None, [input_keypoints[valid_frames]],\n pad=pad, causal_shift=causal_shift, augment=True,\n kps_left=kps_left, kps_right=kps_right, joints_left=joints_left, joints_right=joints_right)\n prediction = evaluate(gen, model_pos, return_predictions=True)\n prediction = camera_to_world(prediction, R=rot, t=0)\n\n # We don't have the trajectory, but at least we can rebase the height\n prediction[:, :, 2] -= np.min(prediction[:, :, 2])\n\n prediction_new = np.zeros((*input_keypoints.shape[:-1], 3), dtype=np.float32)\n prediction_new[valid_frames] = prediction\n\n print('Rendering ...')\n anim_output = {'Reconstruction': prediction_new}\n render_animation(keypoints, keypoints_metadata, anim_output, h36m_skeleton, 25, 3000,\n np.array(70., dtype=np.float32), args.viz_output, limit=-1, downsample=1, size=5,\n input_video_path=args.video_path, viewport=(width, height), input_video_skip=0)",
"def __init__(self, camID, camera_cal_file='camera_cal_bnl.yaml'):\n self.camID=camID\n with open(camera_cal_file,\"r\") as yfile:\n params=yaml.load(yfile)\n # exit gracefully if yfile doesn't open\n self.nx0=params[camID]['nx0']\n self.ny0=self.nx0\n # pr0 is nx0/2, i.e. probably initial radius estimate.\n # pr0 rather than nx0 should be in the camera_cal_SSS.yaml config file\n self.pr0=(self.nx0+self.ny0)/4.\n self.ndy0=params[camID]['ndy0']\n self.ndx0=params[camID]['ndx0']\n self.cx=params[camID]['cx']\n self.cy=params[camID]['cy']\n self.rot=params[camID]['rot']\n self.beta=params[camID]['beta']\n self.azm=params[camID]['azm']\n self.c1=params[camID]['c1']\n self.c2=params[camID]['c2']\n self.c3=params[camID]['c3']\n self.lat=params[camID]['lat']\n self.lon=params[camID]['lon']\n# may need to resurrect this\n# xstart=int(params[camID]['cy']-nx0/2+0.5); ystart=int(params[camID]['cx']-ny0/2+0.5)\n self.nx0=int(self.nx0+0.5)\n self.ny0=int(self.ny0+0.5)",
"def __init__(self, chessboard_img_fnames, chessboard_size, lane_shape, scale_correction=(30 / 720, 3.7 / 700)):\n # Get image size\n example_img = cv2.imread(chessboard_img_fnames[0])\n self.img_size = example_img.shape[0:2]\n self.img_height = self.img_size[0]\n self.img_width = self.img_size[1]\n\n # Calibrate\n self.camera_matrix, self.distortion_coeffs = self.calibrate(chessboard_img_fnames, chessboard_size)\n\n # Define overhead transform and its inverse\n top_left, top_right, bottom_left, bottom_right = lane_shape\n source = np.float32([top_left, top_right, bottom_right, bottom_left])\n destination = np.float32([(bottom_left[0], 0), (bottom_right[0], 0),\n (bottom_right[0], self.img_height - 1), (bottom_left[0], self.img_height - 1)])\n self.overhead_transform = cv2.getPerspectiveTransform(source, destination)\n self.inverse_overhead_transform = cv2.getPerspectiveTransform(destination, source)\n self.y_m_per_pix = scale_correction[0]\n self.x_m_per_pix = scale_correction[1]",
"def stereo_match(self, cam0_points):\r\n cam0_points = np.array(cam0_points)\r\n if len(cam0_points) == 0:\r\n return []\r\n\r\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs, R_cam0_cam1)\r\n cam1_points = self.distort_points(\r\n cam0_points_undistorted, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n cam1_points_copy = cam1_points.copy()\r\n\r\n # Track features using LK optical flow method.\r\n cam0_points = cam0_points.astype(np.float32)\r\n cam1_points = cam1_points.astype(np.float32)\r\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam0_pyramid, self.curr_cam1_pyramid,\r\n cam0_points, cam1_points, **self.config.lk_params)\r\n\r\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam1_pyramid, self.curr_cam0_pyramid, \r\n cam1_points, cam0_points.copy(), **self.config.lk_params)\r\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\r\n\r\n # cam1_points_undistorted = self.undistort_points(\r\n # cam1_points, self.cam1_intrinsics,\r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, R_cam0_cam1)\r\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\r\n \r\n\r\n \r\n inlier_markers = np.logical_and.reduce(\r\n [inlier_markers.reshape(-1), err < 3, disparity < 20])\r\n\r\n # Mark those tracked points out of the image region as untracked.\r\n img = self.cam1_curr_img_msg.image\r\n for i, point in enumerate(cam1_points):\r\n if not inlier_markers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n inlier_markers[i] = 0\r\n\r\n # Compute the relative rotation between the cam0 frame and cam1 frame.\r\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\r\n # Compute the essential matrix.\r\n E = skew(t_cam0_cam1) @ R_cam0_cam1\r\n\r\n # Further remove outliers based on the known essential matrix.\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs)\r\n cam1_points_undistorted = self.undistort_points(\r\n cam1_points, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n\r\n norm_pixel_unit = 4.0 / (\r\n self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +\r\n self.cam1_intrinsics[0] + self.cam1_intrinsics[1])\r\n\r\n for i in range(len(cam0_points_undistorted)):\r\n if not inlier_markers[i]:\r\n continue\r\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\r\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\r\n epipolar_line = E @ pt0\r\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\r\n epipolar_line[:2])\r\n\r\n if error > self.config.stereo_threshold * norm_pixel_unit:\r\n inlier_markers[i] = 0\r\n\r\n return cam1_points, inlier_markers",
"def calibrateCamera(config,cbrow = 4,cbcol = 3,calibrate=False,alpha=0.4,manualPoints=False):\n\n # Termination criteria\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n # Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((cbrow * cbcol, 3), np.float32)\n objp[:, :2] = np.mgrid[0:cbcol, 0:cbrow].T.reshape(-1, 2)\n \n # Read the config file\n cfg_3d = auxiliaryfunctions.read_config(config)\n img_path,path_corners,path_camera_matrix,path_undistort=caf.Foldernames3Dproject(cfg_3d)\n \n # Make sure that the folders are present (if not, make them)\n if not os.path.exists(img_path):\n os.makedirs(img_path)\n if not os.path.exists(path_corners):\n os.makedirs(path_corners)\n \n # Get images and camera names\n images = glob.glob(os.path.join(img_path,'*.jpg'))\n cam_names = cfg_3d['camera_names']\n \n ## It's not clear to me why I want to do this or what this number represents... I need to read further into it\n # # update the variable snapshot* in config file according to the name of the cameras\n # try:\n # for i in range(len(cam_names)):\n # cfg_3d[str('config_file_'+cam_names[i])] = cfg_3d.pop(str('config_file_camera-'+str(i+1)))\n # for i in range(len(cam_names)):\n # cfg_3d[str('shuffle_'+cam_names[i])] = cfg_3d.pop(str('shuffle_camera-'+str(i+1)))\n # except:\n # pass\n \n project_path = cfg_3d['project_path']\n projconfigfile=os.path.join(str(project_path),'config.yaml')\n auxiliaryfunctions.write_config_3d(projconfigfile,cfg_3d)\n\n # Initialize the dictionary \n img_shape = {}\n objpoints = {} # 3d point in real world space\n imgpoints = {} # 2d points in image plane.\n dist_pickle = {} ## I think this is the intrinsic parameter file that needs to be read in\n stereo_params= {}\n for cam in cam_names:\n objpoints.setdefault(cam, [])\n imgpoints.setdefault(cam, [])\n dist_pickle.setdefault(cam, [])\n\n # Sort the images.\n images.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))\n if len(images)==0:\n raise Exception(\"No calibration images found. 
Make sure the calibration images are saved as .jpg and with prefix as the camera name as specified in the config.yaml file.\")\n direct_images = [img for img in images if 'direct' in img]\n mirror_images = [img for img in images if 'mirror' in img]\n\n if manualPoints:\n # This is where we read in the manually identified points and check them and stuff\n csvFiles = glob.glob(os.path.join(img_path,'*.csv'))\n for fname_csv in csvFiles:\n allPoints = caf.readCSV(fname_csv)\n for row in allPoints:\n continue\n # Start with mirror to figure out which BGR to use for direct\n for fname in mirror_images:\n \n filename=Path(fname).stem\n img = cv2.imread(fname)\n\n # Create a dictionary with all of the different image color conversions for testing\n img_colorConv = {\n \"BGR\":img,\n \"HSV\":cv2.cvtColor(img,40),\n \"Gray\":cv2.cvtColor(img,6)\n }\n\n thresh = 120\n ret = False\n for colorConv in img_colorConv:\n currImg = img_colorConv[colorConv]\n size = currImg.shape\n \n if len(size) == 2:\n \n ret, corners = cv2.findChessboardCorners(currImg, (cbcol,cbrow),None,)\n if ret == True: break\n\n currImg_bw = cv2.threshold(currImg,thresh,255,cv2.THRESH_BINARY)[1]\n ret, corners = cv2.findChessboardCorners(currImg_bw, (cbcol,cbrow),None,)\n if ret == True: break\n else: continue\n \n chanIdx = 0\n while (ret == False) and (chanIdx < size[2]):\n ret, corners = cv2.findChessboardCorners(currImg[:,:,chanIdx], (cbcol,cbrow),None,)\n if ret == True: break\n channel_bw = cv2.threshold(currImg[:,:,chanIdx],thresh,255,cv2.THRESH_BINARY)[1]\n ret, corners = cv2.findChessboardCorners(channel_bw, (cbcol,cbrow),None,)\n chanIdx += 1\n \n # If found, add object points, image points (after refining them)\n if ret == True:\n currImg = img_colorConv[\"Gray\"]\n img_shape[cam] = currImg.shape[::-1]\n objpoints[cam].append(objp)\n corners = cv2.cornerSubPix(currImg,corners,(11,11),(-1,-1),criteria)\n imgpoints[cam].append(corners)\n # Draw the corners and store the images\n img = cv2.drawChessboardCorners(currImg, (cbcol,cbrow), corners,ret)\n cv2.imwrite(os.path.join(str(path_corners),filename+'_corner.jpg'),img)\n else:\n print(\"Corners not found for the image %s\" %Path(fname).name)\n \n try:\n h, w = img.shape[:2]\n except:\n raise Exception(\"It seems that the name of calibration images does not match with the camera names in the config file. 
Please make sure that the calibration images are named with camera names as specified in the config.yaml file.\")\n\n # Perform calibration for each cameras and store the matrices as a pickle file\n if calibrate == True:\n \n # Read in the intrinsic parameters for each camera\n for cam in cam_names:\n dist_pickle[cam] = pickle.load(os.path.join(path_camera_matrix,cam+'_intrinsic_params.pickle'))\n\n # # Compute stereo calibration for each pair of cameras\n # camera_pair = [[cam_names[0], cam_names[1]]]\n # for pair in camera_pair:\n # print(\"Computing stereo calibration for \" %pair)\n # retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objpoints[pair[0]],imgpoints[pair[0]],imgpoints[pair[1]],dist_pickle[pair[0]]['mtx'],dist_pickle[pair[0]]['dist'], dist_pickle[pair[1]]['mtx'], dist_pickle[pair[1]]['dist'],(h, w),flags = cv2.CALIB_FIX_INTRINSIC)\n\n # # Stereo Rectification\n # rectify_scale = alpha # Free scaling parameter check this https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#fisheye-stereorectify\n # R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (h, w), R, T, alpha = rectify_scale)\n \n # stereo_params[pair[0]+'-'+pair[1]] = {\"cameraMatrix1\": cameraMatrix1,\"cameraMatrix2\": cameraMatrix2,\"distCoeffs1\": distCoeffs1,\"distCoeffs2\": distCoeffs2,\"R\":R,\"T\":T,\"E\":E,\"F\":F,\n # \"R1\":R1,\n # \"R2\":R2,\n # \"P1\":P1,\n # \"P2\":P2,\n # \"roi1\":roi1,\n # \"roi2\":roi2,\n # \"Q\":Q,\n # \"image_shape\":[img_shape[pair[0]],img_shape[pair[1]]]}\n \n # print('Saving the stereo parameters for every pair of cameras as a pickle file in %s'%str(os.path.join(path_camera_matrix)))\n \n # auxiliaryfunctions.write_pickle(os.path.join(path_camera_matrix,'stereo_params.pickle'),stereo_params)\n # print(\"Camera calibration done! Use the function ``check_undistortion`` to check the check the calibration\")\n # else:\n # print(\"Corners extracted! You may check for the extracted corners in the directory %s and remove the pair of images where the corners are incorrectly detected. If all the corners are detected correctly with right order, then re-run the same function and use the flag ``calibrate=True``, to calbrate the camera.\"%str(path_corners))",
"def __init__(self):\n self.index = 'r11_07_06c'\n self.parameters = {'run_index': 'r11_07_06c',\n 'h_1': 0.25,\n 'rho_0': 1.150,\n 'rho_1': 1.100,\n 'rho_2': 1.000,\n 'alpha': 0.5,\n 'D': 0.4,\n 'H': 0.25,\n 'sample': 1.0,\n 'perspective': 'old'}\n self.run_data = {'run_index': 'r11_07_06c',\n 'l0x': 2796,\n 'l0y': 1151,\n 'lsx': 2793,\n 'lsy': 716,\n 'j10x': 210,\n 'j10y': 1165,\n 'j1sx': 208,\n 'j1sy': 727,\n 'leakage': -76,\n 'odd_1': 'n',\n 'j20x': 2728,\n 'j20y': 1086,\n 'j2sx': 2730,\n 'j2sy': 670,\n 'r0x': 1097,\n 'r0y': 1095,\n 'rsx': 1093,\n 'rsy': 683,\n 'odd_2': 'n'}\n self.raw_image = 'tests/data/synced/r11_07_06c/cam1/img_0001.jpg'\n self.bc_image = 'tests/data/bc/r11_07_06c/cam1/img_0001.jpg'\n self.processed_path = 'tests/data/processed_ref/r11_07_06c/cam1/img_0001.jpg'",
"def calibrateCameraArucoExtended(corners, ids, counter, board, imageSize, cameraMatrix, distCoeffs, rvecs=None, tvecs=None, stdDeviationsIntrinsics=None, stdDeviationsExtrinsics=None, perViewErrors=None, flags=None, criteria=None):\n pass",
"def _finish_calibration(self, objpoints, imgpoints_l, imgpoints_r, shape):\n left_camera = self.source._left._finish_calibration(objpoints, imgpoints_l, shape)\n right_camera = self.source._right._finish_calibration(objpoints, imgpoints_r, shape)\n\n ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(\n objpoints,\n imgpoints_l, imgpoints_r,\n left_camera.matrix, left_camera.distortion,\n right_camera.matrix, right_camera.distortion,\n shape,\n criteria=self._stereocalib_criteria, flags=self._flags)\n\n R1, R2, P1, P2, Q, vb1, vb2 = cv2.stereoRectify(\n M1,\n d1,\n M2,\n d2,\n shape,\n R,\n T,\n flags=cv2.CALIB_ZERO_DISPARITY)\n\n left_camera = PinholeCamera(shape, M1, d1, R1, P1)\n right_camera = PinholeCamera(shape, M2, d2, R2, P2)\n\n return StereoCamera(left_camera, right_camera, R, T, E, F, Q)",
"def main(argv):\n # Get default camera id based on current platform.\n if sys.platform == 'linux' or sys.platform == 'linux2':\n default_cam_ids = ['/dev/video0', '/dev/video1', '/dev/video2']\n else: # darwin win32 win64\n default_cam_ids = [0, 1, 2]\n\n # Parse CLI arguments\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--cam_ids', default=default_cam_ids,\n help=\"camera ids list (ex: ='[/dev/video0, /dev/video1]'\")\n # TODO: implement dict argument parsing settings\n ap.add_argument('-s', '--settings',\n help=\"camera settings list \"\n \"(ex:[[(3, 640), (4, 480)], [(3, 640), (4, 480)]]\")\n args = vars(ap.parse_args())\n\n # Default camera settings\n if args[\"settings\"]:\n settings = args[\"settings\"]\n else:\n settings = [[(cv2.CAP_PROP_FRAME_WIDTH, 1280),\n (cv2.CAP_PROP_FRAME_HEIGHT, 720),\n (cv2.CAP_PROP_FPS, 30),\n (cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')),\n (cv2.CAP_PROP_AUTOFOCUS, 1)],\n [(cv2.CAP_PROP_FRAME_WIDTH, 1280),\n (cv2.CAP_PROP_FRAME_HEIGHT, 720),\n (cv2.CAP_PROP_FPS, 30),\n (cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')),\n (cv2.CAP_PROP_AUTOFOCUS, 1)],\n [(cv2.CAP_PROP_FRAME_WIDTH, 1280),\n (cv2.CAP_PROP_FRAME_HEIGHT, 720),\n (cv2.CAP_PROP_FPS, 30),\n (cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')),\n (cv2.CAP_PROP_AUTOFOCUS, 1)]]\n\n aruco_dict_num = cv2.aruco.DICT_6X6_1000\n # also available: DICT_5X5_1000, DICT_4X4_50, DICT_ARUCO_ORIGINAL\n\n # Initialize Cameras objects with calibration and lens correction\n cam_ids = args['cam_ids']\n if sys.platform != 'linux' and sys.platform != 'linux2':\n cam_ids = [int(cam_id) for cam_id in cam_ids]\n cameras = []\n for cam_id, setting in zip(cam_ids, settings):\n print('Setting up camera %s.' % cam_id)\n cam = CameraCorrected(\n cam_id=cam_id, aruco_dict_num=aruco_dict_num, settings=setting)\n cam.initialize()\n cameras.append(cam)\n\n cameras_fusion = CamerasFusion(cameras)\n cameras_fusion.initialize()\n\n # Open basic live view\n print('Live view running...')\n print(' k to calibrate correction')\n print(' m to save frame')\n print(' v loop between gray2rgb and blue2rgb fusion')\n print(' ESC or q to exit.')\n\n selected_fused = cameras_fusion.read_blue2rgb_fused\n while True:\n if cameras_fusion.fusion_calibration_is_done:\n frame = selected_fused()\n frame = camera[0].draw_fps(frame)\n else:\n for camera in cameras_fusion.cameras:\n frame = camera.read_undistort()\n frame = camera.draw_text(\n frame, 'Please manually adjust Cameras overlapping, then c'\n 'alibrate.', y=camera.height - (camera.height/20),\n thickness=2)\n k = cv2.waitKey(50) % 256\n if k == 27 or k == ord('q'):\n break\n cv2.imshow(\"Live camera\", frame)\n k = cv2.waitKey(40) % 256\n if k == 27 or k == ord('q'):\n break\n elif k == ord('k'):\n if cameras_fusion.calibrate_fusion():\n print('Calibration done!')\n elif k == ord('m'):\n cv2.imwrite('frame_fused_%s.png' % cam.cam_id, frame)\n elif k == ord('v'):\n if selected_fused == cameras_fusion.read_blue2rgb_fused:\n selected_fused = cameras_fusion.read_gray2rgb_fused\n else:\n selected_fused = cameras_fusion.read_blue2rgb_fused\n\n cameras_fusion.release() # DO NOT FORGET TO RELEASE!\n cv2.destroyAllWindows()",
"def calibrateCameraAruco(corners, ids, counter, board, imageSize, cameraMatrix, distCoeffs, rvecs=None, tvecs=None, flags=None, criteria=None):\n pass",
"def _calibrate_camera():\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n vertical, horizon = 7, 9 # target corners in vertical and horizontal direction\n\n grid = np.zeros((vertical * horizon, 3), np.float32)\n grid[:, :2] = np.mgrid[:horizon, :vertical].T.reshape(-1, 2)\n\n obj_points = [] # 3d point in real world space\n img_points = [] # 2d points in image plane\n\n image_list = glob.glob(os.path.join('C:\\\\Users\\\\chuyangl\\\\Desktop\\\\liushuai\\\\calibrator\\\\board\\\\right', \"*.bmp\"))\n #print(image_list)\n gray = None\n for img_name in image_list:\n print(img_name)\n image = cv2.imread(img_name)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # find the chess board corners\n found, corners = cv2.findChessboardCorners(gray, (horizon, vertical), None)\n #print(\"corner shape\",corners.shape,corners)\n\n # add object points, image points (after refining them)\n if found:\n obj_points.append(grid)\n corners = cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria) #corner是像素坐标 refine 坐标\n img_points.append(corners)\n #print(\"corner shape\",corners.shape,corners)\n else:\n print('can not find %s corners' % img_name)\n\n #print(corners)\n #print(grid)\n ret, matrix, distortion, rotation, translation = \\\n cv2.calibrateCamera(obj_points, img_points, gray.shape[::-1], None, None)\n print(type(img_points[0]),img_points[0].shape,img_points[0])\n print(type(obj_points[0]),obj_points[0].shape,obj_points[0]) \n #cv2.calibrateCamera([obj_points[-1]], [img_points[-1]], gray.shape[::-1], None, None)\n #print(\"RT Matrix\",np.array(rotation).shape,np.array(translation).shape,rotation)\n \n print(\"projMtx:\\n\",getProjMtx(np.array(rotation[0]),np.array(translation[0])))\n projMtx = getProjMtx(np.array(rotation[0]),np.array(translation[0]))\n _3d = np.array([8,0,0,1])\n _2d = np.dot(np.dot(matrix,projMtx,),_3d)\n _2d = _2d/_2d[2]\n print(\"2d:\",_2d,img_points[0][7])\n mean_error = 0\n for i in range(len(obj_points)):\n new_img_points, _ = cv2.projectPoints(obj_points[i], rotation[i], translation[i], matrix, distortion)#3D点投影到平面\n error = cv2.norm(img_points[i], new_img_points, cv2.NORM_L2) / len(new_img_points)\n mean_error += error\n print(\"new and old:\",new_img_points[1],img_points[i][1])\n print(\"mean error: \", mean_error / len(obj_points))\n\n return np.array(matrix), np.array(distortion)",
"def test_generate_camera_info(self):\n data = ET.parse('data/cam_data_0.xml')\n data_str = ET.tostring(data.getroot())\n\n dict = tesse_ros_bridge.utils.parse_cam_data(data_str)\n\n (left, right) = tesse_ros_bridge.utils.generate_camera_info(dict, dict)\n self.assertEqual(left.header.frame_id, \"left_cam\")\n self.assertEqual(right.header.frame_id, \"right_cam\")\n self.assertEqual(left.width, dict['parameters']['width'])\n self.assertEqual(left.height, dict['parameters']['height'])\n self.assertEqual(right.width, dict['parameters']['width'])\n self.assertEqual(right.height, dict['parameters']['height'])\n\n # TODO(marcus): add more checks",
"def calibrate():\n if os.path.exists('calibration_data.pkl'):\n with open('calibration_data.pkl', 'rb') as f:\n return pickle.load(f)\n\n objp = np.zeros((6 * 9, 3), np.float32)\n objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)\n\n # Arrays to store object points and image points from all the images.\n objpoints = [] # 3d points in real world space\n imgpoints = [] # 2d points in image plane.\n\n for fname in glob.glob('camera_cal/calibration*.jpg'):\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # print('{}: {}'.format(fname, gray.shape))\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)\n\n if ret:\n objpoints.append(objp)\n imgpoints.append(corners)\n else:\n print('Failed to detect corners for {}'.format(fname))\n\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (1280, 720), None, None)\n assert ret\n\n with open('calibration_data.pkl', 'wb') as f:\n pickle.dump((mtx, dist), f)\n\n return mtx, dist",
"def initialize_camera(self, distance, yaw, pitch, x=0, y=0, z=0):\n lookat = [x, y, z]\n\n self._env.unwrapped._p.resetDebugVisualizerCamera(\n distance, yaw, pitch, lookat)",
"def __init__(self, n_pixels_u, n_pixels_v, detector_size_u, detector_size_v, source_to_detector_dist,\n source_to_object_dist, angular_inc=1, center_of_rot=0, **kwargs):\n\n self.n_pixels_u = n_pixels_u\n self.n_pixels_v = n_pixels_v\n\n self.detector_size_u = detector_size_u\n self.detector_size_v = detector_size_v\n self.source_to_detector_dist = source_to_detector_dist\n self.source_to_object_dist = source_to_object_dist\n self.angular_inc = angular_inc\n\n self.center_of_rot_u = center_of_rot\n\n # All values below are calculated\n\n self.projection_angs = np.arange(0., 360, self.angular_inc)\n self.n_projections = len(self.projection_angs)\n\n self.object_size_x = self.detector_size_u * self.source_to_object_dist / self.source_to_detector_dist\n self.object_size_y = self.detector_size_u * self.source_to_object_dist / self.source_to_detector_dist\n self.object_size_z = self.detector_size_v * self.source_to_object_dist / self.source_to_detector_dist\n\n self.voxel_size_x = self.object_size_x / self.n_pixels_u\n self.voxel_size_y = self.object_size_y / self.n_pixels_u\n self.voxel_size_z = self.object_size_z / self.n_pixels_v\n\n self.pixel_size_u = self.detector_size_u / self.n_pixels_u\n self.pixel_size_v = self.detector_size_v / self.n_pixels_v\n\n self.center_of_rot_y = self.center_of_rot_u * (\n self.source_to_object_dist / self.source_to_detector_dist) * self.pixel_size_u\n\n self.object_ys = (np.arange(self.n_pixels_u, dtype=np.float64) - self.n_pixels_u / 2.) * self.voxel_size_y\n self.object_xs = (np.arange(self.n_pixels_u, dtype=np.float64) - self.n_pixels_u / 2.) * self.voxel_size_x\n self.object_zs = (np.arange(self.n_pixels_v, dtype=np.float64) - self.n_pixels_v / 2.) * self.voxel_size_z\n\n self.detector_us = (np.arange(self.n_pixels_u,\n dtype=np.float64) - self.n_pixels_u / 2.) * self.pixel_size_u\n self.detector_vs = (np.arange(self.n_pixels_v,\n dtype=np.float64) - self.n_pixels_v / 2.) * self.pixel_size_v",
"def __init__(self, calibration_patterns, pattern_size=(9,6), retain_calibration_patterns=False):\n self.camera_matrix = None\n self.distortion_coef = None\n self.calibration_patterns_success = []\n self.calibration_patterns_error = []\n self.cal_dist_and_mtx(calibration_patterns, pattern_size, retain_calibration_patterns)",
"def depth_rendering(ref_view, disparity_map, lf_size = (64, 512, 512, 3)):\n lf_one_way = int(math.floor(math.sqrt(lf_size[0])))\n\n x_indices = np.arange(lf_size[1])\n y_indices = np.arange(lf_size[2])\n b_indices = np.arange(lf_size[0])\n\n #Create a grid of size lf_size[:3] consisting of the pixel co ordinates of each image\n _, x, y = np.meshgrid(b_indices, x_indices, y_indices, indexing= 'ij')\n\n # Create a grid of size (lf_size[0], 2) consiting of the row, col lf positions\n grid = np.meshgrid(np.arange(lf_one_way), np.arange(lf_one_way), indexing= 'ij')\n stacked = np.stack(grid, 2)\n positions = stacked.reshape(-1, 2)\n\n # Compute the distance from each lf position from the reference view\n # Repeat the elements of this to match the size of the disparity map\n ref_pos = np.array(\n [lf_one_way // 2, lf_one_way // 2])\n distance = (np.tile(ref_pos, (lf_size[0], 1)) - positions).T\n dis_repeated = np.repeat(distance, lf_size[1] * lf_size[2], axis = 1)\n dis_repeated = dis_repeated.reshape(2, lf_size[0], lf_size[1], lf_size[2])\n\n\n # Tile the disparity map so that there is one for each lf_position - lf_size[0]\n tiled_map = np.tile(disparity_map, (lf_size[0], 1, 1))\n\n # Compute the shifted pixels\n x_shifted = (x.astype(np.float32) - tiled_map * dis_repeated[0]).flatten()\n y_shifted = (y.astype(np.float32) - tiled_map * dis_repeated[1]).flatten()\n\n #indices for linear interpolation in a square around the central point\n x_low = np.around(x_shifted).astype(int)\n #x_high = x_low + 1\n\n y_low = np.around(y_shifted).astype(int)\n #y_high = y_low + 1\n\n #Place co-ordinates outside the image back into the image\n x_low_clip = np.clip(x_low, 0, ref_view.shape[0] - 1)\n #x_high_clip = np.clip(x_high, 0, ref_view.shape[0] - 1)\n y_low_clip = np.clip(y_low, 0, ref_view.shape[1] - 1)\n #y_high_clip = np.clip(y_high, 0, ref_view.shape[1] - 1)\n\n #Gather the interpolation points\n interp_pts_1 = np.stack((x_low_clip, y_low_clip))\n #interp_pts_2 = np.stack((x_low_clip, y_high_clip))\n #interp_pts_3 = np.stack((x_high_clip, y_low_clip))\n #interp_pts_4 = np.stack((x_high_clip, y_high_clip))\n\n #Index into the images\n desired_shape = lf_size\n res_1 = torch_big_sample(ref_view, interp_pts_1, desired_shape)\n return res_1\n res_2 = torch_big_sample(ref_view, interp_pts_2, desired_shape)\n res_3 = torch_big_sample(ref_view, interp_pts_3, desired_shape)\n res_4 = torch_big_sample(ref_view, interp_pts_4, desired_shape)\n\n #Compute interpolation weights\n x_low_f = x_low.astype(np.float32)\n d_x_low = 1.0 - (x_shifted.astype(np.float32) - x_low_f)\n d_x_high = 1.0 - d_x_low\n y_low_f = y_low.astype(np.float32)\n d_y_low = 1.0 - (y_shifted.astype(np.float32) - y_low_f)\n d_y_high = 1.0 - d_y_low\n\n w1 = torch.from_numpy(d_x_low * d_y_low)\n w2 = torch.from_numpy(d_x_low * d_y_high)\n w3 = torch.from_numpy(d_x_high * d_y_low)\n w4 = torch.from_numpy(d_x_high * d_y_high)\n\n #THEY AGREE AT THIS POINT\n weighted_1 = torch.mul(repeat_weights(w1, desired_shape), res_1)\n weighted_2 = torch.mul(repeat_weights(w2, desired_shape), res_2)\n weighted_3 = torch.mul(repeat_weights(w3, desired_shape), res_3)\n weighted_4 = torch.mul(repeat_weights(w4, desired_shape), res_4)\n\n novel_view = torch.add(torch.add(weighted_1, weighted_2), weighted_3)\n torch.add(novel_view, weighted_4, out=novel_view)\n return novel_view",
"def generate_depth_map(calib_dir, velo_filename, cam=2, vel_depth=False):\n # load calibration files\n cam2cam = read_calib_file(os.path.join(calib_dir, 'calib_cam_to_cam.txt'))\n velo2cam = read_calib_file(os.path.join(calib_dir, 'calib_velo_to_cam.txt'))\n velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))\n velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))\n\n # get image shape\n im_shape = cam2cam[\"S_rect_02\"][::-1].astype(np.int32)\n\n # compute projection matrix velodyne->image plane\n R_cam2rect = np.eye(4)\n R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)\n P_rect = cam2cam['P_rect_0'+str(cam)].reshape(3, 4)\n P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)\n\n # load velodyne points and remove all behind image plane (approximation)\n # each row of the velodyne data is forward, left, up, reflectance\n velo = load_velodyne_points(velo_filename)\n velo = velo[velo[:, 0] >= 0, :]\n\n # project the points to the camera\n velo_pts_im = np.dot(P_velo2im, velo.T).T\n velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, 2][..., np.newaxis]\n\n if vel_depth:\n velo_pts_im[:, 2] = velo[:, 0]\n\n # check if in bounds\n # use minus 1 to get the exact same value as KITTI matlab code\n velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1\n velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1\n val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)\n val_inds = val_inds & (velo_pts_im[:, 0] < im_shape[1]) & (velo_pts_im[:, 1] < im_shape[0])\n velo_pts_im = velo_pts_im[val_inds, :]\n\n # project to image\n depth = np.zeros((im_shape[:2]))\n depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2]\n\n # find the duplicate points and choose the closest depth\n inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])\n dupe_inds = [item for item, count in Counter(inds).items() if count > 1]\n for dd in dupe_inds:\n pts = np.where(inds == dd)[0]\n x_loc = int(velo_pts_im[pts[0], 0])\n y_loc = int(velo_pts_im[pts[0], 1])\n depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()\n depth[depth < 0] = 0\n\n return depth",
"def camera_callback(self, data):\n self.camera_mutex.acquire()\n self.position = [data.pose.position.x, data.pose.position.z, data.pose.position.y]\n self.rotation = [data.pose.orientation.x, data.pose.orientation.z, data.pose.orientation.y, data.pose.orientation.w]\n self.camera_mutex.release()",
"def main():\n # Placing imports here so it will be imported only if user want to test algorithm, not when importing\n # Class DepthCameraServer\n\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n import sensors_classes as sensors\n from images_processing_class import ImagesProcessing\n import struct\n import time\n\n # Starting Thread which receives data from VideoCamera, port od thread's socket must be the same as the port at\n # which data from VideoCamera is redirected, to be sure check where VideoCamera data stream is send in script env.py\n depth_camera_server = DepthCameraServer('localhost', 60012)\n depth_camera_server.run()\n\n pose_server = sensors.Pose_server('localhost', 60007)\n pose_server.run()\n\n # Waiting 1 sec to be sure than depth_camera_server has received minimum 1 image, because program will crash if\n # depth_camera_server doesn't have time to receive an image\n time.sleep(1)\n\n points = depth_camera_server.get_points()\n\n lista_punktow = []\n x = []\n y = []\n z = []\n\n data_pose_dict = pose_server.get_all()\n pose_x = data_pose_dict['x']\n pose_y = data_pose_dict['y']\n pose_z = data_pose_dict['z']\n\n yawp = data_pose_dict['yaw']\n pitchp = data_pose_dict['pitch']\n rollp = data_pose_dict['roll']\n\n # Each 3D point is a set of float(x,y,z). Each point has a size of 12 bytes because\n # 3*sizeof(float) = 12 bytes, that's why we are dividing data into parts with size of 12 and then\n # converting this data to tuple with 3 float (xyz).\n\n #\n # Processing cloud of points to seperate x, y and z was copied from dcam_old.py\n #\n\n for i in range(0, len(points) - 12, 12):\n xyz = struct.unpack('fff', points[i:i + 12])\n\n # rotation is included\n x1p, y1p, z1p = rotation(xyz[2], xyz[0], xyz[1], yawp, pitchp, rollp)\n\n # data from pose is included\n xp = round(x1p + pose_x, 1)\n yp = round(y1p + pose_y, 1)\n zp = round(z1p + pose_z, 1)\n temp = [xp, yp, zp]\n lista_punktow.append(temp)\n\n # Choosing only these points which have minimum 0.45 meters at z-axis, but why???\n for i in lista_punktow:\n x.append(i[0])\n y.append(i[1])\n z.append(i[2])\n\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x, y, z, cmap='viridis', linewidth=0.5)\n ax.scatter(x[0], y[0], z[0], c='red')\n ax.scatter(x[1], y[1], z[1], c='yellow')\n ax.scatter(x[2], y[2], z[2], c='black')\n ax.scatter(pose_x, pose_y, pose_z, c='green')\n plt.show()",
"def triangulate_points_and_reproject(R_l, t_l, R_r, t_r, K, points3d_with_views, img_idx1, img_idx2, kpts_i, kpts_j, kpts_i_idxs, kpts_j_idxs, reproject=True):\n\n print(f\"Triangulating: {len(kpts_i)} points.\")\n P_l = np.dot(K, np.hstack((R_l, t_l)))\n P_r = np.dot(K, np.hstack((R_r, t_r)))\n\n kpts_i = np.squeeze(kpts_i)\n kpts_i = kpts_i.transpose()\n kpts_i = kpts_i.reshape(2,-1)\n kpts_j = np.squeeze(kpts_j)\n kpts_j = kpts_j.transpose()\n kpts_j = kpts_j.reshape(2,-1)\n\n point_4d_hom = cv2.triangulatePoints(P_l, P_r, kpts_i, kpts_j)\n points_3D = cv2.convertPointsFromHomogeneous(point_4d_hom.transpose())\n for i in range(kpts_i.shape[1]):\n source_2dpt_idxs = {img_idx1:kpts_i_idxs[i], img_idx2:kpts_j_idxs[i]}\n pt = Point3D_with_views(points_3D[i], source_2dpt_idxs)\n points3d_with_views.append(pt)\n\n if reproject:\n kpts_i = kpts_i.transpose()\n kpts_j = kpts_j.transpose()\n rvec_l, _ = cv2.Rodrigues(R_l)\n rvec_r, _ = cv2.Rodrigues(R_r)\n projPoints_l, _ = cv2.projectPoints(points_3D, rvec_l, t_l, K, distCoeffs=np.array([]))\n projPoints_r, _ = cv2.projectPoints(points_3D, rvec_r, t_r, K, distCoeffs=np.array([]))\n delta_l , delta_r = [], []\n for i in range(len(projPoints_l)):\n delta_l.append(abs(projPoints_l[i][0][0] - kpts_i[i][0]))\n delta_l.append(abs(projPoints_l[i][0][1] - kpts_i[i][1]))\n delta_r.append(abs(projPoints_r[i][0][0] - kpts_j[i][0]))\n delta_r.append(abs(projPoints_r[i][0][1] - kpts_j[i][1]))\n avg_error_l = sum(delta_l)/len(delta_l)\n avg_error_r = sum(delta_r)/len(delta_r)\n print(f\"Average reprojection error for just-triangulated points on image {img_idx1} is:\", avg_error_l, \"pixels.\")\n print(f\"Average reprojection error for just-triangulated points on image {img_idx2} is:\", avg_error_r, \"pixels.\")\n errors = list(zip(delta_l, delta_r))\n return points3d_with_views, errors, avg_error_l, avg_error_r\n\n return points3d_with_views",
"def __init__(self, camera, cameras, settings):\n\n self.cam = None\n self.jpeg_quality = 95 # 0 to 100, higher is better quality, 95 is cv2 default\n # check picamera version\n try:\n picamversion = require('picamera')[0].version\n except:\n picamversion = '0'\n\n if 'threaded_read' in cameras[camera]: # threaded on non-threaded camera reading\n self.threaded_read = cameras[camera]['threaded_read']\n else:\n self.threaded_read = True\n if 'resolution' in cameras[camera]:\n self.resolution = literal_eval(cameras[camera]['resolution'])\n else:\n self.resolution = (320, 240)\n if 'framerate' in cameras[camera]:\n self.framerate = cameras[camera]['framerate']\n else:\n self.framerate = 32\n if 'vflip' in cameras[camera]:\n self.vflip = cameras[camera]['vflip']\n else:\n self.vflip = False\n if 'resize_width' in cameras[camera]:\n # resize_width is a percentage value\n # width in pixels will be computed later after reading a test image\n self.resize_width = cameras[camera]['resize_width']\n else:\n self.resize_width = None\n if 'viewname' in cameras[camera]:\n self.viewname = cameras[camera]['viewname']\n else:\n self.viewname = ' '\n if 'src' in cameras[camera]:\n self.src = cameras[camera]['src']\n else:\n self.src = 0\n if 'exposure_mode' in cameras[camera]:\n self.exposure_mode = cameras[camera]['exposure_mode']\n else:\n self.exposure_mode = None\n if 'iso' in cameras[camera]:\n self.iso = cameras[camera]['iso']\n else:\n self.iso = 0 # default value\n if 'shutter_speed' in cameras[camera]:\n self.shutter_speed = cameras[camera]['shutter_speed']\n else:\n self.shutter_speed = 0 # default value\n if 'sharpness' in cameras[camera]:\n self.sharpness = cameras[camera]['sharpness']\n else:\n self.sharpness = 0 # default value\n if 'contrast' in cameras[camera]:\n self.contrast = cameras[camera]['contrast']\n else:\n self.contrast = 0 # default value\n if 'brightness' in cameras[camera]:\n self.brightness = cameras[camera]['brightness']\n else:\n self.brightness = 50 # default value\n if 'exposure_compensation' in cameras[camera]:\n self.exposure_compensation = cameras[camera]['exposure_compensation']\n else:\n self.exposure_compensation = 0 # 0 default value, integer value between -25 and 25\n if 'awb_mode' in cameras[camera]:\n self.awb_mode = cameras[camera]['awb_mode']\n else:\n self.awb_mode = 'auto' # default value\n\n self.detectors = []\n if 'detectors' in cameras[camera]: # is there at least one detector\n self.setup_detectors(cameras[camera]['detectors'],\n settings.nodename,\n self.viewname)\n if camera[0].lower() == 'p': # this is a picam\n # start PiCamera and warm up; inherits methods from\n # imutils.VideoStream unless threaded_read is False; then uses class\n # PiCameraUnthreadedStream to read the PiCamera in an unthreaded way\n if self.threaded_read:\n self.cam = VideoStream(usePiCamera=True,\n resolution=self.resolution,\n framerate=self.framerate).start()\n else:\n self.cam = PiCameraUnthreadedStream(resolution=self.resolution,\n framerate=self.framerate)\n\n # if an exposure mode has been set in yaml, set it\n if self.exposure_mode:\n self.cam.camera.exposure_mode = self.exposure_mode\n # if an iso has been set in yaml, set it\n if self.iso:\n self.cam.camera.iso = self.iso\n # if an iso has been set in yaml, set it\n if self.shutter_speed:\n self.cam.camera.shutter_speed = self.shutter_speed\n # if an sharpness has been set in yaml, set it\n if self.sharpness:\n self.cam.camera.sharpness = self.sharpness\n # if an contrast has been set in yaml, set it\n if self.contrast:\n 
self.cam.camera.contrast = self.contrast\n # if an brightness has been set in yaml, set it\n if self.brightness:\n self.cam.camera.brightness = self.brightness\n # if an exposure_compensation has been set in yaml, set it\n if self.exposure_compensation:\n self.cam.camera.exposure_compensation = self.exposure_compensation\n # if an awb_mode has been set in yaml, set it\n if self.awb_mode:\n self.cam.camera.awb_mode = self.awb_mode\n self.cam_type = 'PiCamera'\n else: # this is a webcam (not a picam)\n self.cam = VideoStream(src=0).start()\n self.cam_type = 'webcam'\n sleep(3.0) # allow camera sensor to warm up\n\n # self.text is the text label for images from this camera.\n # Each image that is sent is sent with a text label so the hub can\n # file them by nodename, viewname, and send_type\n # example: JeffOffice Window|jpg\n # Nodename and View name are in one field, separated by a space.\n # send_type is in the next field\n # The 2 field names are separaged by the | character\n node_and_view = ' '.join([settings.nodename, self.viewname]).strip()\n self.text = '|'.join([node_and_view, settings.send_type])\n\n # set up camera image queue\n self.cam_q = deque(maxlen=settings.queuemax)"
] | [
"0.69114333",
"0.6471733",
"0.5740211",
"0.5670836",
"0.5657102",
"0.5598646",
"0.5563903",
"0.55359626",
"0.548316",
"0.54115915",
"0.5390015",
"0.534091",
"0.53331286",
"0.53153676",
"0.5306986",
"0.5242267",
"0.5223143",
"0.5221984",
"0.52040917",
"0.5180249",
"0.5106843",
"0.5082982",
"0.50740534",
"0.5072648",
"0.5058799",
"0.50542736",
"0.5030498",
"0.5019382",
"0.50154257",
"0.50067085"
] | 0.73232615 | 0 |
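Editor's aside (not a dataset row): the positive document above only wraps cv2.initUndistortRectifyMap. A hedged, self-contained sketch of how its two output maps are typically consumed with cv2.remap follows; the intrinsics, distortion vector, image size and file name are placeholders, not values taken from the dataset.

```python
import cv2
import numpy as np

# Placeholder intrinsics and (zero) distortion, purely for illustration.
K = np.array([[800.0, 0.0, 320.0],
              [0.0, 800.0, 240.0],
              [0.0, 0.0, 1.0]])
dist = np.zeros(5)
size = (640, 480)  # (width, height)

# Build the maps once (R=None, new camera matrix = K), then remap every frame.
map1, map2 = cv2.initUndistortRectifyMap(K, dist, None, K, size, cv2.CV_16SC2)
frame = cv2.imread("frame.png")  # hypothetical input frame
undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR)
```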
Returns all timestamps of the given netCDF file as a list of datetime objects. | def get_time_nc(nc_file, tv='time'):
from netCDF4 import MFDataset,num2date
ds = MFDataset(nc_file)
try:
time = ds.variables[tv]
except:
tv='time_counter'
ds.close()
try:
ds = MFDataset(nc_file)
time = ds.variables[tv]
        if hasattr(time, 'units') and hasattr(time, 'calendar'):
            timestamps = num2date(time[:], time.units, time.calendar)
        elif hasattr(time, 'units'):
timestamps = num2date(time[:], time.units)
else:
timestamps = num2date(time[:])
ds.close()
    except Exception:
        raise
return timestamps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_timestamps( self, raster_pos=None ):\n if raster_pos is None:\n headers = self.time_specific_headers\n else:\n headers = self.get_raster_pos_headers( raster_pos )\n \n return [to_epoch( from_Tformat( h['DATE_OBS'] ) ) for h in headers]",
"def timestamps(self) -> List[float]:\n return self._timestamps",
"def timestamps(self) -> List[float]:\n return self._timestamps",
"def load_timestamps(data_path):\n timestamp_file = os.path.join(data_path, 'oxts', 'timestamps.txt')\n\n # Read and parse the timestamps\n timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n # NB: datetime only supports microseconds, but KITTI timestamps\n # give nanoseconds, so need to truncate last 4 characters to\n # get rid of \\n (counts as 1) and extra 3 digits\n t = datetime.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n timestamps.append(t)\n return timestamps",
"def timestamps(self) -> List[T]:\n return self._timestamps",
"def get_raw_datetimes():\n raw_datetimes = []\n with open(RAW_DATETIMES_PATH, 'r') as f:\n for x in f.read().splitlines():\n try:\n raw_datetimes.append(datetime.datetime(year=int(x[1:5]), month=int(x[6:8]), day=int(x[9:11])))\n except ValueError:\n raw_datetimes.append('NA')\n return raw_datetimes",
"def load_timestamps(ts_file):\n ts = []\n with open(ts_file, 'r') as f:\n for line in f.readlines():\n line = line.split()\n if line[0] != \"#\":\n ts.append(line)\n\n return ts",
"def _get_timestamps(self, time_interval: RawTimeIntervalType | None, bbox: BBox) -> list[dt.datetime]:",
"def read_times(self, slices=None):\n times = netCDF4.num2date(\n datetime.strptime(\n self.get_handler().SOURCE_START_DATE.split('.')[0],\n '%Y%m%d%H%M%S'\n )\n )\n return numpy.ma.array([times])",
"def get_timestamps(self) -> List[datetime.datetime]:\n return [activity.timestamp for activity in self.activities]",
"def get_datetimes(file_name):\n csv_file = open(file_name, 'rb')\n file_content = csv.reader(csv_file)\n\n # ignore header\n file_content.next()\n\n datetimes = []\n\n for row in file_content:\n datetimes.append(row[0])\n\n csv_file.close()\n\n return datetimes",
"def get_dates(file,start,end):\r\n \r\n data = format_data(file)\r\n data = data.loc[start:end,:] \r\n dates = list(data.index)\r\n \r\n return dates",
"def timeStamps(dataset):\n \n timestamps = []\n \n for index, row in enumerate(dataset):\n try:\n timeObj = datetime.datetime.strptime(timeStampFix(row), '%y:%j:%H:%M:%S')\n except ValueError:\n print('Failed to create datetime object for ' + timeStampFix(row))\n timestamps.append(timeObj)\n \n return timestamps",
"def timestamps():\n timestamps = ( # Index\n 1459516622.1, # 0\n 1459516622.2, # 1\n 1459516622.3, # 2\n 1459516623.0, # 3\n 1459516623.1, # 4\n 1459516623.3, # 5\n 1459516624.0, # 6\n )\n return timestamps",
"def get_nc_timevalues(fname, time_varname='time'):\n nc_fid = netCDF4.Dataset(fname, 'r')\n try:\n assert nc_fid.variables[time_varname] is not None\n except AssertionError:\n raise ValueError('nc file does not have variable/dimension: {}'\n .format(time_varname))\n\n nc_time_values = nc_fid.variables[time_varname]\n time_values = netCDF4.num2date(nc_time_values[:],\n nc_time_values.units,\n nc_time_values.calendar)\n\n return time_values",
"def _load_time_series(self, path: str) -> np.ndarray:\n items = []\n previous = None\n for item in sorted(pathlib.Path(path).glob(\"*.nc\")):\n with xr.open_dataset(item) as ds:\n current = ds.ocean_time.values[0].astype(\"datetime64[M]\")\n if (previous is not None\n and (current - previous != np.timedelta64(1, \"M\"))):\n raise ValueError(\"Time series not continuous\")\n items.append((current, str(item)))\n previous = current\n length = max(len(item[1]) for item in items)\n return np.array(\n items,\n dtype={\n \"names\": (\"date\", \"path\"),\n \"formats\": (\"datetime64[M]\", f\"U{length}\"),\n },\n )",
"def _get_timestamps(self, time_interval: RawTimeIntervalType | None, bbox: BBox) -> list[dt.datetime]:\n if any(feat_type.is_timeless() for feat_type, _, _ in self.features if feat_type.is_array()):\n return []\n\n timestamps = get_available_timestamps(\n bbox=bbox,\n time_interval=time_interval,\n data_collection=self.data_collection,\n maxcc=self.maxcc,\n config=self.config,\n )\n\n return self.timestamp_filter(timestamps, self.time_difference)",
"def _get_timestamps(self, time_interval: RawTimeIntervalType | None, bbox: BBox) -> list[dt.datetime]:\n if self.single_scene:\n return [time_interval[0]] # type: ignore[index, list-item]\n\n timestamps = get_available_timestamps(\n bbox=bbox,\n time_interval=time_interval,\n data_collection=self.data_collection,\n maxcc=self.maxcc,\n config=self.config,\n )\n\n return self.timestamp_filter(timestamps, self.time_difference)",
"def protobuf_timestamps_to_dates(protobuf_timestamps):\n date_list = []\n \n for protobuf_timestamp in protobuf_timestamps:\n _timestamp = Timestamp()\n _timestamp.FromJsonString(value = protobuf_timestamp)\n _date = _timestamp.ToDatetime().date()\n date_list.append(_date)\n \n return date_list",
"def load_timestamps(timestamps_data_path):\n timestamp_file = os.path.join(\n timestamps_data_path, 'data.csv')\n\n timestamps = []\n with codecs.open(timestamp_file, 'r', 'utf-8') as f:\n for line in islice(f, 1, None):\n t = float(\"{:.9f}\".format(float(line.split(',')[0]) / 1e9))\n timestamps.append(t) \n\n # Subselect the chosen range of frames, if any\n return timestamps",
"def load_timestamps_img(data_path):\n timestamp_file = os.path.join(data_path, 'image_00', 'timestamps.txt')\n\n # Read and parse the timestamps\n timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n # NB: datetime only supports microseconds, but KITTI timestamps\n # give nanoseconds, so need to truncate last 4 characters to\n # get rid of \\n (counts as 1) and extra 3 digits\n t = datetime.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n timestamps.append(t)\n return timestamps",
"def get_timestamps(self, dataset_name):\n return get_timestamps(self, dataset_name)",
"def read_datetime_set(filename, seq_len):\n\tdate_set = []\n\twith open(os.path.join(info_path, \"squence_len_{}\".format(seq_len), filename), 'r') as f:\n\t\tfor line in f:\n\t\t\tnew_line = line.rstrip('\\n').split('\\t')\n\t\t\tdate_set.append([int(new_line[0]), int(new_line[1])])\n\treturn np.array(date_set)",
"def getTimeStamps():\n\n # Initialize\n results = dict()\n\n # UT time\n ut = utils.getUT(pointing=True).split()\n results['utday'] = ut[0]\n results['ut'] = float(ut[1])\n\n # year/month/day/second\n utStamp = time.gmtime()\n utHour = maybeAddAZero(utStamp[3])\n utMin = maybeAddAZero(utStamp[4])\n utSec = maybeAddAZero(utStamp[5])\n results['timeLab'] = ''.join([commands.yearMonthDay(),'_',utHour,utMin,utSec])\n\n # Done\n return results",
"def timestamps(self):\n return self.source.timestamps[self._time_keep]",
"def to_twodim_list(self):\n if self._timestampFormat is None:\n return self._timeseriesData\n\n datalist = []\n append = datalist.append\n convert = TimeSeries.convert_epoch_to_timestamp\n for entry in self._timeseriesData:\n append([convert(entry[0], self._timestampFormat), entry[1:]])\n\n return datalist",
"def load_timestamp_list_from_file(filename):\n\n if not filename:\n raise ValueError(\"Invalid File Name.\")\n\n timestamp_list = []\n\n if not os.path.exists(filename):\n raise IOError(\"File Not Found.\")\n\n try:\n with open(filename, \"rt\") as fp:\n timestamp_list = json.load(fp)\n\n except ValueError:\n raise ValueError(\"The file is not a valid JSON format.\")\n\n return timestamp_list",
"def get_seviri_file_time(file):\n if hasattr(file, '__iter__'):\n filenames = [f.split('/')[-1] for f in file]\n date = [datetime(int(f[38:42]), int(f[42:44]),\n int(f[44:46]), int(f[46:48]),\n int(f[48:50])) for f in filenames]\n else:\n f = file.split('/')[-1]\n date = datetime(int(f[38:42]), int(f[42:44]),\n int(f[44:46]), int(f[46:48]),\n int(f[48:50]))\n return date",
"def get_labels(file):\n file_split = file.split('+')\n lat = float(file_split[0])\n long = float(file_split[1])\n time = file_split[2].split('.')[0]\n #dtype='datetime64'\n return lat,long,time",
"def list_times(self, start: int = None, end: int = None) -> List:\n return [i.time for i in self.data[start:end]]"
] | [
"0.67807794",
"0.65918297",
"0.65918297",
"0.649629",
"0.6495381",
"0.6472645",
"0.6459693",
"0.6456668",
"0.6402592",
"0.6382574",
"0.6315384",
"0.6305346",
"0.6288575",
"0.6225149",
"0.6186225",
"0.61691904",
"0.6143416",
"0.60917103",
"0.6061064",
"0.6044909",
"0.60109127",
"0.5980108",
"0.5968533",
"0.59585094",
"0.5933251",
"0.59214884",
"0.5905007",
"0.5898037",
"0.5865798",
"0.585939"
] | 0.6615538 | 1 |
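A minimal usage sketch for the get_time_nc document above; the file name is hypothetical, and any netCDF file whose time variable is named 'time' or 'time_counter' and carries CF-style units should work:

# Read the time axis of a (hypothetical) daily netCDF file and inspect its range.
timestamps = get_time_nc('tasmax_day_EUR-11.nc')
print(timestamps[0], timestamps[-1])  # first and last datetime-like values
print(len(timestamps))                # number of time steps in the file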
Reformats analogs results file for analogues viewer code. | def reformat_analogs(analogs, prefix='modified-analogfile.tsv'):
    import numpy as np  # needed below for np.repeat
import pandas as pd
try:
num_cols = 3 # dateAnlg, Dis, Corr
# Create dataframe and read in output csv file of analogs process
dfS = pd.DataFrame()
dfS = pd.read_csv(analogs, delimiter=r"\s+", index_col=0)
# Find number of analogues
        num_analogues = dfS.shape[1] // 3  # integer division: columns come in date/dis/corr triplets
# LOGGER.debug('num_analogues: %s', num_analogues)
# Define temporary df
df_anlg = dfS.iloc[:, 0:num_analogues] # store only anlg dates
df_dis = dfS.iloc[:, num_analogues:2 * num_analogues] # store only dis
df_corr = dfS.iloc[:, 2 * num_analogues:3 *
num_analogues] # store only corr
# remove index name before stacking
df_anlg.index.name = ""
df_dis.index.name = ""
df_corr.index.name = ""
dateStack = df_anlg.stack()
disStack = df_dis.stack().abs() # raw values < 0 so take abs
corrStack = df_corr.stack()
# Create df of correct dimensions (n x num_cols) using dfS
df_all = dfS.iloc[:, 0:num_cols] # NB data are placeholders
# Rename cols
df_all.columns = ['dateAnlg', 'Dis', 'Corr']
        # Replicate each row num_analogues times (for dcjs format)
df_all = df_all.loc[np.repeat(df_all.index.values, num_analogues)]
# Replace data placeholders with correct values
df_all['dateAnlg'] = list(dateStack)
df_all['Dis'] = list(disStack)
df_all['Corr'] = list(corrStack)
# Name index col
df_all.index.name = 'dateRef'
# save to tsv file
analogs_mod = prefix
df_all.to_csv(analogs_mod, sep='\t')
LOGGER.info('successfully reformatted analog file')
except Exception:
msg = 'failed to reformat analog file'
LOGGER.exception(msg)
raise Exception(msg)
return analogs_mod | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_result():\n names = get_tests()[0]\n\n for name in names:\n arctern_file = os.path.join(ARCTERN_RESULT, name + '.csv')\n\n update_quote(arctern_file)\n update_bool(arctern_file)",
"def remix(self):\n self.original = audio.LocalAudioFile(self.infile)\n #for i, segment in enumerate(self.original.analysis.segments):\n # segment.encode(\"seg_%s.mp3\" % i)\n print \"\\n\\n\\n\"\n loudnesses = [x.timbre[0] for i, x in enumerate(self.original.analysis.segments)]\n brightnesses = [x.timbre[1] for i, x in enumerate(self.original.analysis.segments)]\n flatnesses = [x.timbre[2] for i, x in enumerate(self.original.analysis.segments)]\n attacks = [x.timbre[3] for i, x in enumerate(self.original.analysis.segments)]\n timbre5 = [x.timbre[4] for i, x in enumerate(self.original.analysis.segments)]\n timbre6 = [x.timbre[5] for i, x in enumerate(self.original.analysis.segments)]\n timbre7 = [x.timbre[6] for i, x in enumerate(self.original.analysis.segments)]\n timbre8 = [x.timbre[7] for i, x in enumerate(self.original.analysis.segments)]\n timbre9 = [x.timbre[8] for i, x in enumerate(self.original.analysis.segments)]\n timbre10 = [x.timbre[9] for i, x in enumerate(self.original.analysis.segments)]\n timbre11 = [x.timbre[10] for i, x in enumerate(self.original.analysis.segments)]\n timbre12 = [x.timbre[11] for i, x in enumerate(self.original.analysis.segments)]\n\n print \"AVERAGES\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (avg(loudnesses),avg(brightnesses),avg(flatnesses),avg(attacks),avg(timbre5),avg(timbre6),avg(timbre7),avg(timbre8),avg(timbre9),avg(timbre10),avg(timbre11),avg(timbre12))\n print\n print \"STDVS\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (stddev(loudnesses),stddev(brightnesses),stddev(flatnesses),stddev(attacks),stddev(timbre5),stddev(timbre6),stddev(timbre7),stddev(timbre8),stddev(timbre9),stddev(timbre10),stddev(timbre11),stddev(timbre12))\n\n\n print \"\\tLoud\\tBright\\tFlat\\tAttack\\ttim5\\ttim6\\ttim7\\ttim8\\ttim9\\ttim10\\ttim11\\ttim12\"\n for segment in self.original.analysis.segments:\n if are_kicks(segment): print \"Kick\",\n elif are_snares(segment): print \"Snar\",\n elif are_hats(segment): print \"Hats\",\n else: print \"else\",\n print \"\\t%s\\t%s\\t%s\\t%s\\t%s\" % (segment.timbre[0], segment.timbre[1], segment.timbre[2], segment.timbre[3], segment.timbre[4])\n\n kicks = self.original.analysis.segments.that(are_kicks)\n #if kicks: kicks.encode('kicks.mp3')\n snares = self.original.analysis.segments.that(are_snares)\n #if snares: snares.encode('snares.mp3')\n hats = self.original.analysis.segments.that(are_hats)\n #if hats: hats.encode('hats.mp3')\n\n # Time to replace\n hat_sample = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n kick_sample = audio.AudioData(self.sample_path + self.template['kick'], sampleRate=44100, numChannels=2, verbose=False)\n snare_sample = audio.AudioData(self.sample_path + self.template['snare'], sampleRate=44100, numChannels=2, verbose=False)\n \n empty = audio.AudioData(ndarray=numpy.zeros(((self.original.sampleRate * self.original.analysis.duration), 2), dtype=numpy.int16), numChannels=2, sampleRate=44100)\n\n last = 0\n for segment in kicks:\n if last + len(kick_sample.data) > segment.start:\n print \"Adding kick at %s\" % segment.start\n 
empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(kick_sample.data)] += kick_sample.data\n last = segment.start\n\n last = 0\n for segment in snares:\n if last + len(snare_sample.data) > segment.start:\n print \"Adding snare at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(snare_sample.data)] += snare_sample.data \n last = segment.start\n for segment in hats:\n if last + len(hat_sample.data) > segment.start:\n print \"Adding hat at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(hat_sample.data)] += hat_sample.data\n last = segment.start\n\n audio.mix(empty, self.original, 0.5).encode('mixed.mp3')",
"def on_menu_prepare_magic_results_tables(self, event):\n # write a redo file\n try:\n self.on_menu_save_interpretation(None)\n except Exception as ex:\n print('-W-', ex)\n pass\n if self.data_model != 3: # data model 3 data already read in to contribution\n #------------------\n # read existing pmag results data and sort out the directional data.\n # The directional data will be merged to one combined pmag table.\n # these data will be merged later\n #-----------------------.\n\n PmagRecsOld = {}\n for FILE in ['pmag_specimens.txt', 'pmag_samples.txt', 'pmag_sites.txt', 'pmag_results.txt']:\n PmagRecsOld[FILE], meas_data = [], []\n try:\n meas_data, file_type = pmag.magic_read(\n os.path.join(self.WD, FILE))\n self.GUI_log.write(\n \"-I- Read existing magic file %s\\n\" % (os.path.join(self.WD, FILE)))\n # if FILE !='pmag_specimens.txt':\n os.rename(os.path.join(self.WD, FILE),\n os.path.join(self.WD, FILE + \".backup\"))\n self.GUI_log.write(\n \"-I- rename old magic file %s.backup\\n\" % (os.path.join(self.WD, FILE)))\n except:\n self.GUI_log.write(\n \"-I- Can't read existing magic file %s\\n\" % (os.path.join(self.WD, FILE)))\n continue\n for rec in meas_data:\n if \"magic_method_codes\" in list(rec.keys()):\n if \"LP-PI\" not in rec['magic_method_codes'] and \"IE-\" not in rec['magic_method_codes']:\n PmagRecsOld[FILE].append(rec)\n\n pmag_specimens_header_1 = [\n \"er_location_name\", \"er_site_name\", \"er_sample_name\", \"er_specimen_name\"]\n pmag_specimens_header_2 = [\n 'measurement_step_min', 'measurement_step_max', 'specimen_int']\n pmag_specimens_header_3 = [\"specimen_correction\", \"specimen_int_corr_anisotropy\",\n \"specimen_int_corr_nlt\", \"specimen_int_corr_cooling_rate\"]\n pmag_specimens_header_4 = []\n for short_stat in self.preferences['show_statistics_on_gui']:\n stat = \"specimen_\" + short_stat\n pmag_specimens_header_4.append(stat)\n pmag_specimens_header_5 = [\n \"magic_experiment_names\", \"magic_method_codes\", \"measurement_step_unit\", \"specimen_lab_field_dc\"]\n pmag_specimens_header_6 = [\"er_citation_names\"]\n\n specimens_list = []\n for specimen in list(self.Data.keys()):\n if 'pars' in list(self.Data[specimen].keys()):\n if 'saved' in self.Data[specimen]['pars'] and self.Data[specimen]['pars']['saved']:\n specimens_list.append(specimen)\n elif 'deleted' in self.Data[specimen]['pars'] and self.Data[specimen]['pars']['deleted']:\n specimens_list.append(specimen)\n\n # Empty pmag tables:\n MagIC_results_data = {}\n MagIC_results_data['pmag_specimens'] = {}\n MagIC_results_data['pmag_samples_or_sites'] = {}\n MagIC_results_data['pmag_results'] = {}\n\n # write down pmag_specimens.txt\n specimens_list.sort()\n for specimen in specimens_list:\n if 'pars' in self.Data[specimen] and 'deleted' in self.Data[specimen]['pars'] and self.Data[specimen]['pars']['deleted']:\n print('-I- Deleting interpretation for {}'.format(specimen))\n this_spec_data = self.spec_data.loc[specimen]\n # there are multiple rows for this specimen\n if isinstance(this_spec_data, pd.DataFrame):\n # delete the intensity rows for specimen\n cond1 = self.spec_container.df.specimen == specimen\n cond2 = self.spec_container.df.int_abs.notnull()\n cond = cond1 & cond2\n self.spec_container.df = self.spec_container.df[-cond]\n # there is only one record for this specimen\n else:\n # delete all intensity data for that specimen\n columns = list(self.contribution.data_model.get_group_headers('specimens', 'Paleointensity'))\n 
columns.extend(list(self.contribution.data_model.get_group_headers('specimens', 'Paleointensity pTRM Check Statistics')))\n columns.extend(list(self.contribution.data_model.get_group_headers('specimens', 'Paleointensity pTRM Tail Check Statistics')))\n columns.extend(list(self.contribution.data_model.get_group_headers('specimens', 'Paleointensity pTRM Additivity Check Statistics')))\n columns.extend(list(self.contribution.data_model.get_group_headers('specimens', 'Paleointensity Arai Statistics')))\n columns.extend(list(self.contribution.data_model.get_group_headers('specimens', 'Paleointensity Directional Statistics')))\n int_columns = set(columns).intersection(self.spec_data.columns)\n int_columns.update(['method_codes', 'result_quality', 'meas_step_max', 'meas_step_min', 'software_packages', 'meas_step_unit', 'experiments'])\n new_data = {col: \"\" for col in int_columns}\n cond1 = self.spec_container.df.specimen == specimen\n for col in int_columns:\n self.spec_container.df.loc[specimen, col] = \"\"\n\n elif 'pars' in self.Data[specimen] and 'saved' in self.Data[specimen]['pars'] and self.Data[specimen]['pars']['saved']:\n sample_name = self.Data_hierarchy['specimens'][specimen]\n site_name = thellier_gui_lib.get_site_from_hierarchy(\n sample_name, self.Data_hierarchy)\n location_name = thellier_gui_lib.get_location_from_hierarchy(\n site_name, self.Data_hierarchy)\n\n MagIC_results_data['pmag_specimens'][specimen] = {}\n if version != \"unknown\":\n MagIC_results_data['pmag_specimens'][specimen]['magic_software_packages'] = version\n MagIC_results_data['pmag_specimens'][specimen]['er_citation_names'] = \"This study\"\n # MagIC_results_data['pmag_specimens'][specimen]['er_analyst_mail_names']=\"unknown\"\n\n MagIC_results_data['pmag_specimens'][specimen]['er_specimen_name'] = specimen\n MagIC_results_data['pmag_specimens'][specimen]['er_sample_name'] = sample_name\n MagIC_results_data['pmag_specimens'][specimen]['er_site_name'] = site_name\n MagIC_results_data['pmag_specimens'][specimen]['er_location_name'] = location_name\n MagIC_results_data['pmag_specimens'][specimen]['magic_method_codes'] = self.Data[\n specimen]['pars']['magic_method_codes'] + \":IE-TT\"\n tmp = MagIC_results_data['pmag_specimens'][specimen]['magic_method_codes'].split(\n \":\")\n # magic_experiment_names=specimen\n magic_experiment_names = \"\"\n # for m in tmp: # this is incorrect - it should be a concatenated list of the experiment names from the measurement table.\n # if \"LP-\" in m:\n # magic_experiment_names=magic_experiment_names+\":\" + m\n MagIC_results_data['pmag_specimens'][specimen]['magic_experiment_names'] = magic_experiment_names\n MagIC_results_data['pmag_specimens'][specimen]['measurement_step_unit'] = 'K'\n MagIC_results_data['pmag_specimens'][specimen]['specimen_lab_field_dc'] = \"%.2e\" % (\n self.Data[specimen]['pars']['lab_dc_field'])\n MagIC_results_data['pmag_specimens'][specimen]['specimen_correction'] = self.Data[specimen]['pars']['specimen_correction']\n for key in pmag_specimens_header_4:\n if key in ['specimen_int_ptrm_n', 'specimen_int_n']:\n MagIC_results_data['pmag_specimens'][specimen][key] = \"%i\" % (\n self.Data[specimen]['pars'][key])\n elif key in ['specimen_scat'] and self.Data[specimen]['pars'][key] in [\"Fail\", 'f']:\n MagIC_results_data['pmag_specimens'][specimen][key] = \"False\"\n elif key in ['specimen_scat'] and self.Data[specimen]['pars'][key] in [\"Pass\", 't']:\n MagIC_results_data['pmag_specimens'][specimen][key] = \"True\"\n elif key in 
['specimen_k_prime','specimen_k']:\n MagIC_results_data['pmag_specimens'][specimen][key] = \"%.4f\" % (\n self.Data[specimen]['pars'][key])\n else:\n MagIC_results_data['pmag_specimens'][specimen][key] = \"%.2f\" % (\n self.Data[specimen]['pars'][key])\n #print(self.Data[specimen]['pars'])#DEBUG\n\n MagIC_results_data['pmag_specimens'][specimen]['specimen_int'] = \"%.2e\" % (\n self.Data[specimen]['pars']['specimen_int_uT']*1e-6) # fix for saving corrected data\n MagIC_results_data['pmag_specimens'][specimen]['measurement_step_min'] = \"%i\" % (\n self.Data[specimen]['pars']['measurement_step_min'])\n MagIC_results_data['pmag_specimens'][specimen]['measurement_step_max'] = \"%i\" % (\n self.Data[specimen]['pars']['measurement_step_max'])\n if \"specimen_int_corr_anisotropy\" in list(self.Data[specimen]['pars'].keys()):\n MagIC_results_data['pmag_specimens'][specimen]['specimen_int_corr_anisotropy'] = \"%.2f\" % (\n self.Data[specimen]['pars']['specimen_int_corr_anisotropy'])\n else:\n MagIC_results_data['pmag_specimens'][specimen]['specimen_int_corr_anisotropy'] = \"\"\n if \"specimen_int_corr_nlt\" in list(self.Data[specimen]['pars'].keys()):\n MagIC_results_data['pmag_specimens'][specimen]['specimen_int_corr_nlt'] = \"%.2f\" % (\n self.Data[specimen]['pars']['specimen_int_corr_nlt'])\n else:\n MagIC_results_data['pmag_specimens'][specimen]['specimen_int_corr_nlt'] = \"\"\n if \"specimen_int_corr_cooling_rate\" in list(self.Data[specimen]['pars'].keys()) and self.Data[specimen]['pars']['specimen_int_corr_cooling_rate'] != -999:\n MagIC_results_data['pmag_specimens'][specimen]['specimen_int_corr_cooling_rate'] = \"%.2f\" % (\n self.Data[specimen]['pars']['specimen_int_corr_cooling_rate'])\n else:\n MagIC_results_data['pmag_specimens'][specimen]['specimen_int_corr_cooling_rate'] = \"\"\n MagIC_results_data['pmag_specimens'][specimen]['criteria'] = \"IE-SPEC\"\n\n if self.data_model == 3: # convert pmag_specimen format to data model 3 and replace existing specimen record or add new & delete blank records\n new_spec_data = MagIC_results_data['pmag_specimens'][specimen]\n # turn new_specimen data to 3.0\n new_data = map_magic.convert_spec('magic3', new_spec_data)\n # check if interpretation passes criteria and set flag\n spec_pars = thellier_gui_lib.check_specimen_PI_criteria(\n self.Data[specimen]['pars'], self.acceptance_criteria)\n if len(spec_pars['specimen_fail_criteria']) > 0:\n new_data['result_quality'] = 'b'\n else:\n new_data['result_quality'] = 'g'\n # reformat all the keys\n cond1 = self.spec_container.df['specimen'].str.contains(\n specimen + \"$\") == True\n if 'int_abs' not in self.spec_container.df.columns:\n self.spec_container.df['int_abs'] = None\n print(\"-W- No intensity data found for specimens\")\n cond2 = self.spec_container.df['int_abs'].apply(lambda x: cb.not_null(x, False)) #notnull() == True\n condition = (cond1 & cond2)\n # update intensity records\n self.spec_data = self.spec_container.update_record(\n specimen, new_data, condition)\n ## delete essentially blank records\n #condition = self.spec_data['method_codes'].isnull().astype(\n #bool) # find the blank records\n #info_str = \"specimen rows with blank method codes\"\n #self.spec_data = self.spec_container.delete_rows(\n # condition, info_str) # delete them\n\n if self.data_model != 3: # write out pmag_specimens.txt file\n fout = open(os.path.join(self.WD, \"pmag_specimens.txt\"), 'w')\n fout.write(\"tab\\tpmag_specimens\\n\")\n headers = pmag_specimens_header_1 + pmag_specimens_header_2 + 
pmag_specimens_header_3 + \\\n pmag_specimens_header_4 + pmag_specimens_header_5 + pmag_specimens_header_6\n String = \"\"\n for key in headers:\n String = String + key + \"\\t\"\n fout.write(String[:-1] + \"\\n\")\n for specimen in specimens_list:\n String = \"\"\n for key in headers:\n String = String + \\\n MagIC_results_data['pmag_specimens'][specimen][key] + \"\\t\"\n fout.write(String[:-1] + \"\\n\")\n fout.close()\n # merge with non-intensity data\n # read the new pmag_specimens.txt\n meas_data, file_type = pmag.magic_read(\n os.path.join(self.WD, \"pmag_specimens.txt\"))\n # add the old non-PI lines from pmag_specimens.txt\n for rec in PmagRecsOld[\"pmag_specimens.txt\"]:\n meas_data.append(rec)\n # fix headers, so all headers in all lines\n meas_data = self.converge_pmag_rec_headers(meas_data)\n # write the combined pmag_specimens.txt\n pmag.magic_write(os.path.join(\n self.WD, \"pmag_specimens.txt\"), meas_data, 'pmag_specimens')\n try:\n os.remove(os.path.join(self.WD, \"pmag_specimens.txt.backup\"))\n except:\n pass\n\n #-------------\n # message dialog\n #-------------\n TEXT = \"specimens interpretations are saved in pmag_specimens.txt.\\nPress OK for pmag_samples/pmag_sites/pmag_results tables.\"\n else: # data model 3, so merge with spec_data and save as specimens.txt file\n # remove unwanted columns (site, location).\n for col in ['site', 'location']:\n if col in self.spec_data.columns:\n del self.spec_data[col]\n\n self.spec_container.drop_duplicate_rows()\n # write out the data\n self.spec_container.write_magic_file(dir_path=self.WD)\n TEXT = \"specimens interpretations are saved in specimens.txt.\\nPress OK for samples/sites tables.\"\n\n dlg = wx.MessageDialog(self, caption=\"Saved\",\n message=TEXT, style=wx.OK | wx.CANCEL)\n result = self.show_dlg(dlg)\n if result == wx.ID_OK:\n dlg.Destroy()\n if result == wx.ID_CANCEL:\n dlg.Destroy()\n return()\n #-------------\n # pmag_samples.txt or pmag_sites.txt\n #-------------\n if self.acceptance_criteria['average_by_sample_or_site']['value'] == 'sample':\n BY_SITES = False\n BY_SAMPLES = True\n else:\n BY_SITES = True\n BY_SAMPLES = False\n\n pmag_samples_header_1 = [\"er_location_name\", \"er_site_name\"]\n if BY_SAMPLES:\n pmag_samples_header_1.append(\"er_sample_name\")\n if BY_SAMPLES:\n pmag_samples_header_2 = [\"er_specimen_names\", \"sample_int\", \"sample_int_n\",\n \"sample_int_sigma\", \"sample_int_sigma_perc\", \"sample_description\"]\n else:\n pmag_samples_header_2 = [\"er_specimen_names\", \"site_int\", \"site_int_n\",\n \"site_int_sigma\", \"site_int_sigma_perc\", \"site_description\"]\n pmag_samples_header_3 = [\n \"magic_method_codes\", \"magic_software_packages\"]\n pmag_samples_header_4 = [\"er_citation_names\"]\n\n pmag_samples_or_sites_list = []\n\n if BY_SAMPLES:\n samples_or_sites = list(self.Data_samples.keys())\n Data_samples_or_sites = copy.deepcopy(self.Data_samples)\n else:\n samples_or_sites = list(self.Data_sites.keys())\n Data_samples_or_sites = copy.deepcopy(self.Data_sites)\n samples_or_sites.sort()\n for sample_or_site in samples_or_sites:\n if True:\n specimens_names = \"\"\n B = []\n specimens_LP_codes = []\n for specimen in list(Data_samples_or_sites[sample_or_site].keys()):\n B.append(Data_samples_or_sites[sample_or_site][specimen])\n if specimen not in MagIC_results_data['pmag_specimens']:\n continue\n magic_codes = MagIC_results_data['pmag_specimens'][specimen]['magic_method_codes']\n codes = magic_codes.replace(\" \", \"\").split(\":\")\n for code in codes:\n if \"LP-\" in code and 
code not in specimens_LP_codes:\n specimens_LP_codes.append(code)\n\n specimens_names = specimens_names + specimen + \":\"\n magic_codes = \":\".join(specimens_LP_codes) + \":IE-TT\"\n specimens_names = specimens_names[:-1]\n if specimens_names != \"\":\n\n # sample_pass_criteria=False\n sample_or_site_pars = self.calculate_sample_mean(\n Data_samples_or_sites[sample_or_site])\n if sample_or_site_pars['pass_or_fail'] == 'fail':\n continue\n N = sample_or_site_pars['N']\n B_uT = sample_or_site_pars['B_uT']\n B_std_uT = sample_or_site_pars['B_std_uT']\n B_std_perc = sample_or_site_pars['B_std_perc']\n pmag_samples_or_sites_list.append(sample_or_site)\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site] = {\n }\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site]['er_specimen_names'] = specimens_names\n if BY_SAMPLES:\n name = \"sample_\"\n else:\n name = \"site_\"\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][name +\n 'int'] = \"%.2e\" % (B_uT * 1e-6)\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][name +\n 'int_n'] = \"%i\" % (N)\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][name +\n 'int_sigma'] = \"%.2e\" % (B_std_uT * 1e-6)\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][name +\n 'int_sigma_perc'] = \"%.2f\" % (B_std_perc)\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][name +\n 'description'] = \"paleointensity mean\"\n if BY_SAMPLES:\n sample_name = sample_or_site\n site_name = thellier_gui_lib.get_site_from_hierarchy(\n sample_name, self.Data_hierarchy)\n location_name = thellier_gui_lib.get_location_from_hierarchy(\n site_name, self.Data_hierarchy)\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site]['er_sample_name'] = sample_name\n\n if BY_SITES:\n site_name = sample_or_site\n location_name = thellier_gui_lib.get_location_from_hierarchy(\n site_name, self.Data_hierarchy)\n\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site]['er_site_name'] = site_name\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site]['er_location_name'] = location_name\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][\"pmag_criteria_codes\"] = \"\"\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site]['magic_method_codes'] = magic_codes\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][\"magic_software_packages\"] = version\n\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][\"er_citation_names\"] = \"This study\"\n\n # prepare pmag_samples.txt\n pmag_samples_or_sites_list.sort()\n if self.data_model != 3: # save 2.5 way\n if BY_SAMPLES:\n fout = open(os.path.join(self.WD, \"pmag_samples.txt\"), 'w')\n fout.write(\"tab\\tpmag_samples\\n\")\n else:\n fout = open(os.path.join(self.WD, \"pmag_sites.txt\"), 'w')\n fout.write(\"tab\\tpmag_sites\\n\")\n\n headers = pmag_samples_header_1 + pmag_samples_header_2 + \\\n pmag_samples_header_3 + pmag_samples_header_4\n String = \"\"\n for key in headers:\n String = String + key + \"\\t\"\n fout.write(String[:-1] + \"\\n\")\n\n for sample_or_site in pmag_samples_or_sites_list:\n String = \"\"\n for key in headers:\n String = String + \\\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][key] + \"\\t\"\n fout.write(String[:-1] + \"\\n\")\n fout.close()\n\n # merge with non-intensity data\n if BY_SAMPLES:\n meas_data, file_type = pmag.magic_read(\n os.path.join(self.WD, \"pmag_samples.txt\"))\n for rec in PmagRecsOld[\"pmag_samples.txt\"]:\n meas_data.append(rec)\n meas_data = 
self.converge_pmag_rec_headers(meas_data)\n pmag.magic_write(os.path.join(\n self.WD, \"pmag_samples.txt\"), meas_data, 'pmag_samples')\n try:\n os.remove(os.path.join(self.WD, \"pmag_samples.txt.backup\"))\n except:\n pass\n pmag.magic_write(os.path.join(\n self.WD, \"pmag_sites.txt\"), PmagRecsOld[\"pmag_sites.txt\"], 'pmag_sites')\n try:\n os.remove(os.path.join(self.WD, \"pmag_sites.txt.backup\"))\n except:\n pass\n\n else:\n meas_data, file_type = pmag.magic_read(\n os.path.join(self.WD, \"pmag_sites.txt\"))\n for rec in PmagRecsOld[\"pmag_sites.txt\"]:\n meas_data.append(rec)\n meas_data = self.converge_pmag_rec_headers(meas_data)\n pmag.magic_write(os.path.join(\n self.WD, \"pmag_sites.txt\"), meas_data, 'pmag_sites')\n try:\n os.remove(os.path.join(self.WD, \"pmag_sites.txt.backup\"))\n except:\n pass\n pmag.magic_write(os.path.join(\n self.WD, \"pmag_samples.txt\"), PmagRecsOld[\"pmag_samples.txt\"], 'pmag_samples')\n try:\n os.remove(os.path.join(self.WD, \"pmag_samples.txt.backup\"))\n except:\n pass\n\n else: # don't do anything yet = need vdm data\n pass\n\n #-------------\n # pmag_results.txt\n #-------------\n\n pmag_results_header_1 = [\"er_location_names\", \"er_site_names\"]\n if BY_SAMPLES:\n pmag_results_header_1.append(\"er_sample_names\")\n pmag_results_header_1.append(\"er_specimen_names\")\n\n pmag_results_header_2 = [\"average_lat\", \"average_lon\", ]\n pmag_results_header_3 = [\n \"average_int_n\", \"average_int\", \"average_int_sigma\", \"average_int_sigma_perc\"]\n if self.preferences['VDM_or_VADM'] == \"VDM\":\n pmag_results_header_4 = [\"vdm\", \"vdm_sigma\"]\n else:\n pmag_results_header_4 = [\"vadm\", \"vadm_sigma\"]\n pmag_results_header_5 = [\"data_type\", \"pmag_result_name\", \"magic_method_codes\",\n \"result_description\", \"er_citation_names\", \"magic_software_packages\", \"pmag_criteria_codes\"]\n\n for sample_or_site in pmag_samples_or_sites_list:\n if sample_or_site is None:\n continue\n if isinstance(sample_or_site, type(np.nan)):\n continue\n MagIC_results_data['pmag_results'][sample_or_site] = {}\n if self.data_model == 3:\n if BY_SAMPLES:\n if len(self.test_for_criteria()):\n MagIC_results_data['pmag_results'][sample_or_site]['pmag_criteria_codes'] = \"IE-SPEC:IE-SAMP\"\n if BY_SITES:\n if len(self.test_for_criteria()):\n MagIC_results_data['pmag_results'][sample_or_site]['pmag_criteria_codes'] = \"IE-SPEC:IE-SITE\"\n else:\n MagIC_results_data['pmag_results'][sample_or_site]['pmag_criteria_codes'] = \"ACCEPT\"\n MagIC_results_data['pmag_results'][sample_or_site][\"er_location_names\"] = MagIC_results_data[\n 'pmag_samples_or_sites'][sample_or_site]['er_location_name']\n MagIC_results_data['pmag_results'][sample_or_site][\"er_site_names\"] = MagIC_results_data[\n 'pmag_samples_or_sites'][sample_or_site]['er_site_name']\n MagIC_results_data['pmag_results'][sample_or_site][\"er_specimen_names\"] = MagIC_results_data[\n 'pmag_samples_or_sites'][sample_or_site]['er_specimen_names']\n\n if BY_SAMPLES:\n MagIC_results_data['pmag_results'][sample_or_site][\"er_sample_names\"] = MagIC_results_data[\n 'pmag_samples_or_sites'][sample_or_site]['er_sample_name']\n\n site = MagIC_results_data['pmag_results'][sample_or_site][\"er_site_names\"]\n lat, lon = \"\", \"\"\n if site in list(self.Data_info[\"er_sites\"].keys()) and \"site_lat\" in list(self.Data_info[\"er_sites\"][site].keys()):\n # MagIC_results_data['pmag_results'][sample_or_site][\"average_lat\"]=self.Data_info[\"er_sites\"][site][\"site_lat\"]\n lat = 
self.Data_info[\"er_sites\"][site][\"site_lat\"]\n\n if site in list(self.Data_info[\"er_sites\"].keys()) and \"site_lon\" in list(self.Data_info[\"er_sites\"][site].keys()):\n # MagIC_results_data['pmag_results'][sample_or_site][\"average_lon\"]=self.Data_info[\"er_sites\"][site][\"site_lon\"]\n lon = self.Data_info[\"er_sites\"][site][\"site_lon\"]\n MagIC_results_data['pmag_results'][sample_or_site][\"average_lat\"] = lat\n MagIC_results_data['pmag_results'][sample_or_site][\"average_lon\"] = lon\n if BY_SAMPLES:\n name = 'sample'\n else:\n name = 'site'\n\n MagIC_results_data['pmag_results'][sample_or_site][\"average_int_n\"] = MagIC_results_data[\n 'pmag_samples_or_sites'][sample_or_site][name + '_int_n']\n MagIC_results_data['pmag_results'][sample_or_site][\"average_int\"] = MagIC_results_data[\n 'pmag_samples_or_sites'][sample_or_site][name + '_int']\n MagIC_results_data['pmag_results'][sample_or_site][\"average_int_sigma\"] = MagIC_results_data[\n 'pmag_samples_or_sites'][sample_or_site][name + '_int_sigma']\n MagIC_results_data['pmag_results'][sample_or_site][\"average_int_sigma_perc\"] = MagIC_results_data[\n 'pmag_samples_or_sites'][sample_or_site][name + '_int_sigma_perc']\n\n if self.preferences['VDM_or_VADM'] == \"VDM\":\n pass\n # to be done\n else:\n if lat != \"\":\n lat = float(lat)\n # B=float(MagIC_results_data['pmag_samples_or_sites'][sample_or_site]['sample_int'])\n B = float(\n MagIC_results_data['pmag_results'][sample_or_site][\"average_int\"])\n # B_sigma=float(MagIC_results_data['pmag_samples_or_sites'][sample_or_site]['sample_int_sigma'])\n B_sigma = float(\n MagIC_results_data['pmag_results'][sample_or_site][\"average_int_sigma\"])\n VADM = pmag.b_vdm(B, lat)\n VADM_plus = pmag.b_vdm(B + B_sigma, lat)\n VADM_minus = pmag.b_vdm(B - B_sigma, lat)\n VADM_sigma = (VADM_plus - VADM_minus) / 2\n MagIC_results_data['pmag_results'][sample_or_site][\"vadm\"] = \"%.2e\" % VADM\n MagIC_results_data['pmag_results'][sample_or_site][\"vadm_sigma\"] = \"%.2e\" % VADM_sigma\n if self.data_model == 3: # stick vadm into site_or_sample record\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][\"vadm\"] = \"%.2e\" % VADM\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][\"vadm_sigma\"] = \"%.2e\" % VADM_sigma\n else:\n MagIC_results_data['pmag_results'][sample_or_site][\"vadm\"] = \"\"\n MagIC_results_data['pmag_results'][sample_or_site][\"vadm_sigma\"] = \"\"\n if self.data_model == 3: # stick vadm into site_or_sample record\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][\"vadm\"] = \"\"\n MagIC_results_data['pmag_samples_or_sites'][sample_or_site][\"vadm_sigma\"] = \"\"\n if MagIC_results_data['pmag_results'][sample_or_site][\"vadm\"] != \"\":\n MagIC_results_data['pmag_results'][sample_or_site][\"pmag_result_name\"] = \"Paleointensity;V[A]DM;\" + sample_or_site\n MagIC_results_data['pmag_results'][sample_or_site][\"result_description\"] = \"Paleointensity; V[A]DM\"\n else:\n MagIC_results_data['pmag_results'][sample_or_site][\"pmag_result_name\"] = \"Paleointensity;\" + sample_or_site\n MagIC_results_data['pmag_results'][sample_or_site][\"result_description\"] = \"Paleointensity\"\n\n MagIC_results_data['pmag_results'][sample_or_site][\"magic_software_packages\"] = version\n MagIC_results_data['pmag_results'][sample_or_site][\"magic_method_codes\"] = magic_codes\n # try to make a more meaningful name\n\n MagIC_results_data['pmag_results'][sample_or_site][\"data_type\"] = \"i\"\n 
MagIC_results_data['pmag_results'][sample_or_site][\"data_quality\"] = \"g\"\n MagIC_results_data['pmag_results'][sample_or_site][\"er_citation_names\"] = \"This study\"\n if self.data_model != 3: # look for ages in er_ages - otherwise they are in sites.txt already\n # add ages\n found_age = False\n site = MagIC_results_data['pmag_results'][sample_or_site][\"er_site_names\"]\n if sample_or_site in list(self.Data_info[\"er_ages\"].keys()):\n sample_or_site_with_age = sample_or_site\n found_age = True\n elif site in list(self.Data_info[\"er_ages\"].keys()):\n sample_or_site_with_age = site\n found_age = True\n if found_age:\n for header in [\"age\", \"age_unit\", \"age_sigma\", \"age_range_low\", \"age_range_high\"]:\n if sample_or_site_with_age in list(self.Data_info[\"er_ages\"].keys()) and header in list(self.Data_info[\"er_ages\"][sample_or_site_with_age].keys()):\n if self.Data_info[\"er_ages\"][sample_or_site_with_age][header] != \"\":\n value = self.Data_info[\"er_ages\"][sample_or_site_with_age][header]\n header_result = \"average_\" + header\n if header_result == \"average_age_range_high\":\n header_result = \"average_age_high\"\n if header_result == \"average_age_range_low\":\n header_result = \"average_age_low\"\n MagIC_results_data['pmag_results'][sample_or_site][header_result] = value\n\n if header_result not in pmag_results_header_4:\n pmag_results_header_4.append(header_result)\n\n else:\n\n found_age = False\n if BY_SAMPLES and sample_or_site in list(self.Data_info[\"er_ages\"].keys()):\n element_with_age = sample_or_site\n found_age = True\n elif BY_SAMPLES and sample_or_site not in list(self.Data_info[\"er_ages\"].keys()):\n site = self.Data_hierarchy['site_of_sample'][sample_or_site]\n if site in list(self.Data_info[\"er_ages\"].keys()):\n element_with_age = site\n found_age = True\n elif BY_SITES and sample_or_site in list(self.Data_info[\"er_ages\"].keys()):\n element_with_age = sample_or_site\n found_age = True\n else:\n continue\n if not found_age:\n continue\n foundkeys = False\n # print \"element_with_age\",element_with_age\n for key in ['age', 'age_sigma', 'age_range_low', 'age_range_high', 'age_unit']:\n # print \"Ron debug\"\n # print element_with_age\n # print sample_or_site\n if \"er_ages\" in list(self.Data_info.keys()) and element_with_age in list(self.Data_info[\"er_ages\"].keys()):\n if key in list(self.Data_info[\"er_ages\"][element_with_age].keys()):\n if self.Data_info[\"er_ages\"][element_with_age][key] != \"\":\n # print self.Data_info[\"er_ages\"][element_with_age]\n # print self.Data_info[\"er_ages\"][element_with_age][key]\n # print\n # MagIC_results_data['pmag_results'][sample_or_site]\n MagIC_results_data['pmag_results'][sample_or_site][\n key] = self.Data_info[\"er_ages\"][element_with_age][key]\n foundkeys = True\n if foundkeys == True:\n if \"er_ages\" in list(self.Data_info.keys()) and element_with_age in list(self.Data_info[\"er_ages\"].keys()):\n if 'magic_method_codes' in list(self.Data_info[\"er_ages\"][element_with_age].keys()):\n methods = self.Data_info[\"er_ages\"][element_with_age]['magic_method_codes'].replace(\n \" \", \"\").strip('\\n').split(\":\")\n for meth in methods:\n MagIC_results_data['pmag_results'][sample_or_site][\"magic_method_codes\"] = MagIC_results_data[\n 'pmag_results'][sample_or_site][\"magic_method_codes\"] + \":\" + meth\n\n if self.data_model != 3:\n # write pmag_results.txt\n fout = open(os.path.join(self.WD, \"pmag_results.txt\"), 'w')\n fout.write(\"tab\\tpmag_results\\n\")\n headers = pmag_results_header_1 + 
pmag_results_header_2 + \\\n pmag_results_header_3 + pmag_results_header_4 + pmag_results_header_5\n String = \"\"\n for key in headers:\n String = String + key + \"\\t\"\n fout.write(String[:-1] + \"\\n\")\n\n # pmag_samples_list.sort()\n for sample_or_site in pmag_samples_or_sites_list:\n if sample_or_site is None:\n continue\n if isinstance(sample_or_site, type(np.nan)):\n continue\n String = \"\"\n for key in headers:\n if key in list(MagIC_results_data['pmag_results'][sample_or_site].keys()):\n String = String + \\\n MagIC_results_data['pmag_results'][sample_or_site][key] + \"\\t\"\n else:\n String = String + \"\" + \"\\t\"\n fout.write(String[:-1] + \"\\n\")\n fout.close()\n\n # merge with non-intensity data\n meas_data, file_type = pmag.magic_read(\n os.path.join(self.WD, \"pmag_results.txt\"))\n for rec in PmagRecsOld[\"pmag_results.txt\"]:\n meas_data.append(rec)\n meas_data = self.converge_pmag_rec_headers(meas_data)\n pmag.magic_write(os.path.join(\n self.WD, \"pmag_results.txt\"), meas_data, 'pmag_results')\n try:\n os.remove(os.path.join(self.WD, \"pmag_results.txt.backup\"))\n except:\n pass\n\n else: # write out samples/sites in data model 3.0\n for sample_or_site in pmag_samples_or_sites_list:\n if sample_or_site is None:\n continue\n if isinstance(sample_or_site, type(np.nan)):\n continue\n\n # convert, delete, add and save\n new_sample_or_site_data = MagIC_results_data['pmag_samples_or_sites'][sample_or_site]\n\n if BY_SAMPLES:\n new_data = map_magic.convert_samp(\n 'magic3', new_sample_or_site_data) # convert to 3.0\n if len(self.test_for_criteria()):\n new_data['criteria'] = 'IE-SPEC:IE-SAMP'\n new_data['result_quality'] = 'g'\n #new_data['result_type'] = 'i' # no longer required\n self.samp_data = self.samp_container.df\n cond1 = self.samp_data['sample'].str.contains(\n sample_or_site + \"$\") == True\n if 'int_abs' not in self.samp_data.columns:\n self.samp_data['int_abs'] = None\n print('-W- No intensity data found for samples')\n cond2 = self.samp_data['int_abs'].notnull() == True\n condition = (cond1 & cond2)\n # update record\n self.samp_data = self.samp_container.update_record(\n sample_or_site, new_data, condition)\n self.site_data = self.site_container.df\n # remove intensity data from site level.\n if 'int_abs' not in self.site_data.columns:\n self.site_data['int_abs'] = None\n print('-W- No intensity data found for sites')\n site = self.Data_hierarchy['site_of_sample'][sample_or_site]\n try: # if site name is blank will skip\n cond1 = self.site_data['site'].str.contains(\n site + \"$\") == True\n cond2 = self.site_data['int_abs'].notnull() == True\n condition = (cond1 & cond2)\n site_keys = ['samples', 'int_abs', 'int_sigma', 'int_n_samples', 'int_sigma_perc', 'specimens',\n 'int_abs_sigma', 'int_abs_sigma_perc', 'vadm'] # zero these out but keep the rest\n blank_data = {}\n for key in site_keys:\n blank_data[key] = \"\"\n self.site_data = self.site_container.update_record(\n site, blank_data, condition, update_only=True)\n # add record for sample in the site table\n cond1 = self.site_data['site'].str.contains(\n sample_or_site + \"$\") == True\n cond2 = self.site_data['int_abs'].notnull() == True\n condition = (cond1 & cond2)\n # change 'site' column to reflect sample name,\n # since we are putting this sample at the site level\n new_data['site'] = sample_or_site\n new_data['samples'] = sample_or_site\n new_data['int_n_samples'] = '1'\n # get rid of this key for site table\n del new_data['sample']\n new_data['vadm'] = 
MagIC_results_data['pmag_results'][sample_or_site][\"vadm\"]\n new_data['vadm_sigma'] = MagIC_results_data['pmag_results'][sample_or_site][\"vadm_sigma\"]\n new_data['result_quality'] = 'g'\n self.site_data = self.site_container.update_record(\n sample_or_site, new_data, condition, debug=True)\n except:\n pass # no site\n\n else: # do this by site and not by sample START HERE\n # new_data is getting grabbed from where it was defined earlier for specimens\n # we need to create new_data right here right now\n # convert data from magic2 to magic3\n new_data = map_magic.convert_site('magic3', new_sample_or_site_data)\n cond1 = self.site_data['site'].str.contains(\n sample_or_site + \"$\") == True\n if 'int_abs' not in self.site_data.columns:\n self.site_data['int_abs'] = None\n cond2 = self.site_data['int_abs'].notnull() == True\n condition = (cond1 & cond2)\n loc = None\n locs = self.site_data[cond1]['location']\n if any(locs):\n loc = locs.values[0]\n new_data['site'] = sample_or_site\n new_data['location'] = loc\n if len(self.test_for_criteria()):\n new_data['criteria'] = 'IE-SPEC:IE-SITE'\n\n self.site_data = self.site_container.update_record(\n sample_or_site, new_data, condition)\n # remove intensity data from sample level. # need to look\n # up samples from this site\n cond1 = self.samp_data['site'].str.contains(\n sample_or_site + \"$\") == True\n if 'int_abs' not in self.samp_data.columns:\n self.samp_data['int_abs'] = None\n cond2 = self.samp_data['int_abs'].notnull() == True\n condition = (cond1 & cond2)\n new_data = {} # zero these out but keep the rest\n # zero these out but keep the rest\n samp_keys = ['int_abs', 'int_sigma',\n 'int_n_specimens', 'int_sigma_perc']\n for key in samp_keys:\n new_data[key] = \"\"\n samples = self.samp_data[condition].index.unique()\n for samp_name in samples:\n self.samp_container.update_record(\n samp_name, new_data, cond2)\n for col in ['location']:\n if col in list(self.samp_data.keys()):\n del self.samp_data[col]\n # if BY_SAMPLES: # replace 'site' with 'sample'\n # self.samp_data['site']=self.samp_data['sample']\n # condition= self.samp_container.df['specimens'].notnull()==True # find all the blank specimens rows\n # self.samp_container.df = self.samp_container.df.loc[condition]\n\n\n\n # remove sample only columns that have been put into sites\n if BY_SAMPLES:\n #ignore = ['cooling_rate_corr', 'cooling_rate_mcd']\n self.site_container.remove_non_magic_cols_from_table(ignore_cols=[]) #ignore)\n # write out the data\n self.samp_container.write_magic_file(dir_path=self.WD)\n self.site_container.write_magic_file(dir_path=self.WD)\n\n #-------------\n # MagIC_methods.txt\n #-------------\n\n # search for all magic_methods in all files:\n magic_method_codes = []\n for F in [\"magic_measurements.txt\", \"rmag_anisotropy.txt\", \"rmag_results.txt\", \"rmag_results.txt\", \"pmag_samples.txt\", \"pmag_specimens.txt\", \"pmag_sites.txt\", \"er_ages.txt\"]:\n try:\n fin = open(os.path.join(self.WD, F), 'r')\n except:\n continue\n line = fin.readline()\n line = fin.readline()\n header = line.strip('\\n').split('\\t')\n if \"magic_method_codes\" not in header:\n continue\n else:\n index = header.index(\"magic_method_codes\")\n for line in fin.readlines():\n tmp = line.strip('\\n').split('\\t')\n if len(tmp) >= index:\n codes = tmp[index].replace(\" \", \"\").split(\":\")\n for code in codes:\n if code != \"\" and code not in magic_method_codes:\n magic_method_codes.append(code)\n fin.close()\n\n if self.data_model == 2:\n magic_method_codes.sort()\n # print 
magic_method_codes\n magic_methods_header_1 = [\"magic_method_code\"]\n fout = open(os.path.join(self.WD, \"magic_methods.txt\"), 'w')\n fout.write(\"tab\\tmagic_methods\\n\")\n fout.write(\"magic_method_code\\n\")\n for code in magic_method_codes:\n fout.write(\"%s\\n\" % code)\n fout.close\n\n # make pmag_criteria.txt if it does not exist\n if not os.path.isfile(os.path.join(self.WD, \"pmag_criteria.txt\")):\n Fout = open(os.path.join(self.WD, \"pmag_criteria.txt\"), 'w')\n Fout.write(\"tab\\tpmag_criteria\\n\")\n Fout.write(\"er_citation_names\\tpmag_criteria_code\\n\")\n Fout.write(\"This study\\tACCEPT\\n\")\n\n dlg1 = wx.MessageDialog(\n self, caption=\"Message:\", message=\"MagIC files are saved in MagIC project folder\", style=wx.OK | wx.ICON_INFORMATION)\n self.show_dlg(dlg1)\n dlg1.Destroy()\n\n self.close_warning = False",
"def calibrate(filename, analog_idx):\n if not os.path.isfile(filename):\n filename = glia.match_filename(filename,\"analog\")\n analog = glia.read_raw_voltage(filename)[:,analog_idx]\n data_directory, fn = os.path.split(filename)\n calibration = glia.auto_calibration(analog, data_directory)\n glia.analog_histogram(analog, data_directory)\n print(f\"saving analog histogram to {data_directory}/analog_histogram.png\")\n with open(os.path.join(data_directory,\"config.yml\"), 'w') as outfile:\n yaml.dump({\"analog_calibration\": calibration.tolist()}, outfile)\n print(f\"saving suggested config to {data_directory}/config.yml\")",
"def display_results(self):\n print \"Resultats pour le fichier : \\n================================\"\n print \"Moyenne arithmetique : \", self.results['arithAvg']\n print \"Moyenne quadratique : \", self.results['quadAvg']\n print \"Moyenne geometrique : \", self.results['geoAvg']\n print \"Moyenne harmonique : \", self.results['harmAvg']\n print \"Ecart a la moyenne : \", self.results['std']\n print \"Valeure maximale : \", self.results['max']\n print \"Valeurs minimale : \", self.results['min']\n print \"Variance : \", self.results['var']\n print \"Moments d'ordre R (jusqu'a 4) : \", self.results['momentsR']\n print \"Moments centrés d'ordre R (jusqu'a 4) : \", self.results['centralMomentsR']\n print \"Dissymetrie : \", self.results['dissym']\n print \"Coefficient d'applatissement : \", self.results['flattening']\n print \"Ecart type : \", self.results['ecartType']",
"def read_fatlasa_results(filename):\n\n pass",
"def process_results(stadict,dir,ot):\n # If the absolute value of linearity \n ignore_linearity=0.90\n # Process our output\n csvfile=\"%s/Results.csv\" % (dir)\n csv=open(csvfile,'r')\n hl=csv.readline() # Read the header\n for line in csv:\n lvals=line.split(',')\n nsl=\"%s-%s-%s\" % (lvals[0],lvals[1],lvals[2])\n difference=float(lvals[3])-float(lvals[4])\n linearity=np.abs(float(lvals[6]))\n if linearity>=ignore_linearity: #Could move this to the plot section but it adds complexity if you do\n stadict=add_azi_measurement(stadict,nsl,ot,difference,linearity)\n return stadict",
"def _read_analogies(self):\n questions = []\n questions_skipped = 0\n with open(self._options.eval_data, \"rb\") as analogy_f:\n for line in analogy_f:\n if line.startswith(\":\"): # Skip comments.\n continue\n words = line.strip().lower().split(\" \")\n # print words\n ids = [self._cate2id.get(w.strip()) for w in words]\n # print ids\n if None in ids or len(ids) != 4:\n questions_skipped += 1\n else:\n questions.append(np.array(ids))\n print(\"Eval analogy file: \", self._options.eval_data)\n print(\"Questions: \", len(questions))\n print(\"Skipped: \", questions_skipped)\n questions = np.array(questions, dtype=np.int32)\n self._analogy_questions = questions\n self._target_field = np.array(\n list(set(questions[:, 3])), dtype=np.int32)\n np.random.shuffle(self._analogy_questions)",
"def res_to_csv(results_filename=None, output_filename=None):\n \n if results_filename is None:\n results_filename = _get_filename_from_dialog('res')\n if not results_filename:\n print('You must select a .res file!')\n \n if output_filename is None:\n output_filename = _get_filename_from_dialog('csv')\n if not output_filename:\n print('You must select a .csv file for output!')\n\n # Run an aview cmd command to load the results file.\n aview_cmd = 'file results read file_name = \"{}\"'.format(os.path.normpath(results_filename).replace(os.sep, '/'))\n Adams.execute_cmd(aview_cmd)\n \n # Get the name of the analysis loaded by the results file\n selected_analysis_name = os.path.split(results_filename)[-1].replace('.res','')\n\n # Loop through all analyses in the model \n data = OrderedDict()\n for mod_name in Adams.Models:\n mod = Adams.Models.get(mod_name)\n for ans_name in mod.Analyses:\n if ans_name not in selected_analysis_name:\n # If this analysis name doesn't match, break\n break\n \n # Initialize a flag indicating that the TIME array has not been set\n time_found = False\n \n # For each analysis in the model, get the analysis handle\n ans = mod.Analyses.get(ans_name)\n\n # Filtering out XFORM result sets\n filt_res_names = [res_name for res_name in ans.results if 'XFORM' not in res_name and TIME_STRING not in res_name]\n \n for res_name in filt_res_names:\n # For each result set, get the result set handle\n res = ans.results.get(res_name) \n for comp_name in res.keys():\n # for each result component in the result set, get the result component handle\n comp = res.get(comp_name)\n\n if TIME_STRING in comp_name and not time_found:\n # If this is the first time component encountered, add TIME to the data dictionary\n data[TIME_STRING] = comp.values\n time_found = True\n\n elif TIME_STRING not in comp_name:\n # If this is not a TIME component, add item to the data dictionary\n # Key = (result set name), (result component name)\n # Value = list of numeric data\n data['{}.{}'.format(res_name, comp_name)] = comp.values\n \n if TIME_STRING in data:\n # If the data dictionary has a time key, move it to the front\n data.move_to_end(TIME_STRING, last=False) \n\n if output_filename is None:\n # If the output file is not given, set equal to the results file\n output_filename = results_filename.replace('.res', '.csv')\n\n # Write the data dictionary to a csv\n try:\n with open(output_filename, \"w\",newline = '') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(data.keys())\n writer.writerows(zip(*data.values()))\n except PermissionError:\n message_box = PyQt4.QtGui.QMessageBox()\n message_box.setTitle('Permission Denied!')\n message_box.setText(f'Permission to access {output_filename} was denied! Please close any programs that are using it and rerun this script.')\n message_box.setIcon(PyQt4.QtGui.QMessageBox.Warning)\n message_box.setStandardButtons(PyQt4.QtGui.QMessageBox.Ok)\n message_box.exec_()",
"def parse_results_file(filename):\n\tfile = open(filename, 'r')\n\tpretext=[line for line in file.readlines() if line.strip()]\n\tfile.close()\n\n\ttext = []\n\tprocessed = []\n\tlanguages = 'NONE'\n\tID = 'NONE'\n\t\n\tmoreheader = raw_input('Extra header labels from question field (e.g.: item,condition,factor1,factor2): ')\n\tstim_type = raw_input('What type are your stims? (i.e. AcceptabilityJudgment): ')\n\toutput_loc = raw_input('Where would you like to put your parsed file? (enter filename path): ')\n\t\n\t#takes out comments\n\tfor line in pretext:\n\t\tif re.match('#', line):\n\t\t\tcontinue\n\t\telse:\n\t\t\ttext.append(line)\n\n\tfirst = 1;\n\n\tfor line in range(len(text)):\n\t\t#get their info\n\t\tif re.search('Form', text[line]):\n\t\t\tif re.search('number', text[line]):\n\t\t\t\tID = re.split('number,', text[line])[1].strip()\n\t\t\telif re.search('age', text[line]):\n\t\t\t\tlanguages = re.split('age,', text[line])[1].strip()\n\n\t\t#looks for the main stimulus type, as entered earlier\t\t\n\t\tif re.search(stim_type, text[line]):\n\t\t\tif first:\n\t\t\t\t#print 'first'\n\t\t\t\tprocessed.append(str(ID+ ','+languages+','+text[line]))\n\t\t\t\tfirst=0\n\t\t\telse:\n\t\t\t\ttoAmend = processed.pop()\n\t\t\t\t#print str('toAmend: ' + toAmend)\n\t\t\t\ttoAdd=''\n\t\t\t\tsplits = re.split('NULL,', text[line])\n\t\t\t\tfor thing in splits[1:]:\n\t\t\t\t\tif thing is not '':\n\t\t\t\t\t\ttoAdd = str(toAdd + ',' + thing.strip(','))\n\t\t\t\t#print str('toAdd: ' + toAdd)\n\t\t\t\tprocessed.append(str(toAmend.strip()+ toAdd))\n\t\t\t\tfirst = 1\n\n\t\t#if the line is a question line, there's more to append\n\t\tif re.search('Question', text[line]):\n\t\t\ttoAmend = processed.pop()\n\t\t\tpart = re.split('\\$', text[line])[1]\n\t\t\tpart.strip('$')\n\t\t\tparts = part.split('%2C')\n\t\t\tprocessed.append(str(toAmend.strip()+ ','+ string.join(parts, ',')+'\\n'))\n\t\t\t\n\toutput = open(output_loc, 'w')\n\n\theader = 'ID,Languages,Time sent,MD5 Hash of IP Address,Controller,Item Number,Element Number,Type,Group,Stimulus,Answer,RT,'\n\n\toutput.write(str(header+moreheader+'\\n'))\n\n\t#put it all into a text file\n\tfor line in processed:\n\t\toutput.write(line)\n\toutput.close()",
"def convert_csv_to_alfed(self) -> None:\n global output_path, file_name\n self.parse_command_line_args()\n self.validate_command_line_args()\n\n for _, _, files in walk(self.args.input):\n for output_file in files:\n if output_file.endswith(\".csv\"):\n file_name, _ = path.splitext(output_file)\n output_path = \"\"\n output_path = path.join(self.args.output, file_name)\n\n try:\n mkdir(output_path)\n print(f\"Creating folder {output_path}...\")\n except OSError:\n print(f\"Creation of directory {output_path} failed\")\n\n with open(path.join(self.args.input, output_file), \"rt\") as csv_file:\n reader = DictReader(csv_file, fieldnames=self.args.fieldorder)\n\n for row in reader:\n uid = str(uuid.uuid1()).upper()\n row[\"content\"] = self.replace_embedded_snipptes(row[\"content\"], self.args.lplaceholder,\n self.args.rplaceholder, self.args.changeplaceholders)\n output = dumps(\n {\n \"alfredsnippet\": {\n \"snippet\": row['content'],\n \"uid\": uid,\n \"name\": row['name'],\n \"keyword\": row['abbreviation']\n }\n },\n sort_keys=False, indent=4,\n separators=(',', ': ')\n )\n\n output_file = f\"{row['name']}_[{uid}].json\"\n target = path.join(output_path, output_file)\n f = open(target, \"w\")\n f.write(output)\n f.close()\n print(f\"Writing file {target}...\")\n else:\n self.error_msg(\"The files in the input folder are not with extension '*.csv'\")\n\n subprocess.call(\n [\n 'ditto',\n '--norsrc',\n '-ck',\n output_path,\n self.args.output + \"/\" + file_name + \".alfredsnippets\"\n ]\n )\n print(f\"{self.args.output}/{file_name}.alfredsnippets was created\")\n self.created_folders.append(file_name)\n\n self.remove_temp_folders()",
"def _reformat_results(self, results, strategy='wavelet_peaks'):\n return np.array(results)",
"def showResult():\n logging.info('I am in showResult......')\n filename = \"../txt/%s_testall_%d_%d.txt\" % (const.DATASET_NAME,const.TOPIC_NUM,const.TOP_N)\n x = range(1,const.TOP_N,1)\n result = [[[] for i in range(5)] for i in range(const.METHOD_SIZE)]\n #read result from file to result\n if os.path.exists(filename):\n print '%s is existing......' % filename \n rFile = open(filename,\"r\")\n lines = rFile.readlines()\n for line in lines:\n line = line.rstrip('\\n')\n items = line.split(\"INFO:\")\n line = items[1]\n items = line.split(\":\")\n ids = items[0]\n values = items[1]\n idItems = ids.split(\">\")\n mid = int(idItems[0])\n topN = int(idItems[1])\n valueItems = values.split()\n result[mid][0].append(float(valueItems[0]))\n result[mid][1].append(float(valueItems[1]))\n result[mid][2].append(float(valueItems[2]))\n result[mid][3].append(float(valueItems[3]))\n result[mid][4].append(float(valueItems[4]))\n rFile.close()\n else:\n rFile = open(filename,\"w\")\n rFile.close()\n #if some method is not in file, recreate it\n for mid in range(const.METHOD_SIZE):\n if len(result[mid][0]) == 0:\n recalls,precisions,f1s,maes,rmses = getErrorOfRecMethod(mid)\n result[mid][0] = recalls\n result[mid][1] = precisions\n result[mid][2] = f1s\n result[mid][3] = maes\n result[mid][4] = rmses\n\n #plt img of comparing with pure method\n for index in range(5):\n plt.figure(index)\n indexName = util.getIndexName(index)\n mids = [const.ARIMA,const.SIMILAR,const.AVG]\n markerIndex = 0\n for mid in mids:\n if index == 1 or index == 2:\n plt.plot(x[10:],result[mid][index][10:],const.marker[markerIndex],label=util.getMethodName(mid))\n else:\n plt.plot(x,result[mid][index],const.marker[markerIndex],label=util.getMethodName(mid))\n markerIndex += 1\n plt.title(\"%s of Different Recommend Algorithms(Pure)\" % indexName)\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(indexName)\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/pure_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n #plt.show()\n\n #plt img of comparing with hybrid method\n for index in range(5):\n plt.figure(index+5)\n indexName = util.getIndexName(index)\n mids = [const.ARIMA,const.ARIMA_SIMILAR,const.ARIMA_AVG,const.ALL_HYBRID]\n markerIndex = 0\n for mid in mids:\n if index == 1 or index == 2:\n plt.plot(x[10:],result[mid][index][10:],const.marker[markerIndex],label=util.getMethodName(mid))\n else:\n plt.plot(x,result[mid][index],const.marker[markerIndex],label=util.getMethodName(mid))\n markerIndex += 1\n plt.title(\"%s of Different Recommend Algorithms(Hybrid)\" % indexName)\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(indexName)\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/hybrid_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n #plt.show()\n\n\n #plt img of comparing with sequential method\n for index in range(5):\n plt.figure(index+10)\n indexName = util.getIndexName(index)\n mids = [const.ARIMA,const.KNN,const.PATTERN,const.MARKOV,const.MARKOV_3]\n markerIndex = 0\n for mid in mids:\n if index == 1 or index == 2:\n plt.plot(x[10:],result[mid][index][10:],const.marker[markerIndex],label=util.getMethodName(mid))\n else:\n plt.plot(x,result[mid][index],const.marker[markerIndex],label=util.getMethodName(mid))\n markerIndex += 1\n plt.title(\"%s of Different Recommend Methods\" % indexName)\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(indexName)\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/seq_%s_%s_%d_%d.png\" % 
(const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n #plt.show()\n \n plt.figure(30)\n plt.plot(x,result[const.ARIMA_SIMILAR][3],'k-.',label=util.getMethodName(const.ARIMA_SIMILAR)) \n plt.plot(x,result[const.ARIMA_AVG][3],'k+',label=util.getMethodName(const.ARIMA_AVG)) \n plt.plot(x,result[const.ALL_HYBRID][3],'k',label=util.getMethodName(const.ALL_HYBRID)) \n plt.title(\"MAE of Hybrid Music Recommendation Methods\")\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(\"MAE\")\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/local_global_%s_%s_%d_%d.png\" % (const.DATASET_NAME,\"MAE\",const.TOPIC_NUM,const.TOP_N))\n\n plt.figure(31)\n plt.plot(x,result[const.ARIMA_SIMILAR][4],'k-.',label=util.getMethodName(const.ARIMA_SIMILAR)) \n plt.plot(x,result[const.ARIMA_AVG][4],'k+',label=util.getMethodName(const.ARIMA_AVG)) \n plt.plot(x,result[const.ALL_HYBRID][4],'k',label=util.getMethodName(const.ALL_HYBRID)) \n plt.title(\"RMSE of Hybrid Music Recommendation Methods\")\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(\"RMSE\")\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/local_global_%s_%s_%d_%d.png\" % (const.DATASET_NAME,\"RMSE\",const.TOPIC_NUM,const.TOP_N))\n\n plt.figure(19)\n improvement = []\n for i in range(len(result[const.ARIMA][1])):\n improvement.append((result[const.ARIMA][1][i]-result[const.KNN][1][i]) / result[const.KNN][1][i])\n plt.plot(x[10:],improvement[10:],'k',label='Improvement over UserKNN Recommender')\n plt.title('Average Precision Improvement over UserKNN Recommender')\n plt.xlabel('Number of recommendations')\n plt.ylabel('Improvement in Average Precision (times)')\n plt.legend()\n indexName = util.getIndexName(1)\n plt.savefig(\"../img/improvement_knn_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n\n plt.figure(20)\n improvement = []\n for i in range(len(result[const.ARIMA][1])):\n improvement.append((result[const.ARIMA][1][i]-result[const.KNN][1][i]) / result[const.KNN][1][i])\n plt.plot(x[10:],improvement[10:],'k',label='Improvement over UserKNN Recommender')\n plt.title('Average Precision Improvement over UserKNN Recommender')\n plt.xlabel('Number of recommendations')\n plt.ylabel('Improvement in Average Precision (times)')\n plt.legend()\n indexName = util.getIndexName(1)\n plt.savefig(\"../img/improvement_knn_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n\n plt.figure(21)\n improvement = []\n for i in range(len(result[const.ARIMA][2])):\n improvement.append((result[const.ARIMA][2][i]-result[const.KNN][2][i]) / result[const.KNN][2][i])\n plt.plot(x[10:],improvement[10:],'k',label='Improvement over UserKNN Recommender')\n plt.title('Average F1-Score Improvement over UserKNN Recommender')\n plt.xlabel('Number of recommendations')\n plt.ylabel('Improvement in Average F1-Score (times)')\n plt.legend()\n indexName = util.getIndexName(2)\n plt.savefig(\"../img/improvement_knn_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n #plt.show()\n logging.info('I am out showResult......')\n\n #plt img of comparing with pure method\n for index in range(5):\n plt.figure(index+50)\n indexName = util.getIndexName(index)\n print indexName\n mids = [const.ARIMA,const.SIMILAR,const.KNN,const.AVG,const.PATTERN,const.MARKOV]\n markerIndex = 0\n for mid in mids:\n if index == 1 or index == 2:\n plt.plot(x[10:],result[mid][index][10:],const.marker1[markerIndex],label=util.getMethodName(mid))\n else:\n 
plt.plot(x,result[mid][index],const.marker1[markerIndex],label=util.getMethodName(mid))\n markerIndex += 1\n plt.title(\"%s of Different Recommend Algorithms\" % indexName)\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(indexName)\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/all_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n\n #plt img of comparing with hybrid method\n for index in range(5):\n plt.figure(index+75)\n indexName = util.getIndexName(index)\n mids = [const.ARIMA,const.ALL_HYBRID]\n markerIndex = 0\n for mid in mids:\n if index == 1 or index == 2:\n plt.plot(x[10:],result[mid][index][10:],const.marker[markerIndex],label=util.getMethodName(mid))\n else:\n plt.plot(x,result[mid][index],const.marker[markerIndex],label=util.getMethodName(mid))\n markerIndex += 1\n plt.title(\"%s of Different Recommend Algorithms\" % indexName)\n plt.xlabel(\"Number of recommendations\")\n plt.ylabel(indexName)\n plt.legend()\n plt.xlim(1,160)\n plt.savefig(\"../img/hybrid_only_%s_%s_%d_%d.png\" % (const.DATASET_NAME,indexName,const.TOPIC_NUM,const.TOP_N))\n #plt.show()",
"def ProcessAsl(self):\n for entry in self.info:\n if self.info[entry]['type'] == 'asl':\n if self.verbose:\n print 'Processing ASL data in %s' % os.path.basename(entry)\n cmd = 'convert_file %s %s %s' % (entry, \\\n self.info[entry]['imgfile'], self.info[entry]['filetype'])\n fname = '%s%s' % \\\n (self.info[entry]['imgfile'], self.info[entry]['suffix'])\n self.CheckExec(cmd, [fname])",
"def astrometry_script(filename, catalog=\"PS\", rotation_scaling=True, xy_transformation=True, fine_transformation=True, images=False, vignette=3,vignette_rectangular=1., cutouts=None, ra=None, dec=None, projection_ra=None, projection_dec=None, verbose=False, save_images=False, ignore_header_rot=False, radius=-1., save_bad_result=False, silent=False, sigma_threshold_for_source_detection=5, high_res = False, hdul_idx=0, filename_for_sources=None, FWHM=4):\n #print(\"Program version: 1.2\")\n\n report = {}\n if(images):\n plt.ioff()\n warnings.simplefilter('ignore', UserWarning)\n fits_image_filename = filename\n\n print(\"> Astrometry for {} \".format(fits_image_filename))\n\n with fits.open(fits_image_filename) as hdul:\n #print(hdul.info())\n #print(hdul[0].header)\n\n hdu = hdul[hdul_idx]\n #hdu.verify('fix')\n hdr = hdu.header\n\n\n image_or = hdul[hdul_idx].data.astype(float)\n median = np.nanmedian(image_or)\n image_or[np.isnan(image_or)]=median\n image = image_or - median\n\n observation = find_sources(image, vignette,vignette_rectangular,cutouts, sigma_threshold_for_source_detection, FWHM=FWHM)\n #print(observation)\n\n #changed order of positions to [(x,y), (x,y),...] for compatibility with photutils 1.4\n xcenters = np.array(observation['xcenter'])\n ycenters = np.array(observation['ycenter'])\n positions = [(xcenters[i], ycenters[i]) for i in range(len(xcenters))]\n apertures = CircularAperture(positions, r=4.)\n\n\n #world coordinates\n if(not silent):\n print(\">Info found in the file -- (CRVAl: position of central pixel (CRPIX) on the sky)\")\n print(WCS(hdr))\n\n hdr[\"NAXIS1\"] = image.shape[0]\n hdr[\"NAXIS2\"] = image.shape[1]\n\n #wcsprm = Wcsprm(hdr.tostring().encode('utf-8')) #everything else gave me errors with python 3, seemed to make problems with pc conversios, so i wwitched to the form below\n wcsprm = WCS(hdr).wcs\n wcsprm_original = WCS(hdr).wcs\n wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR = read_additional_info_from_header(wcsprm, hdr, ra, dec,projection_ra, projection_dec, ignore_header_rot, radius)\n if(verbose):\n print(WCS(wcsprm.to_header()))\n coord = SkyCoord(wcsprm.crval[0], wcsprm.crval[1], unit=(u.deg, u.deg), frame=\"icrs\")\n if(not PIXSCALE_UNCLEAR):\n if(wcsprm.crpix[0] < 0 or wcsprm.crpix[1] < 0 or wcsprm.crpix[0] > image.shape[0] or wcsprm.crpix[1] > image.shape[1] ):\n if(not silent):\n print(\"central value outside of the image, moving it to the center\")\n coord_radec = wcsprm.p2s([[image.shape[0]/2, image.shape[1]/2]], 0)[\"world\"][0]\n coord = SkyCoord(coord_radec[0], coord_radec[1], unit=(u.deg, u.deg), frame=\"icrs\")\n #print(wcsprm)\n\n\n\n #better: put in nice wrapper! with repeated tries and maybe try synchron!\n if(not silent):\n print(\">Dowloading catalog data\")\n radius = u.Quantity(fov_radius, u.arcmin)#will prob need more\n catalog_data = query.get_data(coord, radius, catalog)\n report[\"catalog\"] = catalog\n #reference = reference.query(\"mag <20\")\n \n\n if(catalog == \"GAIA\" and catalog_data.shape[0] < 5):\n if(not silent):\n print(\"GAIA seems to not have enough objects, will enhance with PS1\")\n catalog_data2 = query.get_data(coord, radius, \"PS\")\n report[\"catalog\"] = \"PS\"\n catalog_data = pd.concat([catalog_data, catalog_data2])\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n if(not silent):\n print(\"Now we have a total of {} sources. 
Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n elif(catalog == \"PS\" and (catalog_data is None or catalog_data.shape[0] < 5)):\n if(not silent):\n print(\"We seem to be outside the PS footprint, enhance with GAIA data\")\n catalog_data2 = query.get_data(coord, radius, \"GAIA\")\n report[\"catalog\"] = \"GAIA\"\n catalog_data = pd.concat([catalog_data, catalog_data2])\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n if(not silent):\n print(\"Now we have a total of {} sources. Keep in mind that there might be duplicates now since we combined 2 catalogs\".format(catalog_data.shape[0]))\n\n max_sources = 400\n if(INCREASE_FOV_FLAG):\n max_sources= max_sources*2.25 #1.5 times the radius, so 2.25 the area\n if(catalog_data.shape[0]>max_sources):\n catalog_data = catalog_data.nsmallest(400, \"mag\")\n #remove duplicates in catalog?\n\n apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n #plotting what we have, I keep it in the detector field, world coordinates are more painfull to plot\n if(images):\n fig = plt.figure()\n fig.canvas.manager.set_window_title('Input for {}'.format(fits_image_filename))\n plt.xlabel(\"pixel x direction\")\n plt.ylabel(\"pixel y direction\")\n plt.title(\"Input - red: catalog sources, blue: detected sources in img\")\n plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n apertures.plot(color='blue', lw=1.5, alpha=0.5)\n apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n\n plt.xlim(-200,image.shape[0]+200)\n plt.ylim(-200,image.shape[1]+200)\n if(save_images):\n name_parts = fits_image_filename.rsplit('.', 1)\n plt.savefig(name_parts[0]+\"_image_before.pdf\")\n\n ###tranforming to match the sources\n if(not silent):\n print(\"---------------------------------\")\n print(\">Finding the transformation\")\n if(rotation_scaling):\n if(not silent):\n print(\"Finding scaling and rotation\")\n wcsprm = register.get_scaling_and_rotation(observation, catalog_data, wcsprm, scale_guessed=PIXSCALE_UNCLEAR, verbose=verbose)\n if(xy_transformation):\n if(not silent):\n print(\"Finding offset\")\n wcsprm,_,_ = register.offset_with_orientation(observation, catalog_data, wcsprm, fast=False , INCREASE_FOV_FLAG=INCREASE_FOV_FLAG, verbose= verbose, silent=silent)\n\n #correct subpixel error\n compare_threshold = 3\n if(high_res):\n compare_threshold = 100\n obs_x, obs_y, cat_x, cat_y, distances = register.find_matches(observation, catalog_data, wcsprm, threshold=compare_threshold)#3\n if (len(distances) == 0): #meaning the list is empty\n best_score = 0\n else:\n rms = np.sqrt(np.mean(np.square(distances)))\n best_score = len(obs_x)/(rms+10) #start with current best score\n fine_transformation_success = False\n if(fine_transformation):\n print(\"Finding scaling and rotation\")\n lis = [2,3,5,8,10,6,4, 20,2,1,0.5]\n if(high_res):\n lis = [200,300,100,150,80,40,70, 20, 100, 30,9,5]\n skip_rot_scale = True\n for i in lis:\n wcsprm_new, score = register.fine_transformation(observation, catalog_data, wcsprm, threshold=i, compare_threshold=compare_threshold, skip_rot_scale=skip_rot_scale)\n if(i == 20):\n #only allow rot and scaling for the last few tries\n skip_rot_scale = False\n if(score> best_score):\n wcsprm = wcsprm_new\n best_score = score\n fine_transformation_success = True\n if not fine_transformation_success:\n if(not silent):\n print(\"Fine transformation did not improve result so will be 
discarded.\")\n else:\n if(not silent):\n print(\"Fine transformation applied to improve result\")\n #register.calculate_rms(observation, catalog_data,wcs)\n\n #make wcsprim more physical by moving scaling to cdelt, out of the pc matrix\n wcs =WCS(wcsprm.to_header())\n if(verbose):\n print(wcs)\n from astropy.wcs import utils\n scales = utils.proj_plane_pixel_scales(wcs)\n #print(scales)\n cdelt = wcsprm.get_cdelt()\n #print(cdelt)\n scale_ratio = scales/cdelt\n #print(scale_ratio)\n pc = np.array(wcsprm.get_pc())\n pc[0,0] = pc[0,0]/scale_ratio[0]\n pc[1,0] = pc[1,0]/scale_ratio[1]\n pc[0,1] = pc[0,1]/scale_ratio[0]\n pc[1,1] = pc[1,1]/scale_ratio[1]\n wcsprm.pc = pc\n wcsprm.cdelt = scales\n\n #WCS difference before and after\n if(not silent):\n print(\"> Compared to the input the Wcs was changed by: \")\n scales_original = utils.proj_plane_pixel_scales(WCS(hdr))\n if(not silent):\n print(\"WCS got scaled by {} in x direction and {} in y direction\".format(scales[0]/scales_original[0], scales[1]/scales_original[1]))\n #sources:\n #https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices\n #https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249\n def unit_vector(vector):\n \"\"\" Returns the unit vector of the vector. \"\"\"\n return vector / max(np.linalg.norm(vector), 1e-10)\n def matrix_angle( B, A ):\n \"\"\" comment cos between vectors or matrices \"\"\"\n Aflat = A.reshape(-1)\n Aflat = unit_vector(Aflat)\n Bflat = B.reshape(-1)\n Bflat = unit_vector(Bflat)\n #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))\n #print(matrix_angle(wcsprm.get_pc(), wcsprm_original.get_pc()) /2/np.pi*360)\n #bugfix: multiplying by cdelt otherwise the calculated angle is off by a tiny bit\n rotation_angle = matrix_angle(wcsprm.get_pc()@wcsprm.get_cdelt(), wcsprm_original.get_pc()@wcsprm_original.get_cdelt()) /2./np.pi*360.\n if((wcsprm.get_pc() @ wcsprm_original.get_pc() )[0,1] > 0):\n text = \"counterclockwise\"\n else:\n text = \"clockwise\"\n if(not silent):\n print(\"Rotation of WCS by an angle of {} deg \".format(rotation_angle)+text)\n old_central_pixel = wcsprm_original.s2p([wcsprm.crval], 0)[\"pixcrd\"][0]\n if(not silent):\n print(\"x offset: {} px, y offset: {} px \".format(wcsprm.crpix[0]- old_central_pixel[0], wcsprm.crpix[1]- old_central_pixel[1]))\n\n\n #check final figure\n if(images):\n fig = plt.figure()\n fig.canvas.manager.set_window_title('Result for {}'.format(fits_image_filename))\n plt.xlabel(\"pixel x direction\")\n plt.ylabel(\"pixel y direction\")\n plt.title(\"Result - red: catalog sources, blue: detected sources in img\")\n plt.imshow(image,cmap='Greys', origin='lower', norm=LogNorm())\n apertures.plot(color='blue', lw=1.5, alpha=0.5)\n #apertures_catalog = CircularAperture(wcs.wcs_world2pix(catalog_data[[\"ra\", \"dec\"]], 1), r=5.)\n apertures_catalog = CircularAperture(wcsprm.s2p(catalog_data[[\"ra\", \"dec\"]], 1)['pixcrd'], r=5.)\n\n apertures_catalog.plot(color='red', lw=1.5, alpha=0.5)\n if(save_images):\n name_parts = fits_image_filename.rsplit('.', 1)\n plt.savefig(name_parts[0]+\"_image_after.pdf\")\n if(not silent):\n print(\"--- Evaluate how good the transformation is ----\")\n dic_rms = register.calculate_rms(observation, catalog_data,wcsprm)\n #updating file\n converged = determine_if_fit_converged(dic_rms, catalog_data, observation, wcsprm, image.shape[0], 
image.shape[1], silent)\n report[\"converged\"] = converged\n report[\"matches\"] = dic_rms[\"matches\"]\n report[\"match_radius\"] = dic_rms[\"radius_px\"]\n if(converged or save_bad_result):\n write_wcs_to_hdr(fits_image_filename, wcsprm, report, hdul_idx=hdul_idx)\n if(filename_for_sources != None):\n wcs =WCS(wcsprm.to_header())\n observation_on_sky = wcs.wcs_pix2world(observation[[\"xcenter\",\"ycenter\"]], 1)\n #catalog_from_obs = np.zeros(observation_on_sky.shape[0], dtype={'names':('ra', 'dec', 'aperture_sum'),'formats':('f8', 'f8', 'f8')})\n catalog_from_obs = pd.DataFrame()\n catalog_from_obs[\"ra\"]= observation_on_sky[:,0]\n catalog_from_obs[\"dec\"]= observation_on_sky[:,1]\n catalog_from_obs[\"aperture_sum\"]= observation[\"aperture_sum\"]\n catalog_from_obs[\"mag\"]= -1.* observation[\"aperture_sum\"]#this is fine since we only use the mag to order the sources!\n catalog_from_obs.to_csv(filename_for_sources+\".csv\")\n if(images):\n plt.show()\n\n return converged, dic_rms #dictionary with short info about fit, \"matches\" gives a number of objects matched within certain radius",
"def update_results(self):\n clis = []\n aps = []\n\n with open(self.curr_csv) as fileo:\n file_ = fileo.readlines()\n file_enum = enumerate(file_)\n num = 0\n for num, line in file_enum:\n line = line.replace('\\0', '')\n if not line:\n continue\n if line.startswith('BSSID'):\n continue\n if line.startswith('Station'):\n num += 1\n break\n aps.append(line)\n for line in file_[num:]:\n clis.append(line)\n\n def clean_rows(reader):\n \"\"\"\n Airodump-ng's csv info comes a bit unclean.\n Strip each line of its extra blank spaces\n \"\"\"\n return [[a.strip() for a in row] for row in reader if row]\n self._aps = clean_rows(csv.reader(StringIO('\\n'.join(aps))))\n self._clients = clean_rows(csv.reader(StringIO('\\n'.join(clis))))",
"def viz(analogies):\n print(\"Index\".ljust(12) + \"Analogy\".center(45) + \"Gender score\".rjust(12))\n print(\"-\" * 69)\n print(\n \"\\n\".join(\n str(i).rjust(4) + a[0].rjust(29) + \" | \" + a[1].ljust(29) + (str(a[2]))[:4]\n for i, a in enumerate(analogies)\n )\n )",
"def ConvertAnat(self):\n if self.verbose:\n print 'Convert T1 and T2 images...'\n for entry in self.info:\n info = self.info[entry]\n if self.info[entry]['imgfile'] is None:\n continue\n if self.info[entry]['type'] in self.anat_types:\n key = self.info[entry]['type']\n imgfile = self.info[entry]['imgfile']\n cmd = 'convert_file %s %s %s %s' % (self.flip_opts, entry, \\\n imgfile, self.info[entry]['filetype'])\n checkfile = '%s%s' % (imgfile, self.info[entry]['suffix'])\n self.CheckExec(cmd, [checkfile])\n if self.info[entry]['norm_src'] and self.skull_strip:\n cmd = \"3dSkullStrip -input %s -prefix %s\" % \\\n (checkfile, self.info[entry]['imgfile_skstrip'])\n checkfile = '%s+orig.BRIK' % \\\n (self.info[entry]['imgfile_skstrip'])\n self.CheckExec(cmd, [checkfile])",
"def results_to_file(samples, filename, during_analysis):\r\n\r\n results = []\r\n\r\n if hasattr(samples, \"log_evidence\"):\r\n if samples.log_evidence is not None:\r\n\r\n value = \"{:.8f}\".format(samples.log_evidence)\r\n results += [\r\n frm.add_whitespace(str0=\"Bayesian Evidence \", str1=value, whitespace=90)\r\n ]\r\n results += [\"\\n\"]\r\n\r\n value = \"{:.8f}\".format(max(samples.log_likelihoods))\r\n results += [\r\n frm.add_whitespace(str0=\"Maximum Likelihood \", str1=value, whitespace=90)\r\n ]\r\n results += [\"\\n\\n\"]\r\n\r\n results += [\"Maximum Log Likelihood Model:\\n\\n\"]\r\n\r\n formatter = frm.TextFormatter()\r\n\r\n for i, prior_path in enumerate(samples.model.unique_prior_paths):\r\n formatter.add(\r\n (prior_path, format_str().format(samples.max_log_likelihood_vector[i]))\r\n )\r\n results += [formatter.text + \"\\n\"]\r\n\r\n if hasattr(samples, \"pdf_converged\"):\r\n\r\n if samples.pdf_converged:\r\n\r\n results += samples_text.summary(samples=samples, sigma=3.0, indent=4, line_length=90)\r\n results += [\"\\n\"]\r\n results += samples_text.summary(samples=samples, sigma=1.0, indent=4, line_length=90)\r\n\r\n else:\r\n\r\n results += [\r\n \"\\n WARNING: The samples have not converged enough to compute a PDF and model errors. \\n \"\r\n \"The model below over estimates errors. \\n\\n\"\r\n ]\r\n results += samples_text.summary(samples=samples, sigma=1.0, indent=4, line_length=90)\r\n\r\n results += [\"\\n\\ninstances\\n\"]\r\n\r\n formatter = frm.TextFormatter()\r\n\r\n for t in samples.model.path_float_tuples:\r\n formatter.add(t)\r\n\r\n results += [\"\\n\" + formatter.text]\r\n\r\n frm.output_list_of_strings_to_file(file=filename, list_of_strings=results)",
"def process_radia_vcf(job, radia_vcf, work_dir, univ_options):\n radia_vcf = job.fileStore.readGlobalFile(radia_vcf)\n with open(radia_vcf, 'r') as infile, open(radia_vcf + 'radia_parsed.tmp', 'w') as outfile:\n # The columns in INFILE are\n # [0] CHROM\n # [1] POS\n # [2] ID\n # [3] REF\n # [4] ALT\n # [5] QUAL\n # [6] FILTER\n # [7] INFO\n # [8] FORMAT\n # [9] DNA_NORMAL\n # [10] DNA_TUMOR\n # [11] RNA_TUMOR - Not always present\n for line in infile:\n # Print header to outfile\n if line.startswith('#'):\n print(line.strip(), file=outfile)\n continue\n line = line.strip().split('\\t')\n # If the call was not PASSing, or if the call was germline: skip\n if line[6] != 'PASS' or 'MT=GERM' in line[7]:\n continue\n # If there is just 1 ALT allele, print and continue\n if len(line[4]) == 1:\n print('\\t'.join(line), file=outfile)\n # If not, process\n else:\n seq_field_indeces = [9, 10]\n alleles = [line[3]] + line[4].split(',') # all alleles, incl. REF\n # collect tumor, normal and (if present) rna AD and AFs\n # AD = Depth of reads supporting each allele\n # AF = Fraction of reads supporting each allele\n # normal_ad = line[9].split(':')[5].split(',')\n normal_af = line[9].split(':')[6].split(',')\n tumor_ad = line[10].split(':')[5].split(',')\n tumor_af = line[10].split(':')[6].split(',')\n if len(line[11]) > 1:\n rna_ad = line[11].split(':')[5].split(',')\n rna_af = line[11].split(':')[6].split(',')\n seq_field_indeces += [11] # append rna since it is present\n else:\n # If rna is missing, set RNA_AD and RNA_AF to null sets for easily\n # integrating into the logic in the following code\n rna_ad = rna_af = [0, 0, 0, 0]\n # Initialise variables to store the probable ALT alleles and the index values of\n # the same wrt AD and AF\n out_alleles = set([])\n out_af_ad_index = {0}\n # parse AD and AF to get most probable ALT alleles\n for i in range(1, len(normal_af)):\n # Criteria for selection = AD > 4 and AF >0.1 in either tumor or RNA, given\n # normal AF < 0.1\n if ((float(tumor_af[i]) >= 0.1 and int(tumor_ad[i]) >= 4) or\n (float(rna_af[i]) >= 0.1 and int(rna_ad[i]) >= 4)) and \\\n (float(normal_af[i]) < 0.1):\n out_alleles.add(alleles[i])\n out_af_ad_index.add(i)\n # If the number of probable alleles is greater than 0 the print to outfile with\n # the modified allele fraction representing reads corrresponding to all alleles\n if len(out_alleles) > 0:\n line[4] = ','.join(out_alleles) # set alt alleles\n # Modify the AD and AF values in the TUMOR/NORMAL/RNA fields\n # one at a time. 
Seq fields contain\n # [0] GT* - Genotype\n # [1] DP - Read depth at this position in the sample\n # [2] INDEL - Number of indels\n # [3] START - Number of reads starting at this position\n # [4] STOP - Number of reads stopping at this position\n # [5] AD* - Depth of reads supporting alleles\n # [6] AF* - Fraction of reads supporting alleles\n # [7] BQ* - Avg base quality for reads supporting alleles\n # [8] SB* - Strand Bias for reads supporting alleles\n # Fields marked with *s are teh ones that contain info for each seq field\n # and need to be modified\n for seq_field_index in seq_field_indeces:\n # Get the details for seq_field\n deets = line[seq_field_index].split(':')\n # modify fields 5 thu 8 to hold only info for the probable\n # alleles\n for field_index in range(5, 9):\n field = deets[field_index].split(\",\")\n deets[field_index] = \",\".join([x for i, x in enumerate(field)\n if i in out_af_ad_index])\n # Modify DP to hold the new total of reads\n deets[1] = str(sum([int(x) for x in deets[5].split(\",\")]))\n # get the most likely genotypes based on AD and AF\n gt_by_ad = set([i for i, x in enumerate(deets[5].split(\",\"))\n if int(x) >= 4])\n gt_by_af = set([i for i, x in enumerate(deets[6].split(\",\"))\n if float(x) >= 0.1])\n # Get the consensus genotype\n genotype = gt_by_ad.intersection(gt_by_af)\n if len(genotype) == 0:\n deets[0] = \"0/0\"\n elif len(genotype) == 1:\n deets[0] = \"/\".join([str(x) for x in genotype] +\n [str(x) for x in genotype])\n elif len(genotype) == 2:\n deets[0] = \"/\".join([str(x) for x in genotype])\n else:\n print(\"ERROR : triple genotype detected\", file=sys.stderr)\n print(line, file=sys.stdout)\n # Rejoin the details line\n line[seq_field_index] = \":\".join(deets)\n # Print the modified line to output\n print(\"\\t\".join(line), file=outfile)\n # Else do nothing\n else:\n pass\n return outfile.name",
"def _read_arasim_antenna_data(filename):\n data = {}\n freqs = set()\n thetas = set()\n phis = set()\n freq = 0\n with open(filename) as f:\n for line in f:\n words = line.split()\n if line.startswith('freq'):\n freq = 1\n if words[-1]==\"Hz\":\n pass\n elif words[-1]==\"kHz\":\n freq *= 1e3\n elif words[-1]==\"MHz\":\n freq *= 1e6\n elif words[-1]==\"GHz\":\n freq *= 1e9\n else:\n raise ValueError(\"Cannot parse line: '\"+line+\"'\")\n freq *= float(words[-2])\n freqs.add(freq)\n elif line.startswith('SWR'):\n swr = float(words[-1])\n elif len(words)==5 and words[0]!=\"Theta\":\n theta = int(words[0])\n thetas.add(theta)\n phi = int(words[1])\n phis.add(phi)\n db_gain = float(words[2])\n # AraSim actually only seems to use the sqrt of the gain\n # (must be gain in power, not voltage)\n # gain = np.sqrt(float(words[3]))\n gain = np.sqrt(10**(db_gain/10))\n phase = np.radians(float(words[4]))\n data[(freq, theta, phi)] = (gain, phase)\n\n # Convert data dictionary into 3-D array of responses\n response = np.empty((len(freqs), len(thetas), len(phis)),\n dtype=np.complex_)\n for i, freq in enumerate(sorted(freqs)):\n for j, theta in enumerate(sorted(thetas)):\n for k, phi in enumerate(sorted(phis)):\n gain, phase = data[(freq, theta, phi)]\n response[i, j, k] = gain * np.exp(1j*phase)\n\n response_data = (response, np.array(sorted(freqs)),\n np.array(sorted(thetas)), np.array(sorted(phis)))\n return _fix_response_wrapping(response_data)",
"def imdisplay(filename, representation):\n\n image = read_image(filename, representation)\n plt.imshow(image, cmap=\"gray\")\n plt.show()",
"def reformat():\n toolkit.reformat()",
"def reformat_file(inFile, outFile):\n \n with open(outFile, \"w\") as outHandle:\n \n\t\t# write header line\n\t\toutLine = [\"g1\", \"g2\", \"raw_count\", \"log(obs/exp)\"]\n\t\t\n\t\toutHandle.write(\"\\t\".join(outLine) + \"\\n\")\n\n\n\t\tfor i, line in enumerate(open(inFile)):\n\t\t\t\n\t\t\tif not i == 0:\n\t\t\t\t\n\t\t\t\tsp = line.strip().split(\"\\t\")\n\t\t\t\t\n\t\t\t\t# get row interaction counts and normalized obs/exp values\n\t\t\t\trawCount = sp[12]\n\t\t\t\tobsExp = sp[13]\n\t\t\t\t\n\t\t\t\tgenes1 = sp[4].split(\"|\")\n\t\t\t\tgenes2 = sp[10].split(\"|\")\n\t\t\t\t\n\t\t\t\t#~ print(g1, g2, rawCount)\n\t\t\t\t\n\t\t\t\t# iterate over all pairs\n\t\t\t\tfor g1 in genes1:\n\n\t\t\t\t\tfor g2 in genes2:\n\t\t\t\t\t\t\n\t\t\t\t\t\toutLine = [g1, g2, rawCount, obsExp]\n\t\t\t\t\t\t\n\t\t\t\t\t\toutHandle.write(\"\\t\".join(outLine) + \"\\n\")",
"def output_suspects(dir_results_path, suspects):\n with open(path.join(dir_results_path, 'suspects.txt'), 'w', encoding='utf-8', errors='replace') as suspects_file:\n sorted_file_names = sorted(list(suspects.keys()))\n for file_name in sorted_file_names:\n suspects_file.write('{}\\t{}\\n'.format(string_utils.filename, file_name))\n suspects_file.write('{}\\t{}\\n\\n'.format(string_utils.suspect_ind, suspects[file_name]))",
"def analog_mapping_response(self, data):\n self.analog_mapping_query_results = data",
"def multiple(folder_name: str,\r\n min_plant_pixels: int = MIN_PLANT_SIZE,\r\n output_options = [['rows',\r\n 'centers',\r\n 'row_ids',\r\n 'distances'],\r\n \r\n ['rows',\r\n 'centers',\r\n 'row_ids',\r\n 'numbers'],\r\n \r\n ['dirt',\r\n 'ditches',\r\n 'rows',\r\n 'clusters',\r\n 'centers',\r\n 'row_ids',\r\n 'numbers',\r\n 'lines']\r\n ]) -> None:\r\n\r\n # Go to the specified folder\r\n ls = listdir(folder_name)\r\n ls = [join(folder_name, i) for i in ls]\r\n\r\n # Check if the folder exists\r\n if join(folder_name, 'Analysis') in ls:\r\n\r\n # If it does, rename the old folder\r\n new_name = join(folder_name, 'Analysis')\r\n while new_name in ls:\r\n new_name += '_old'\r\n \r\n rename(join(folder_name,'Analysis'), new_name)\r\n\r\n # Create new folders inside the given directory\r\n mkdir(join(folder_name, 'Analysis'))\r\n mkdir(join(folder_name, 'Analysis/Images'))\r\n mkdir(join(folder_name, 'Analysis/Data'))\r\n \r\n # Gather the images to be analysed\r\n co = 0\r\n pics = [j for j in ls if isfile(j)]\r\n le = len(pics)\r\n\r\n # Analyze each of the pictures\r\n for i in pics:\r\n\r\n # Make the field\r\n field = just_field(i, min_plant_pixels)\r\n\r\n # Measure the field and save results\r\n print('Saving data...\\n')\r\n ruler = Ruler(field)\r\n \r\n ruler.output_distances(\r\n join(folder_name,\r\n 'Analysis/Data/{}_Distances.csv'.format(basename(i).split('.')[0])\r\n ) \r\n )\r\n \r\n ruler.output_row_info(\r\n join(folder_name,\r\n 'Analysis/Data/{}_Rows.csv'.format(basename(i).split('.')[0])\r\n )\r\n )\r\n\r\n # Make and save visuals\r\n print('Saving pictures...\\n')\r\n for k in range(len(output_options)):\r\n output_options[k]\r\n img = field.make_visual(ruler, output_options[k])\r\n img.save(\r\n join(folder_name,\r\n 'Analysis/Images/{}_Visual_{}.png'.format(basename(i).split('.')[0], k + 1)))\r\n\r\n # Increment the progress meter\r\n co += 1\r\n print('Completed {}/{} images\\n\\n'.format(co, le))",
"def __printResults(files, expected, actual, similarity):\n if (showIndividualResults):\n for i in range(len(files)):\n print \"\\nExpected = %s\\nActual = %s \\nSimilarity = %f\" % (expected[i], actual[i], similarity[i])\n print \"\\nMean Similarity = %f\" % np.mean(similarity)",
"def output_results(self, filename):\n\n self.data.plot(title='Result of applying {} onto data set'.format(self.transformations[-1]))\n plt.savefig(\"results/{}.png\".format(filename))\n plt.close()",
"def read_aeronet_data_main(station_name, month, year, plot_results):\n # Load AERONET file of month-year\n station = gs.Station(station_name)\n\n monthdays = (date(year, month + 1, 1) - date(year, month, 1)).days\n start_day = datetime(year, month, 1, 0, 0)\n end_day = datetime(year, month, monthdays, 0, 0)\n wavelengths = [355, 532, 1064]\n\n base_name = f\"{start_day.strftime('%Y%m%d')}_{end_day.strftime('%Y%m%d')}_{station.aeronet_name}\"\n file_name = os.path.join(station.aeronet_folder, base_name, base_name + '.lev20')\n # TODO : add automatic download of `.lev20' file from AERONET in case a file is missing.\n aeronet_data = pd.read_csv(file_name, skiprows=6).dropna()\n\n # Parse data and rename columns for easier extrapolation of AOD values\n df_dt = pd.to_datetime(aeronet_data['Date(dd:mm:yyyy)'] + aeronet_data['Time(hh:mm:ss)'], format=\"%d:%m:%Y%H:%M:%S\")\n columns = ['AOD_1640nm', 'AOD_1020nm', 'AOD_675nm', 'AOD_500nm', 'AOD_380nm', 'AOD_340nm']\n df_AOD_ANGSTROM = aeronet_data[columns].copy(deep=True)\n df_AOD_ANGSTROM.index = df_dt\n for col in sorted(columns):\n col_new = int(col.split('_')[1].replace('nm', ''))\n df_AOD_ANGSTROM.rename(columns={col: col_new}, inplace=True)\n\n cols = df_AOD_ANGSTROM.columns.values.tolist()\n cols.extend(wavelengths)\n df_AOD_ANGSTROM = df_AOD_ANGSTROM.reindex(cols, axis='columns').sort_index(axis=1)\n\n # Calculate AOD for missing wavelengths as $355,532,1064$\n # by interpolation values from the nearest existing measured wavelengths.\n cols = df_AOD_ANGSTROM.columns.values.tolist()\n for wavelength in wavelengths:\n col_ind = df_AOD_ANGSTROM.columns.get_loc(wavelength)\n ratio = (cols[col_ind + 1] - cols[col_ind]) / (cols[col_ind + 1] - cols[col_ind - 1])\n df_AOD_ANGSTROM[wavelength] = df_AOD_ANGSTROM.iloc[:, col_ind - 1] * \\\n ratio + (1 - ratio) * \\\n df_AOD_ANGSTROM.iloc[:, col_ind + 1]\n\n # Create dataset of AOD per wavelength\n ds_chans = []\n for wavelength in wavelengths:\n aeronet_ds_chan = xr.Dataset(\n data_vars={'aod': ('Time', df_AOD_ANGSTROM[wavelength]),\n 'lambda_nm': ('Wavelength', [wavelength])\n },\n coords={'Time': df_AOD_ANGSTROM.index.tolist(),\n 'Wavelength': [wavelength]\n })\n ds_chans.append(aeronet_ds_chan)\n ds_aod = xr.concat(ds_chans, dim='Wavelength')\n\n ds_aod.aod.attrs['long_name'] = r'$\\tau$'\n ds_aod = ds_aod.aod.where(ds_aod >= 0, drop=True)\n ds_aod.attrs = {'info': 'Aerosol Optical Depth - generated from AERONET - level 2.0',\n 'location': station.name, 'source_file': file_name,\n 'start_time': start_day.strftime(\"%Y-%d-%m\"), 'end_time': end_day.strftime(\"%Y-%d-%m\")}\n\n # Calculate Angstrom Exponent\n couples = [(355, 532), (355, 1064), (532, 1064)]\n angstrom_daily = []\n for lambda_1, lambda_2 in couples:\n angstrom_couple = xr.apply_ufunc(lambda x, y: misc_lidar.angstrom(ds_aod.sel(Wavelength=x).aod,\n ds_aod.sel(Wavelength=y).aod, x, y), lambda_1, lambda_2,\n keep_attrs=True).rename('angstrom')\n angstrom_ds_chan = xr.Dataset(\n data_vars={'angstrom': ('Time', angstrom_couple.values),\n 'lambda_nm': ('Wavelengths', [f\"{lambda_1}-{lambda_2}\"])\n },\n coords={'Time': df_AOD_ANGSTROM.index.tolist(),\n 'Wavelengths': [f\"{lambda_1}-{lambda_2}\"]\n })\n\n angstrom_daily.append(angstrom_ds_chan)\n ds_ang = xr.concat(angstrom_daily, dim='Wavelengths')\n ds_ang.angstrom.attrs['long_name'] = r'$\\AA$'\n ds_ang.attrs = {'info': 'Angstrom Exponent - generated from AERONET AOD',\n 'location': station.name, 'source_file': file_name,\n 'start_time': start_day.strftime(\"%Y-%d-%m\"), 'end_time': 
end_day.strftime(\"%Y-%d-%m\")}\n\n # Show AOD and Angstrom Exponent for a period\n if plot_results:\n t_slice = slice(start_day, start_day + timedelta(days=30) - timedelta(seconds=30))\n\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 8))\n ax = axes.ravel()\n for wavelength in wavelengths:\n aod_mean = ds_aod.aod.sel(Wavelength=wavelength, Time=t_slice).mean().item()\n aod_std = ds_aod.aod.sel(Wavelength=wavelength, Time=t_slice).std().item()\n textstr = ' '.join((\n r'$\\mu=%.2f$, ' % (aod_mean,),\n r'$\\sigma=%.2f$' % (aod_std,)))\n ds_aod.aod.sel(Wavelength=wavelength, Time=t_slice).plot(label=fr\"{wavelength}, \" + textstr, ax=ax[0])\n ax[0].set_title(ds_aod.attrs['info'])\n ax[0].legend()\n ax[0].set_ylabel(r'$\\tau$')\n\n for lambda_1, lambda_2 in couples:\n angstrom_mean = ds_ang.angstrom.sel(Wavelengths=f\"{lambda_1}-{lambda_2}\", Time=t_slice).mean().item()\n angstrom_std = ds_ang.angstrom.sel(Wavelengths=f\"{lambda_1}-{lambda_2}\", Time=t_slice).std().item()\n textstr = ' '.join((\n r'$\\mu=%.2f$, ' % (angstrom_mean,),\n r'$\\sigma=%.2f$' % (angstrom_std,)))\n ds_ang.angstrom.sel(Wavelengths=f\"{lambda_1}-{lambda_2}\", Time=t_slice).plot(x='Time',\n label=fr\"$ \\AA \\, {lambda_1},{lambda_2}$, \" + textstr\n , ax=ax[1])\n ax[1].legend()\n ax[1].set_title('Angstrom Exponent')\n plt.tight_layout()\n plt.show()\n\n # Angstrom Exponent distribution of a month\n couple_0 = f\"{355}-{532}\"\n couple_1 = f\"{532}-{1064}\"\n\n x = ds_ang.angstrom.sel(Time=t_slice, Wavelengths=couple_0).values\n y = ds_ang.angstrom.sel(Time=t_slice, Wavelengths=couple_1).values\n\n fig, ax = plt.subplots(nrows=1, ncols=1)\n ax.scatter(x=x, y=y)\n ax.set_ylabel(couple_0)\n ax.set_xlabel(couple_1)\n ax.set_title(f\"Angstrom Exponent distribution {t_slice.start.strftime('%Y-%m')}\")\n plt.tight_layout()\n plt.show()\n\n # Save AOD and Angstrom Exponent datasets\n nc_base_name = f\"{start_day.strftime('%Y%m%d')}_{end_day.strftime('%Y%m%d')}_{station.name}\"\n\n xr_utils.save_dataset(ds_aod, folder_name=station.aeronet_folder, nc_name=nc_base_name+\"_aod.nc\")\n xr_utils.save_dataset(ds_ang, folder_name=station.aeronet_folder, nc_name=nc_base_name+\"_ang.nc\")"
] | [
"0.61483634",
"0.5722668",
"0.5629232",
"0.5580286",
"0.5352792",
"0.535211",
"0.53194237",
"0.5256824",
"0.52232164",
"0.5197312",
"0.51745605",
"0.5136301",
"0.51050967",
"0.5043372",
"0.50213665",
"0.50095344",
"0.4953617",
"0.49320453",
"0.49033397",
"0.48801324",
"0.48544413",
"0.4829855",
"0.48291317",
"0.4826379",
"0.48206928",
"0.4808265",
"0.47973037",
"0.47846088",
"0.47674194",
"0.47649845"
] | 0.62174374 | 0 |
Generate an analogs viewer HTML page based on a template. | def render_viewer(configfile, datafile):
try:
page = 'analogviewer.html'
with open(page, 'w') as fp:
fp.write(templating.render_template(
page,
configfile=configfile,
datafile=datafile,
# static_url=config.output_url() + '/static'))
static_url='../static'))
prepare_static_folder()
except Exception:
msg = "Failed to render analogviewer."
LOGGER.exception(msg)
raise Exception(msg)
else:
return page | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view_html_page():\n\n return render_template(\"moby.html\")",
"def main():\r\n return render_template(\"UI.html\")",
"def camera():\n return render_template('home/cam.html')",
"def preview():\n return render_template(\"controls/preview.html\")",
"def show(template_name):\n ShowCommandExecutor().show(template_name)",
"def study():\n return render_template('study.html')",
"def template(self):\n output=file(self.src, 'w').write\n output(\"\"\"%s\n<html>\n<head>\n<title>CHANGE ME</title>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=iso-8859-15\">\n<link rel=\"STYLESHEET\" href=\"%s\" type=\"text/css\">\n</head>\n<body>\n\n<!--it-->\n <p>\n Pagina non disponibile in questa lingua!\n <FORM><INPUT TYPE=\"button\" VALUE=\"Indietro\" onClick=\"history.go(-1);return true;\"> </FORM>\n </p>\n<!--/it-->\n\n<!--en-->\n <p>\n Page not available in this language!\n <FORM><INPUT TYPE=\"button\" VALUE=\"Back\" onClick=\"history.go(-1);return true;\"> </FORM>\n </p>\n<!--/en-->\n\n</body>\n</html>\n\"\"\" % (self.doctype, self.css))",
"def display():\n\n #still needs some cleanup on imagry and what the site is about. \n\n return render_template(\"index.html\")",
"def page_html(database, model):\n import json\n import pystache\n\n context = dict()\n context[\"formatted-model\"] = json.dumps(model, indent=2, sort_keys=True)\n context[\"_id\"] = model[\"_id\"]\n context[\"name\"] = model[\"name\"]\n context[\"full-project\"] = database.get(\"project\", model[\"project\"])\n return pystache.render(open(os.path.join(os.path.dirname(__file__), \"ui.html\"), \"r\").read(), context)",
"def get(self):\n path = os.path.join(os.path.dirname(__file__), '../pages/upload_information.html')\n self.response.out.write(template.render(path, {}))",
"def home():\r\n return render_template('video_test.html')",
"def provide_html_template():\n get_content = str(input(\"Paste the content you want to see displayed in the browser here. \\n\"))\n get_name = input(\"I am going to create an html file with your content. What do you want to call your file? \\n\")\n \n new_html_file = open(str(get_name) + '.html', 'w')\n \n page_content = '<html><head></head><body><p>' + get_content + '</p></body></html>'\n \n new_html_file.write(page_content)\n new_html_file.close()",
"def prototype_page1():\n return render_template('Prototype1.html')",
"def get(self, *args, **kwargs):\n self.render(\n os.path.join(self.application.template_home, \"nyi.html\"),\n **self.get_template_args()\n )",
"def make_html(depends=(files['image.gif'],),\n targets=(files['index.html'],)):\n\n index_html = open(files['index.html'].rel, 'w')\n index_html.write(pyyaks.context.render(html_template))\n index_html.close()",
"def prototype_page4():\n return render_template('Prototype4.html')",
"def main():\n return render_template(\"main.html\")",
"def main():\n return render_template(\"main.html\")",
"def home():\n return render_template(\"take_photo.html\")",
"def prototype_page3():\n return render_template('Prototype3.html')",
"def main_page():\n return render_template(\"index.html\")",
"def main_page():\n return render_template(\"main_page.html\")",
"def render(self, template: str, **vars) -> str:",
"def image_capture_demo():\n return render_template('image_capture_demo.html')",
"def html_template_file(self):\n pass",
"def get(self):\n path = os.path.join(os.path.dirname(__file__), 'poi_index.html')\n template_values = {}\n\n self.response.out.write(template.render(path, template_values))",
"def create_html(text, template, output):\n\n # TODO uncomment this for orginal DMP format (right now difficult with differing section sizes)\n #templateLoader = jinja2.FileSystemLoader(searchpath=\"../templates/new\")\n templateLoader = jinja2.FileSystemLoader(searchpath=\"../templates\")\n templateEnv = jinja2.Environment(loader=templateLoader)\n TEMPLATE_FILE = \"template_\" + template.lower() + \".html\"\n real_template = templateEnv.get_template(TEMPLATE_FILE)\n\n outputText = real_template.render(contact=text)\n html_file = open(output + \".html\", \"w\")\n html_file.write(outputText)\n html_file.close()\n\n return output + \".html\"",
"def main():\n return render_template('index.html')",
"def main():\n return render_template('index.html')",
"def main():\n with open(\"page_data.yaml\", 'r') as inputstr:\n config_data = yaml.safe_load(inputstr)\n ointf = OutputInterface('template.txt')\n table_data = get_song_artist_matches()\n ofilen = config_data['directory'] + os.sep + 'common_songs.html'\n title = 'Song Titles and Band Name Overlap'\n header = ['No.', 'Artist', 'Peak', 'Date', 'Song/Artist', 'Peak',\n 'Date', 'Song']\n ointf.build_page(ofilen, title, header, fmt_table(table_data))\n ointf.inject(XTRAEDIT)\n ointf.output()"
] | [
"0.66812843",
"0.6424832",
"0.64130586",
"0.641176",
"0.63622534",
"0.6312638",
"0.62966967",
"0.6219156",
"0.61963755",
"0.61826026",
"0.61532706",
"0.6106545",
"0.61008984",
"0.6087467",
"0.6080106",
"0.605967",
"0.6037739",
"0.6037739",
"0.60084754",
"0.5999159",
"0.5960242",
"0.59577274",
"0.5951533",
"0.59101903",
"0.59076345",
"0.59008044",
"0.58905816",
"0.58801776",
"0.58801776",
"0.5875831"
] | 0.70714074 | 0 |
floor the point to the next lower multiple of bucket_size | def bucketize(point, bucket_size):
return bucket_size * math.floor(point / bucket_size) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bucketize(point, bucket_size):\r\n return bucket_size * math.floor(point / bucket_size)",
"def bucket_boundaries(self, bucket):\n\n if bucket < 0 or bucket >= self.total_buckets:\n raise IndexError('bucket %d out of range' % bucket)\n if bucket == self.total_buckets - 1:\n return (self._lower_bounds[bucket], float('Inf'))\n return (self._lower_bounds[bucket], self._lower_bounds[bucket + 1])",
"def get_new_size(old_size, buckets):\n if buckets is None:\n return old_size\n else:\n w, h = old_size\n for (w_b, h_b) in buckets:\n if w_b >= w and h_b >= h:\n return w_b, h_b\n\n return old_size",
"def frequency_bucket_floor(bucket_index):\n\tfraction = bucket_index / FREQUENCY_BUCKETS\n\tlog_range = [math.log(edge, 2) for edge in HEARING_RANGE]\n\tlog_floor = log_range[0] + fraction * (log_range[1] - log_range[0])\n\treturn 2 ** log_floor",
"def _splitBucket(self, bucket):\n idx = self.buckets.index(bucket)\n self.buckets.pop(idx)\n middle = int(bucket.low + (bucket.high - bucket.low)/2)\n \n bucketLow = Bucket(bucket.low, middle, bucket.refreshed)\n bucketHigh = Bucket(middle+1, bucket.high, refreshed.refreshed)\n \n self.buckets.append(bucketLow)\n self.buckets.append(bucketHigh)\n \n for bucket in bucket.nodes:\n if bucketLow.inRange(bucket):\n bucketLow.addNode(bucket)\n else:\n bucketHigh.addNode(bucket)\n \n return (bucketLow, bucketHigh)",
"def FixedWidthBucketer(width, num_finite_buckets=100):\n return Bucketer(width=width, growth_factor=0.0,\n num_finite_buckets=num_finite_buckets)",
"def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries",
"def _bucket_boundaries(self, max_length, min_length=8, length_bucket_step=1.1):\n assert min_length <= max_length\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries",
"def estimate_bucket_pipeline(bucket_boundaries, num_samples, safe=True):\n if len(bucket_boundaries) < 2:\n raise ValueError('Bucket boundaries must contain at least 2 values')\n\n batch_step = 8\n\n batch_sizes = []\n for boundary in bucket_boundaries:\n batch_size = num_samples / (boundary - 1)\n batch_size = np.floor(batch_size / batch_step) if safe \\\n else np.round(batch_size / batch_step)\n batch_size *= batch_step\n\n if safe and batch_size < batch_step:\n if len(batch_sizes) < 2:\n raise ValueError('Too few samples per batch')\n\n return bucket_boundaries[:len(batch_sizes) - 1], batch_sizes, bucket_boundaries[len(batch_sizes) - 1]\n\n batch_sizes.append(max(batch_step, batch_size.astype(int)))\n\n return bucket_boundaries[:-1], batch_sizes, bucket_boundaries[-1]",
"def relative_position_bucket(relative_position,\n bidirectional: bool = True,\n num_buckets: int = 32,\n max_distance: int = 128):\n ret = 0\n relative_position = -relative_position\n if bidirectional:\n assert num_buckets % 2 == 0, 'When bidirectional is True, the number of buckets must be ' \\\n 'divisible by 2.'\n num_buckets //= 2\n ret = ret + (relative_position < 0).astype(np.int32) * num_buckets\n relative_position = np.abs(relative_position)\n else:\n # Clip all the negative values to 0\n relative_position = np.clip(relative_position, a_min=0, a_max=None)\n # Now, the relative_position is in the range [0, inf)\n\n # Half of the buckets deal with the exact increments,\n # i.e., 0, 1, 2, ..., max_exact - 1, where max_exact = num_buckets // 2\n max_exact = num_buckets // 2\n is_small = relative_position < max_exact\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to\n # max_distance\n val_if_large = max_exact + (\n np.log(relative_position.astype(np.float32) / max_exact)\n / math.log(max_distance / max_exact) * (num_buckets - max_exact)).astype(np.int32)\n val_if_large = np.minimum(val_if_large, num_buckets - 1)\n ret = ret + np.where(is_small, relative_position, val_if_large)\n return ret",
"def testBucketSize(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.assertEqual(100, fit)",
"def _prep_buckets(buckets, len_x):\n if isinstance(buckets, int):\n lims = np.linspace(0, len_x-1, buckets+1, dtype=int)\n else:\n lims = buckets\n buckets = len(lims)-1\n\n # Determine center of each bucket\n mids = np.rint(np.convolve(lims, np.ones(2), 'valid') / 2).astype(int)\n mids[0] = 0\n mids[-1] = len_x - 1\n\n return lims, mids",
"def smooth5(size: int) -> int:\n if size < 6:\n return size\n if not size % 2:\n return size\n\n new = np.inf\n power5 = 1\n while power5 < size:\n power35 = power5\n while power35 < size:\n power2 = 2 ** ((-int(-size // power35) - 1).bit_length())\n n = power2 * power35\n if n == size:\n return new\n elif n < new:\n new = n\n power35 *= 3\n if power35 == size:\n return new\n if power35 < new:\n new = power35\n power5 *= 5\n if power5 == size:\n return new\n if power5 < new:\n new = power5\n return new",
"def ceil_inplace(a):",
"def assign_bucket(self, level2: pd.DataFrame, l2size: int = 0, buckets: int = 20) -> pd.DataFrame:\n # Calc middle price between ask and bid\n level2 = level2.set_index(\"datetime\")\n askmin = level2[level2['ask_vol'].notna()].groupby('datetime')['price'].min().reset_index().set_index(\n \"datetime\")\n level2['price_min'] = askmin['price']\n bidmax = level2[level2['bid_vol'].notna()].groupby('datetime')['price'].max().reset_index().set_index(\n \"datetime\")\n level2['price_max'] = bidmax['price']\n level2['price_middle'] = (askmin['price'] + bidmax['price']) / 2\n\n # Assign a bucket number to each level2 item\n # scalar level2 size and bucket size\n if not l2size:\n l2size = level2.groupby('datetime')['price'].agg(np.ptp).reset_index()['price'].median()\n # 10 ask steps + 10 bid steps\n # buckets = 20\n bucketsize = l2size / buckets\n\n # If price is too out, set maximum possible bucket\n level2['bucket'] = (level2['price'] - level2['price_middle']) // bucketsize\n maxbucket = buckets // 2 - 1\n minbucket = -buckets // 2\n level2['bucket'] = level2['bucket'].clip(upper=maxbucket, lower=minbucket)\n return level2",
"def _wrap(self, point: float):\n\n if point == self.ub:\n return point\n width = self.ub - self.lb\n return ((point - self.lb) % width) + self.lb",
"def _wrap(self, point: float):\n\n if point == self.ub:\n return point\n width = self.ub - self.lb\n return ((point - self.lb) % width) + self.lb",
"def slice_sample_bounded_max(N, burn, logdist, xx, widths, step_out, max_attempts, bounds):\n xx = copy.deepcopy(xx)\n D = len(xx)\n samples = []\n if (not isinstance(widths, list)) or len(widths) == 1:\n widths = np.ones(D) * widths\n\n log_Px = logdist(xx)\n\n for ii in range(N + burn):\n log_uprime = np.log(random.random()) + log_Px\n for dd in random.sample(range(D), D):\n x_l = copy.deepcopy(xx)\n x_r = copy.deepcopy(xx)\n xprime = copy.deepcopy(xx)\n\n # Create a horizontal interval (x_l, x_r) enclosing xx\n rr = random.random()\n x_l[dd] = max(xx[dd] - rr*widths[dd], bounds[dd][0])\n x_r[dd] = min(xx[dd] + (1-rr)*widths[dd], bounds[dd][1])\n\n if step_out:\n while logdist(x_l) > log_uprime and x_l[dd] > bounds[dd][0]:\n\n x_l[dd] = max(x_l[dd] - widths[dd], bounds[dd][0])\n while logdist(x_r) > log_uprime and x_r[dd] < bounds[dd][1]:\n x_r[dd] = min(x_r[dd] + widths[dd], bounds[dd][1])\n\n # Propose xprimes and shrink interval until good one found\n zz = 0\n num_attempts = 0\n while True:\n zz += 1\n # print(x_l)\n xprime[dd] = random.random()*(x_r[dd] - x_l[dd]) + x_l[dd]\n \n log_Px = logdist(xx)\n if log_Px > log_uprime:\n xx[dd] = xprime[dd]\n break\n else:\n # Shrink in\n num_attempts += 1\n if num_attempts >= max_attempts:\n # print('Failed to find something')\n break\n elif xprime[dd] > xx[dd]:\n x_r[dd] = xprime[dd]\n elif xprime[dd] < xx[dd]:\n x_l[dd] = xprime[dd]\n else:\n raise Exception('Slice sampling failed to find an acceptable point')\n # Record samples\n if ii >= burn:\n samples.append(copy.deepcopy(xx))\n return samples",
"def calculate_large_constant(self, bound, real_reduction_iterations):#factor):\n minimum_exponent = round(90/(real_reduction_iterations-1))#math.ceil(math.log(bound, 10) * factor)\n \n return ZZ(10 ** minimum_exponent)",
"def compute_pool(in_size):\n return (in_size - 2) // 2 + 1",
"def all_bucket_boundaries(self):\n\n lower = self._lower_bounds[0]\n for i in xrange(1, self.total_buckets):\n upper = self._lower_bounds[i]\n yield (lower, upper)\n lower = upper\n\n yield (lower, float('Inf'))",
"def bucketing_fn(sequence_length, buckets):\n t = tf.clip_by_value(buckets, 0, sequence_length)\n return tf.argmax(t)",
"def buckets(self, disable_last_bucket_padding=False):\n if self.__total_count == 0:\n return\n\n # We use the minimum value for the lower bound of the first bucket.\n previous = self.__min\n for i in range(0, len(self.__counts)):\n if self.__counts[i] > 0:\n yield self.__counts[i], previous, self.__bucket_ranges[i]\n previous = self.__bucket_ranges[i]\n\n if self.__overflow == 0:\n return\n\n if not disable_last_bucket_padding:\n padding = 0.01\n else:\n padding = 0.0\n\n # We use the maximum value for the upper bound of the overflow range. Note, we added 0.01 to make sure the\n # boundary is exclusive to the values that fell in it.\n yield self.__overflow, self.__bucket_ranges[-1], self.__max + padding",
"def _BucketInterpolate(last_percentage, target, next_percentage, bucket_min,\n bucket_max):\n log_domain = False\n if bucket_min + 1.5 < bucket_max and bucket_min > 0:\n log_domain = True\n bucket_min = math.log(bucket_min)\n bucket_max = math.log(bucket_max)\n result = _LinearInterpolate(\n last_percentage, target, next_percentage, bucket_min, bucket_max)\n if log_domain:\n result = math.exp(result)\n return result",
"def split_kbucket(self):\n cur_range_size = self.range_max - self.range_min\n half_point = self.range_min + cur_range_size // 2\n\n # Ensure no empty range is created.\n assert self.range_min < half_point < self.range_max\n\n # Make the instantiation dependent on the actual class,\n # for easy inheritance.\n new_kbucket = self.__class__(half_point, self.range_max)\n\n # Halve the ID space of the split KBucket.\n self.range_max = half_point\n\n # Split the contact list into two, according to the new ranges.\n self._contacts, new_kbucket._contacts = util.partition(\n self._contacts,\n self.contact_in_range\n )\n\n return new_kbucket",
"def _compute_bn(self, lvl):\n bn = [0] # number of samples crossing the left/right boundary\n for n in range(lvl):\n # 1. down-sampling of N samples by the factor scl gives (N-1)//scl + 1 samples\n # 2. bn[-1]+M-1 is the number of samples acrossing the left/right boundary, with M being the number of freqeuncies\n # => hence after the downsampling the number of boundary crossing samples is:\n bn.append((bn[-1]+self.nfreq-2)//self.scaling+1)\n bn.append(bn[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return bn[1:][::-1]",
"def find_floor(n: int) -> int:\n block = find_block(n)\n n = n - 54 * (block - 1)\n return(n // 6 + min(1, n % 6))",
"def bucket_for_value(self, value):\n\n # bisect.bisect_left is wrong because the buckets are of [lower, upper) form\n return bisect.bisect(self._lower_bounds, value) - 1",
"def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))",
"def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))"
] | [
"0.7597311",
"0.6233041",
"0.62305874",
"0.6062927",
"0.5940828",
"0.5933147",
"0.5872735",
"0.57214737",
"0.57195526",
"0.5686446",
"0.5638457",
"0.5626216",
"0.56134063",
"0.5600956",
"0.5583571",
"0.55832744",
"0.55832744",
"0.55479383",
"0.5519782",
"0.55196553",
"0.54353935",
"0.54276943",
"0.5422362",
"0.5420862",
"0.5417241",
"0.5416008",
"0.54025435",
"0.53952956",
"0.5379143",
"0.5379143"
] | 0.75068164 | 1 |
buckets the points and counts how many in each bucket | def make_histogram(points, bucket_size):
return Counter(bucketize(point, bucket_size) for point in points) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_histogram(points, bucket_size):\r\n return Counter(bucketize(point, bucket_size) for point in points)",
"def htable(nbuckets):",
"def list_buckets():\n pass",
"def buckets(text, sigma):\n alpha = []\n bucket_sizes = array(\"L\", [0] * sigma)\n for c in text:\n bucket_sizes[c] += 1\n for i in range(sigma):\n if bucket_sizes[i] != 0:\n alpha.append(i)\n\n # print_buckets(bucket_sizes)\n return alpha, bucket_sizes",
"def test_bins(self):\n min_val = 0\n max_val = 1\n buckets = 10\n values_per_bucket = 10\n\n import numpy\n\n data = list(numpy.linspace(min_val, max_val, buckets * values_per_bucket))\n bins = numpy.linspace(min_val, max_val + sys.float_info.epsilon, buckets + 1)\n digitized = numpy.digitize(data, bins)\n counts = numpy.bincount(digitized)\n self.assertEqual(buckets + 1, len(counts))\n self.assertEqual(0, counts[0])\n for bucket in counts[1:]:\n self.assertEqual(values_per_bucket, bucket)",
"def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum",
"def pt_bayescount(Pr, Nt):\n # all credit goes to panzeri-treves\n\n # dimension of space\n dim = Pr.size\n\n # non zero probs only\n PrNZ = Pr[Pr>np.finfo(np.float).eps]\n Rnaive = PrNZ.size\n\n R = Rnaive\n if Rnaive < dim:\n Rexpected = Rnaive - ((1.0-PrNZ)**Nt).sum()\n deltaR_prev = dim\n deltaR = np.abs(Rnaive - Rexpected)\n xtr = 0.0\n while (deltaR < deltaR_prev) and ((Rnaive+xtr)<dim):\n xtr = xtr+1.0\n Rexpected = 0.0\n # occupied bins\n gamma = xtr*(1.0 - ((Nt/(Nt+Rnaive))**(1.0/Nt)))\n Pbayes = ((1.0-gamma) / (Nt+Rnaive)) * (PrNZ*Nt+1.0)\n Rexpected = (1.0 - (1.0-Pbayes)**Nt).sum()\n # non-occupied bins\n Pbayes = gamma / xtr\n Rexpected = Rexpected + xtr*(1.0 - (1.0 - Pbayes)**Nt)\n deltaR_prev = deltaR\n deltaR = np.abs(Rnaive - Rexpected)\n Rnaive = Rnaive + xtr - 1.0\n if deltaR < deltaR_prev:\n Rnaive += 1.0\n return Rnaive",
"def count():",
"def countTriplets1(arr, r):\n from collections import Counter\n arr_dict = Counter()\n ratio_range = []\n triplets = 0\n\n # Build the counter\n for x in arr:\n arr_dict[x] += 1\n\n # Build a list for easier iteration\n for key, value in arr_dict.items():\n ratio_range.append(tuple([key,value]))\n ratio_range.sort()\n \n for y in range(len(ratio_range)-2):\n firstvalue = ratio_range[y][1]\n secondvalue = ratio_range[y+1][1]\n thirdvalue = ratio_range[y+2][1]\n print(ratio_range, firstvalue, secondvalue,thirdvalue)\n\n summedvalue = (firstvalue + secondvalue + thirdvalue) - 3\n triplet_count = 2**summedvalue\n print(summedvalue, triplet_count)\n triplets += triplet_count\n\n return triplets, arr_dict, ratio_range",
"def length(self):\n # Loop through all buckets\n # Count number of key-value entries in each bucket\n\n # could be done with 1 line with comprehension\n # return sum(bucket.length() for bucket in self.buckets)\n\n total_entries = 0\n\n for linked_list in self.buckets:\n total_entries += linked_list.length()\n\n return total_entries",
"def build_histogram(iterator, key):\n buckets = defaultdict(int)\n values = {}\n\n num_objects = 0\n for obj in iterator:\n num_objects += 1\n\n try:\n val = obj[key]\n except (KeyError, TypeError):\n continue\n\n value_hash = hashlib.sha1()\n value_hash.update(syaml.dump_config(sort_yaml_obj(val)).encode())\n value_hash = value_hash.hexdigest()\n\n buckets[value_hash] += 1\n values[value_hash] = val\n\n return [\n (h, buckets[h], float(buckets[h]) / num_objects, values[h])\n for h in sorted(buckets.keys(), key=lambda k: -buckets[k])\n ]",
"def get_buckets(self, first, last, num_buckets, hertz_cutoff=float(5)):\n # Pensar en la posibilidad de no aplicar PCA, permitir utilizar fft sobre una feature diferente, por ejemplo raiz-cuadrada(x2 + y2 + z2)\n if self.pca == True:\n pca = PCA(n_components=1, copy=True, whiten=True)\n numpy_data = array(self.data)\n transformed_dataset = PCA.fit_transform(pca, numpy_data)\n slice=transformed_dataset[first:last]\n else:\n slice = self.data[first:last]\n slice = [column[0] for column in slice]\n \n transformed = fft.fft(slice)\n absolute = [abs(complex) for complex in transformed]\n\n frequencies = self.get_frequencies()\n\n buckets = [0 for i in range(num_buckets)]\n width = hertz_cutoff / num_buckets\n sum_of_buckets = 0.0000001\n for i in range(1, len(absolute)):\n index = int(frequencies[i] / width)\n if index >= num_buckets:\n break\n buckets[index] += absolute[i]\n sum_of_buckets += absolute[i]\n\n #if args.normalize == 't':\n # buckets = map(lambda x: x/sum_of_buckets, buckets)\n\n return buckets",
"def Hashtables__Triplets():\n # URL: https://www.hackerrank.com/challenges/count-triplets-1/problem\n ## Passes all tests\n # O(n) ish.\n # dae9ccff5aea4a8ca6e087a7c16bd70d Notability notes\n from collections import defaultdict\n from dataclasses import dataclass\n\n @dataclass\n class I:\n idx: int\n cnt: int\n\n\n def countTriplets(arr, r):\n d = defaultdict(list)\n prev_count = defaultdict(int) #\n triple_count = 0\n for i, v in enumerate(arr):\n prev = v / r # (!) Integer division can be wrong. 17 // 3 -> 5. This builds incorrect previous (5, 17)\n prev_prev = (prev / r, prev)\n\n if prev_prev in d:\n # cnt = sum([i.cnt for i in d[prev_prev]]) # Counting the whole chain can be O(n) ish. Tests 6,11 fail.\n cnt = prev_count[(prev / r, prev, \"sum\")] # Optimization, keep rolling sum. -> O(1)\n triple_count += cnt\n if prev in d:\n prev_c = len(d[prev]) # O(1)\n d[(prev, v)].append(I(i, prev_c))\n prev_count[(prev, v, \"sum\")] += prev_c # Keep rolling su.\n d[v].append(i)\n\n return triple_count\n\n _, r = [int(i) for i in input().split()]\n arr = [float(i) for i in input().split()]\n print(countTriplets(arr, r))\n\n #### wip entries\n # T (Submission 6) -> (integer devision issue.\n # 100000 3\n # 1 17 80 68 5 5 58 17 38 81 26 44 38 6 12 ...\n # expr: 2325652489\n # Act : 667065187 << wrong, under count.\n # ac2 : 19107507001 << wrong, over count. (integer devision issue.\n # ac3: 2325652489",
"def __init__(self):\n self.buckets = collections.defaultdict(list)",
"def new(num_buckets=256):\n aMap=[]",
"def buckets(self):\n return self.indexed",
"def group_count(counts, comp_ids):\n # binning\n for i in range(comp_ids.size):\n val = comp_ids[i]\n counts[val] += 1\n # inclusive scan\n total = 0\n for i in range(counts.size):\n ct = counts[i]\n counts[i] = ct + total\n total += ct",
"def __init__(self):\n self.num_counts = {}",
"def count_gates(qobj, basis, qubits):\n\n #TO DO\n pass",
"def length(self):\n # TODO: Count number of key-value entries in each of the buckets\n return self.size\n # for bucket in self.buckets():",
"def partitioner(mappings):\n\t\n\ttoken_counts = defaultdict(list)\n\t\n\tfor sublist in mappings:\n\t\tfor t, c in sublist:\n\t\t\ttoken_counts[t].append(c)\n\t\t\t\n\treturn token_counts",
"def distr(self,X):\r\n return {x:X.count(x) for x in set(X)}",
"def get_number_of_posts_per_bucket(dataset, min_time, max_time):\n\n buckets_rdd = dataset.map(lambda rec: (get_bucket(rec, min_time.timestamp(),\n max_time.timestamp()), 1)).\\\n reduceByKey(lambda c1, c2: c1 + c2)\n return buckets_rdd",
"def __init__(self):\n self.buckets = [-1] * 10\n self.length = len(self.buckets)",
"def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1",
"def pobj_counts(pcode_obj):\n pcode = (pcode_obj.asDict())['pcode'][0] # no multiple pcode blocks - no delimiter\n counts = {'galleries': 0, 'spreads': 0, 'layouts': 0, 'panelgroups': 0}\n # , 'panels': 0, 'skips': 0 }\n galleries = pcode.pop('gallery', '')\n counts['galleries'] = len(galleries)\n for gallery in galleries:\n spreads = gallery.pop('spread', '')\n counts['spreads'] += len(spreads)\n for spread in spreads:\n layouts = spread.pop('layout', '')\n counts['layouts'] += len(layouts)\n for layout in layouts:\n panelgroups = layout.pop('panelgroup', '')\n counts['panelgroups'] += len(panelgroups)\n return counts",
"def freq():",
"def __init__(self):\n self.buckets = 1009\n self.table = [{} for _ in range(self.buckets)]",
"def bucketize(point, bucket_size):\r\n return bucket_size * math.floor(point / bucket_size)",
"def totalhashes(self):\n return np.sum(self.counts)"
] | [
"0.6686371",
"0.6445363",
"0.6214255",
"0.60969096",
"0.605933",
"0.60316575",
"0.5992487",
"0.5950243",
"0.59492326",
"0.593202",
"0.59296554",
"0.5911292",
"0.58533347",
"0.58212197",
"0.5780779",
"0.5774585",
"0.5740624",
"0.570844",
"0.568908",
"0.5678229",
"0.5676203",
"0.56738245",
"0.5649308",
"0.5644287",
"0.56131226",
"0.5612169",
"0.5610705",
"0.56069195",
"0.5604273",
"0.5591653"
] | 0.6620214 | 1 |
returns the num_columns x num_columns matrix whose (i, j)th entry is the correlation between columns i and j of data | def correlation_matrix(data):
_, num_columns = shape(data)
def matrix_entry(i, j):
return correlation(get_column(data, i), get_column(data, j))
return make_matrix(num_columns, num_columns, matrix_entry) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def correlation_matrix(data):\r\n\r\n _, num_columns = shape(data)\r\n\r\n def matrix_entry(i, j):\r\n return correlation(get_column(data, i), get_column(data, j))\r\n\r\n return make_matrix(num_columns, num_columns, matrix_entry)",
"def correlation(data):\n return corrcoef(np.transpose(np.reshape(data, ((data.shape[0] * data.shape[1]), data.shape[2]))))",
"def correlation_matrix(self):\n correlation_matrix = self.model.covariance.copy()\n sigmaD = np.sqrt(np.diag(correlation_matrix))\n for ii in range(correlation_matrix.shape[0]):\n for jj in range(correlation_matrix.shape[1]):\n correlation_matrix[ii, jj] /= sigmaD[ii] * sigmaD[jj]\n return correlation_matrix",
"def _calc_correlation_matrix(data, kind='correlation'):\n correlation_measure = ConnectivityMeasure(kind=kind)\n correlation_matrix = correlation_measure.fit_transform([data])[0]\n\n return correlation_matrix",
"def correlate_columns(matrix):\n return np.dot(matrix.T, matrix) / (la.norm(matrix) ** 2)",
"def _listcorr(a):\n corrs = np.zeros((a[0].shape[1], len(a), len(a)))\n for i in range(len(a)):\n for j in range(len(a)):\n if j > i:\n corrs[:, i, j] = [np.nan_to_num(np.corrcoef(ai, aj)[0, 1])\n for (ai, aj) in zip(a[i].T, a[j].T)]\n return corrs",
"def compute_correlation_matrix_with_incomplete_data(df, correlation_type):\n X = copy.deepcopy(pd.DataFrame(df)) # make sure we are using a dataframe to do computations. \n assert correlation_type in ['spearman', 'pearson', 'covariance']\n X = X.astype(np.float64) # if we do not do this for some reason it ignores some columns in computing the correlation matrix. \n # which ends up being the wrong shape. \n if correlation_type == 'covariance':\n C = X.cov() * (len(df) - 1) / len(df) # need correction factor so it's consistent with ddof = 0. Makes little difference. \n else:\n C = X.corr(correlation_type)\n C = np.array(C)\n assert C.shape[0] == C.shape[1]\n assert C.shape[0] == len(df.columns)\n\n \n for i in range(len(C)):\n for j in range(len(C)):\n if np.isnan(C[i][j]):\n print(\"Warning: entry of covariance matrix is nan; setting to 0.\")\n C[i][j] = 0\n non_missing_data_counts = (~pd.isnull(X)).sum(axis = 0)\n return C, non_missing_data_counts",
"def FormCorrelationMatrix(mat):\n nVars = len(mat[0])\n N = len(mat)\n \n res = numpy.zeros((nVars,nVars),'d')\n for i in range(nVars):\n x = mat[:,i]\n sumX = sum(x)\n sumX2 = sum(x*x)\n for j in range(i,nVars):\n y = mat[:,j]\n sumY = sum(y)\n sumY2 = sum(y*y)\n numerator = N*sum(x*y) - sumX*sumY\n denom = numpy.sqrt((N*sumX2-sumX**2)*(N*sumY2-sumY**2))\n if denom != 0.0:\n res[i,j] = numerator/denom\n res[j,i] = numerator/denom\n else:\n res[i,j] = 0\n res[j,i] = 0\n return res",
"def test_correlation_matrix(self):\r\n a = [2, 4, 6, 8]\r\n b = [1.5, 1.4, 1.2, 1.1]\r\n c = [15, 10, 5, 20]\r\n m = correlation_matrix([a, b, c])\r\n self.assertFloatEqual(m[0, 0], [1.0])\r\n self.assertFloatEqual([m[1, 0], m[1, 1]], [correlation(b, a)[0], 1.0])\r\n self.assertFloatEqual(\r\n m[2], [correlation(c, a)[0], correlation(c, b)[0],\r\n 1.0])",
"def FormCorrelationMatrix(mat):\n nVars = len(mat[0])\n N = len(mat)\n\n res = numpy.zeros((nVars, nVars), 'd')\n for i in range(nVars):\n x = mat[:, i]\n sumX = sum(x)\n sumX2 = sum(x * x)\n for j in range(i, nVars):\n y = mat[:, j]\n sumY = sum(y)\n sumY2 = sum(y * y)\n numerator = N * sum(x * y) - sumX * sumY\n denom = numpy.sqrt((N * sumX2 - sumX**2) * (N * sumY2 - sumY**2))\n if denom != 0.0:\n res[i, j] = numerator / denom\n res[j, i] = numerator / denom\n else:\n res[i, j] = 0\n res[j, i] = 0\n return res",
"def correlation_d(mat):\n\n print(\"DO NOT USE. BROKEN?\")\n\n if mat.ndim != 2:\n raise ValueError(\"mat must be a 2d matrix\")\n if np.any(mat > 1) or np.any(mat < 0):\n raise ValueError(\"mat must be binary\")\n\n N = mat.size\n g = np.diagonal(mat)\n # g = np.tril(mat, -1) # g is the sum over the heavside used in Grassberger\n # g = g[g.nonzero()]\n g = g.sum()\n\n return (2.0 / N * (N - 1)) * g",
"def correlation_matrix(self, layout={}, **kwargs):\n df = self._data.corr()\n kwargs.update({\n 'zmin': -1, 'zmax': 1,\n 'colors': 'rdbu', 'ncolors': 9,\n 'xgap': 3, 'ygap': 3, 'dtick': 1,\n 'colorbar': {'x': 1 - 0.22},\n })\n\n layout = recursive_update(\n layout, updater={\n 'xaxis': {'showgrid': False, 'zeroline': False},\n 'yaxis': {'showgrid': False, 'zeroline': False},\n })\n\n # square for 1920x1080 screens in awating for better plotly option\n layout = recursive_update(\n layout, updater={\n 'yaxis': {'domain': [0, 1]},\n 'xaxis': {'domain': [0.28215, 1 - 0.28215]},\n })\n\n return df.iplot.heatmap(layout=layout, **kwargs)",
"def correlate_rows(matrix):\n return np.dot(matrix, matrix.T) / (la.norm(matrix) ** 2)",
"def correlation(C):\n\n if type(C) is not np.ndarray:\n raise TypeError('C must be a numpy.ndarray')\n if len(C.shape) < 2 or C.shape[0] is not C.shape[1]:\n raise ValueError('C must be a 2D square matrix')\n return C / np.sqrt(np.outer(np.diagonal(C), np.diagonal(C)))",
"def calc_ic(data):\n return scs.spearmanr(data[:, 0], data[:, 1]).correlation",
"def _compute_correlations(self, data):\n mappings = self.mappings_\n n_channels, n_times = data.shape\n\n # get the predictions\n y_pred = data.T.dot(mappings.T)\n y_pred = y_pred.reshape((n_times, len(self.picks),\n self.n_resample), order='F')\n # pool them using median\n # XXX: weird that original implementation sorts and takes middle value.\n # Isn't really the median if n_resample even\n y_pred = np.median(y_pred, axis=-1)\n # compute correlation\n num = np.sum(data.T * y_pred, axis=0)\n denom = (np.sqrt(np.sum(data.T ** 2, axis=0)) *\n np.sqrt(np.sum(y_pred ** 2, axis=0)))\n\n corr = num / denom\n return corr",
"def calculate_correlations(input_data, index_col, cat_features, exclu_elements): \r\n try:\r\n # encode the categorical features\r\n encoded_data = pd.get_dummies(input_data,columns=cat_features,drop_first=True)\r\n\r\n pd_transposed_data = encoded_data.set_index('Style_display_code').T\r\n\r\n # get the number of items\r\n items_list = [str(a) for a in pd_transposed_data.columns]\r\n\r\n print(\"Number of items to correlate :{}_Timestamp:{}\".format(str(len(items_list)), \r\n format(str(datetime.now()))))\r\n \r\n\r\n #compute correlations and save the pickle file\r\n# matrix = pd_transposed_data.corr().values\r\n# pickle.dump(matrix, open(staging_dir+ '/corr_matrix_output_py3.p', 'wb'))\r\n \r\n # read from the saved pickle file - ONLY FOR CONSECUTIVE RUNS, TO SAVE TIME\r\n matrix = pickle.load(open(staging_dir+ '/corr_matrix_output_py3.p', \"rb\" ) )\r\n\r\n print(\"Corr Matrix size:{}_Timestamp:{}\".format(str(matrix.size),\r\n format(str(datetime.now()))))\r\n\r\n except Exception as e:\r\n print(\" Error !!\", e)\r\n \r\n # return the top correlated items\r\n return top_correlateditems(items_list,matrix, index_col, exclu_elements)",
"def correlation(data, method, caption):\n columns = list(data)\n coefficients = data.astype(float).corr(method=method)\n results = []\n for i in range(len(columns)):\n for j in range(i + 1, len(columns)):\n coefficient = coefficients[columns[i]][columns[j]]\n results.append((\n abs(coefficient), coefficient,\n columns[i] + ' x ' + columns[j]))\n print('# ' + caption + ', ' + method)\n for result in reversed(sorted(results)):\n abs_coefficient, coefficient, columns_pair = result\n print (coefficient, columns_pair)",
"def _c_correlation(cls, X, y):\n su = np.zeros(X.shape[1])\n for i in np.arange(X.shape[1]):\n su[i] = cls._symmetrical_uncertainty(X[:, i], y)\n return su",
"def matthews_corr(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n elif b == n or c == n:\n # only one (non-diagonal) cell is non-zero\n return -0.5\n elif p1 == n or p2 == n or q1 == n or q2 == n:\n # one row or column is zero, another non-zero\n return 0.0\n\n return _div(self.covar(), sqrt(p1 * q1 * p2 * q2))",
"def calculate_correlation_matrix(X, Y=None):\n\tif Y is None:\n\t\tY = X\n\tn_samples = np.shape(X)[0]\n\tcovariance = (1 / n_samples) * (X - X.mean(0)).T.dot(Y - Y.mean(0))\n\tstd_dev_X = np.expand_dims(calculate_std_dev(X), 1)\n\tstd_dev_Y = np.exapnd_dims(calculate_std_dev(Y), 1)\n\tcorrelation_matrix = np.divide(covariance, std_dev_X.dot(std_dev_Y.T))\n\treturn np.array(correlation_matrix, dtype=float)",
"def Corr(x,y):\n \n cocoeff1 = np.empty((y.shape[1],y.shape[2]))\n cocoeff2 = np.empty((y.shape[1],y.shape[2]))\n for i in xrange(y.shape[1]):\n for j in xrange(y.shape[2]):\n cocoeff1[i,j],cocoeff2[i,j] = sts.pearsonr(x[:,i,j],y[:,i,j])\n \n print 'Completed: Correlation calculations!'\n \n return cocoeff1, cocoeff2",
"def correlation_matrix(series, as_rows=True):\r\n return corrcoef(series, rowvar=as_rows)\r\n # unused codes below\r\n if as_rows:\r\n return corrcoef(transpose(array(series)))\r\n else:\r\n return corrcoef(array(series))",
"def _mn_cor_ ( self , size = -1 , root = False ) :\n #\n cov = self.cov ( size , root )\n #\n from math import sqrt\n #\n if isinstance ( cov , ROOT.TMatrix ) :\n\n size = cov.GetNrows()\n root = True\n \n else : size = cov.kRows\n\n ## use ROOT matrices \n if root : cor = ROOT.TMatrix ( size , size )\n else : cor = cov.__class__ () \n\n for i in range(0, size ) :\n \n d_i = cov ( i , i )\n cor [ i , i ] = 1 if 0 < d_i else 0\n \n for j in range ( i + 1 , size ) :\n \n d_j = cov ( j , j )\n \n if 0 != cov ( i , j ) and 0 < d_i and 0 < d_j :\n \n if root and _rv < 6 : cor [ i ] [ j ] = cov ( i , j ) / sqrt ( d_i * d_j )\n else : cor [ i , j ] = cov ( i , j ) / sqrt ( d_i * d_j )\n \n else :\n \n if root and _rv < 6 : cor [ i ] [ j ] = 0 \n else : cor [ i , j ] = 0\n\n return cor",
"def cov_to_corr(matrix):\n sqrtdiag = np.sqrt(np.diag(matrix))\n return matrix / np.outer(sqrtdiag, sqrtdiag)",
"def fast_corr(df, col_name):\n\n if not isinstance(df, pd.DataFrame):\n raise TypeError(\"The type of the input data must be dataframe.\")\n\n if not isinstance(col_name, list):\n raise TypeError(\"The col_name must be list.\")\n\n if all(isinstance(item, str) for item in col_name) is False and all(\n isinstance(item, int) for item in col_name) is False:\n raise ValueError(\n \"The col_name must be a list of strings or a list of integers.\")\n\n if len(col_name) < 2:\n raise ValueError(\n \"At least two columns must be selected for correlation analysis.\")\n\n if all(isinstance(item, str) for item in col_name) is True and all(\n elem in df.columns.to_list() for elem in col_name) is False:\n raise ValueError(\"The column names were not found.\")\n\n if all(isinstance(item, int) for item in col_name) is True and max(\n col_name) > (df.shape[1] - 1):\n raise ValueError(\"The column indexes were out of range.\")\n\n if all(isinstance(item, str) for item in col_name):\n data = df.loc[:, col_name]\n else:\n data = df.iloc[:, col_name]\n\n data2 = data._get_numeric_data()\n rm_n = data.shape[1] - data2.shape[1]\n print(\"Removed\", rm_n, \"non-numberical columns from your selected columns\")\n\n sns.set(style=\"white\")\n corr = data2.corr()\n mask = np.triu(np.ones_like(corr, dtype=np.bool))\n f, ax = plt.subplots(figsize=(9, 11))\n ax.set_title('Correlation Matrix', size=20)\n ax.tick_params(axis='x', labelsize=15)\n ax.tick_params(axis='y', labelsize=15)\n\n cmap = sns.diverging_palette(220, 20, as_cmap=True)\n p = sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n p.set_yticklabels(p.get_yticklabels(), rotation=360)\n return p",
"def einsum_correlation(X, Y_i, type=\"pearson\"):\n\n if type == \"pearson\":\n X -= X.mean(axis=1)[:, None]\n Y_i -= np.nanmean(Y_i)\n elif type == \"cosine\":\n X, Y_i = X, Y_i\n elif type == \"spearman\":\n # check this\n X = stats.rankdata(X, axis=1)\n Y_i = stats.rankdata(Y_i)\n elif type == \"kendalltau\":\n corr = np.array([stats.kendalltau(x, Y_i)[0] for x in X])\n return corr[None, :]\n\n X_norm, Y_norm = norm(X, axis=1), norm(Y_i)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n if Y_norm == 0:\n corr = np.zeros(X_norm.shape[0])\n else:\n corr = np.einsum(\"ij, j\", X, Y_i) / (X_norm * Y_norm)[None, :]\n\n return corr",
"def determine_correlation(var1,var2):\n v1 = np.array(var1)\n v2 = np.array(var2)\n mat = np.c_[(v1,v2)]# np.vstack((v1,v2)) #\n corr = np.corrcoef(mat.T)\n return corr[0][1]",
"def correlation(C):\n if not isinstance(C, np.ndarray):\n raise TypeError(\"C must be a numpy.ndarray\")\n shape = C.shape\n if (len(shape) != 2) or shape[0] != shape[1]:\n raise ValueError(\"C must be a 2D square matrix\")\n\n diagonal = np.diag(C)\n\n # standard deviation\n std = np.sqrt(np.expand_dims(diagonal, axis=0))\n\n correlation = C / np.matmul(std.T, std)\n\n return correlation",
"def calculate_correlation(self):\n self.network.index_nodes()\n self._calculate_dist()\n pearson_correlation, pearson_pvalue = scipy.stats.pearsonr(self.dist[:,0], self.dist[:,1])\n spearman_correlation, spearman_pvalue = scipy.stats.spearmanr(self.dist[:,0], self.dist[:,1])\n return pearson_correlation, pearson_pvalue, spearman_correlation, spearman_pvalue"
] | [
"0.764798",
"0.69120264",
"0.6902327",
"0.6836222",
"0.6754823",
"0.66619134",
"0.6467521",
"0.637715",
"0.6350548",
"0.6327055",
"0.6241184",
"0.623158",
"0.62231666",
"0.608737",
"0.5997184",
"0.59910566",
"0.5946013",
"0.5926691",
"0.5917131",
"0.5892433",
"0.58744544",
"0.58568764",
"0.5841674",
"0.58316565",
"0.5815126",
"0.5757207",
"0.5708692",
"0.57068074",
"0.57004964",
"0.56945634"
] | 0.77116764 | 0 |
given a list of parsers (some of which may be None) apply the appropriate one to each element of the input row | def parse_row(input_row, parsers):
return [parser(value) if parser is not None else value
for value, parser in zip(input_row, parsers)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_row(input_row, parsers):\n\n return [try_or_none(parser)(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]",
"def parse_row(input_row, parsers):\n return [try_or_none(parser)(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]",
"def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row,parsers)",
"def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row, parsers)",
"def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row, parsers)",
"def whole(parsers):\n if len(parsers) == 0:\n return finished >> (lambda x: [])\n if len(parsers) == 1:\n return parsers[0] + finished >> (lambda x: x[:-1])\n return reduce(add, parsers) + skip(finished)",
"def parse_order(line, *line_parsers):\r\n for parser in line_parsers:\r\n try:\r\n return parser.parse(line)\r\n except ValueError:\r\n continue",
"def merge_one_row(self, row, combine):\n for index in range(0, len(row)):\n columns_to_combine = self.column_index_to_columns[index]\n if columns_to_combine is not None:\n yield combine(row, columns_to_combine)",
"def _try_parse(self, *parse_funcs: ParseFunc) -> Optional[node.NodeType]:\n for parse_func in parse_funcs:\n try:\n with self.tokens:\n return parse_func()\n except ParserException:\n pass\n return None",
"def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert",
"def parser(sent_list): #input: list of sentences",
"def parse(rows):\n compiled_data = []\n for row in grouper(rows, 5):\n row1_fields = ('year', 'id', 'name', 'age', 'sex', 'city', 'st', 'co')\n row2_fields = ('pace', 'proj_time', 'offl_time', 'place')\n split_fields = ('5k', '10k', '15k', '20k', 'Half', '25k',\n '30k', '35k', '40k', 'Pre-Finish', 'Finish')\n \n row1 = field_labels(row1_fields, row[0])\n row2 = field_labels(row2_fields, row[4][0:4])\n\n times = row[2] + [row2['offl_time'], row2['offl_time']]\n dists = [5, 10, 15, 20, 21.097494, 25, 30, 35, 40, 42.194, 42.195]\n split_data = zip(split_fields, times, dists)\n fmt_splits = {'splits': split_format(split_data)}\n datum = map_union(row1, row2, fmt_splits)\n compiled_data.append(datum)\n\n return compiled_data",
"def safe_apply(row, fn):\n if row:\n return fn(row)\n else:\n return row",
"def apply_rules(self, token_parse_list):\r\n return token_parse_list",
"def merge_rows(self, rows):\n for row in rows:\n yield tuple(self.merge_one_row(row, combine_measurements))",
"def _list_parser(self, old_list):\n for i, item in enumerate(old_list):\n if isinstance(item, dict):\n old_list[i] = Yaco(item)\n elif isinstance(item, list):\n old_list[i] = self._list_parser(item)\n else:\n pass\n return old_list",
"def _parse_rows(rows, header='infer'):\n if not rows:\n raise ValueError('rows={0} is invalid'.format(rows))\n rows = copy.copy(rows)\n label = rows[0][0].replace(' ', '_').lower()\n\n if header == 'infer':\n if len(rows) >= 3:\n if _infer_dtype(rows[1][-1]) != _infer_dtype(rows[2][-1]):\n header = True\n else:\n header = False\n else:\n header = False\n if header is True:\n colnames = rows[1]\n data_idx = 2\n else:\n colnames = None\n data_idx = 1\n\n data_dtypes = [_infer_dtype(val) for val in rows[data_idx]]\n if any(dd == 'pct' for dd in data_dtypes):\n label += '_pct'\n\n parsed_rows = []\n for row in rows[data_idx:]:\n vals = [_convert_val(val, dtype) for val, dtype in zip(row, data_dtypes)]\n if colnames:\n parsed_rows.append({colname:val for colname, val in zip(colnames, vals)})\n else:\n parsed_rows.append(vals)\n\n return label, parsed_rows",
"def parse(\n colstr,\n *,\n hex6=True,\n hex3=True,\n rgbfunc_int=True,\n rgbfunc_float=True,\n rgbfunc_percent=True,\n name_css=True,\n name_crayola=True,\n name_xkcd=True,\n name_meodai_best=True,\n name_meodai=True,\n):\n funcs = []\n if hex6:\n funcs.append(parse_hex6)\n if hex3:\n funcs.append(parse_hex3)\n if rgbfunc_int:\n funcs.append(parse_rgbfunc_int)\n if rgbfunc_float:\n funcs.append(parse_rgbfunc_float)\n if rgbfunc_percent:\n funcs.append(parse_rgbfunc_percent)\n if name_css:\n funcs.append(parse_name_css)\n if name_crayola:\n funcs.append(parse_name_crayola)\n if name_xkcd:\n funcs.append(parse_name_xkcd)\n if name_meodai_best:\n funcs.append(parse_name_meodai_best)\n if name_meodai:\n funcs.append(parse_name_meodai)\n\n res = None\n for func in funcs:\n try:\n res = func(colstr)\n except ValueError:\n pass\n if res is None:\n raise ValueError(f\"Could not find a working parser for {colstr!r}.\")\n return res",
"def _postprocess(\n self,\n result: List[str],\n eojeols: List[str],\n poses: List[str],\n ):\n token_indices = []\n temp_group = []\n for i, res in enumerate(result):\n if (\"<\" in res) or (\">\" in res):\n continue\n if not temp_group:\n temp_group.append(i)\n else:\n if i == (temp_group[-1] + 1):\n temp_group.append(i)\n else:\n token_indices.append(temp_group)\n temp_group = [i]\n token_indices.append(temp_group)\n\n lucrative = 0\n for i, li_index in enumerate(token_indices):\n if poses:\n eojeol = eojeols[i].split(\"+\")\n pos = poses[i].split(\"+\")\n tagged = []\n for e, p in zip(eojeol, pos):\n tagged.append(f\"{e}/{p}\")\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [\"+\".join(tagged)]\n else:\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [eojeols[i]]\n lucrative += len(li_index) - 1\n\n return result",
"def process_fix_list(fix_list, fixes):\r\n for line in fix_list:\r\n yield process_fix_line(line, fixes)",
"def ParseMultiple(\r\n cls,\r\n statements: List[\"Statement.ItemType\"],\r\n normalized_iter: NormalizedIterator,\r\n observer: \"Statement.Observer\",\r\n ignore_whitespace=False,\r\n\r\n # True to ensure that results are sorted to find the best possible match\r\n # (regardless of statement order). False will return the first statement\r\n # matched.\r\n sort_results=True,\r\n\r\n # True to execute all statements within a single thread\r\n single_threaded=False,\r\n ) -> Optional[\"Statement.ParseResult\"]:\r\n\r\n original_statements = statements\r\n if isinstance(original_statements, Statement.NamedItem):\r\n statements = original_statements.Item\r\n\r\n use_futures = not single_threaded and len(statements) != 1\r\n\r\n # ----------------------------------------------------------------------\r\n def Impl(statement):\r\n parser = cls._Parser(\r\n statement,\r\n normalized_iter.Clone(),\r\n observer,\r\n ignore_whitespace=ignore_whitespace,\r\n single_threaded=single_threaded,\r\n )\r\n\r\n success = parser.ParseItem(statement)\r\n if success is None:\r\n return None\r\n\r\n return Statement.ParseResult(success, parser.results, parser.normalized_iter)\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n if use_futures:\r\n futures = observer.Enqueue(\r\n [\r\n lambda statement=statement: Impl(statement)\r\n for statement in statements\r\n ],\r\n )\r\n\r\n results = []\r\n\r\n for future in futures:\r\n result = future.result()\r\n if result is None:\r\n return None\r\n\r\n results.append(result)\r\n\r\n else:\r\n results = []\r\n\r\n for statement in statements:\r\n result = Impl(statement)\r\n if result is None:\r\n return None\r\n\r\n results.append(result)\r\n\r\n if sort_results:\r\n # Stable sort according to the criteria:\r\n # - Success\r\n # - Longest matched content\r\n\r\n sort_data = [\r\n (\r\n index,\r\n 1 if result.Success else 0,\r\n result.Iter.Offset,\r\n )\r\n for index, result in enumerate(results)\r\n ]\r\n\r\n sort_data.sort(\r\n key=lambda value: value[1:],\r\n reverse=True,\r\n )\r\n\r\n result = results[sort_data[0][0]]\r\n\r\n else:\r\n result = None\r\n\r\n for potential_result in results:\r\n if potential_result.Success:\r\n result = potential_result\r\n\r\n break\r\n\r\n if result is None:\r\n result = results[0]\r\n\r\n if result.Success:\r\n return Statement.ParseResult(\r\n True,\r\n [\r\n Statement.StatementParseResultItem(\r\n original_statements,\r\n result.Results,\r\n ),\r\n ],\r\n result.Iter,\r\n )\r\n\r\n return_results: Statement.ParseResultItemsType = []\r\n max_iter: Optional[NormalizedIterator] = None\r\n\r\n for result in results:\r\n return_results += result.Results\r\n\r\n if max_iter is None or result.Iter.Offset > max_iter.Offset:\r\n max_iter = result.Iter\r\n\r\n return Statement.ParseResult(\r\n False,\r\n [\r\n Statement.StatementParseResultItem(\r\n original_statements,\r\n return_results,\r\n ),\r\n ],\r\n cast(NormalizedIterator, max_iter),\r\n )",
"def zip_values(row, widths, types):\n expanded_row = []\n for i, cell in enumerate(row):\n if isinstance(cell, basestring):\n expanded_row.append(tuple(split_text(cell, widths[i])))\n else:\n expanded_row.append((cell, ))\n for row in zip_longest(*expanded_row, fillvalue=empty):\n line = []\n for c, w, t in zip(row, widths, types):\n line.append((c, w, t))\n yield line",
"def parse_lines(lines, packages):\n for line in lines:\n x = line.split(' ')\n cmd = x[0].upper()\n #LOG.debug(cmd)\n if 'LIST' in cmd:\n getattr(commands, cmd)(p)\n else:\n getattr(commands, cmd)(line, p)",
"def preprocess_raw(df):\n nlp = spacy.load(\"en_core_web_sm\")\n\n print(\"Enriching data from dataframe...\")\n\n parse_cols = [\"s1\", \"s2\"]\n s1_docs = []\n s2_docs = []\n for col in parse_cols:\n parse_fail = 0\n\n for doc in nlp.pipe(df[col].values, batch_size=50, n_threads=4):\n if doc.is_parsed:\n if col == \"s1\":\n s1_docs.append(doc)\n else:\n s2_docs.append(doc)\n else:\n # Ensure parse lists have the same number of entries as the original\n # Dataframe regardless of parse failure\n parse_fail += 1\n if col == \"s1\":\n s1_docs.append(None)\n else:\n s2_docs.append(None)\n\n print(f\"{col.upper()} parse failures: {parse_fail}\")\n\n print()\n\n return list(zip(s1_docs, s2_docs))",
"def tag_translator(self, tag_dict, columns_list):\n # translate tags column by column\n for column in columns_list:\n # first deal with special cases (group_list and tag_list)\n if column[0] == 'group_list':\n for i in range(1, len(column)):\n group_li = column[i].split(',')\n # translate and replace the tag code in group list directly\n for j in range(len(group_li)):\n group_li[j] = tag_dict[column[0]].get(group_li[j],\n 'null')\n # now transfer the translated list to a string and write\n # the string back to the column to replace the current code\n # group; first create a str to replace each group\n group_translated_str = ''\n for tag in group_li:\n group_translated_str += tag + ','\n # now write the string back to the list, remove trailing\n # comma\n column[i] = group_translated_str[:-1]\n elif column[0] == 'tag_list':\n for i in range(1, len(column)):\n if '=' in column[i]:\n tag_prob_sep_list = column[i].split('=')\n # so the first entry is a str of tags, the 2nd entry is\n # a str of probability in the tag_prob_sep_list\n tag_code_list = tag_prob_sep_list[0].split(',')\n for j in range(len(tag_code_list)):\n tag_code_list[j] = tag_dict[column[0]].get(\n tag_code_list[j], 'null')\n # now transfer the translated list to a string and write\n # back to the column\n tag_list_translated_str = ''\n for tag in tag_code_list:\n tag_list_translated_str += tag + ','\n tag_list_translated_str = tag_list_translated_str[\n :-1] + '=' + tag_prob_sep_list[1]\n column[i] = tag_list_translated_str\n else: # for some mistakes without equal sign or empty\n mistake_list = column[i].split(',')\n for k in range(len(mistake_list)):\n mistake_list[k] = tag_dict[column[0]].get(\n mistake_list[k], 'null')\n mistake_list_str = ''\n for tag in mistake_list:\n mistake_list_str += tag + ','\n\n column[i] = mistake_list_str[:-1]\n\n # case 1 to 1 if this column needs translation, replace the tag\n # code(key) with the value in the dictionary\n elif column[0] in tag_dict.keys():\n for i in range(1, len(column)):\n column[i] = tag_dict[column[0]].get(column[i], 'null')\n else: # case that the column does not need translation\n pass\n return",
"def parse( self, exprlist ):\n t = self.prop[\"DSEC\"]\n E = self.prop[\"ELE\"]\n V = self.prop[\"VOL\"]\n T = self.prop[\"TEMP\"]\n C = self.prop[\"CONC\"]\n S = self.prop[\"SPEC\"]\n pH = E\n row = []\n for i in exprlist:\n i = i.replace(\"A\", \"self.absorbance\")\n exec 'row.append(' + i + ')'\n return row",
"def format_row(row):\n assert isinstance(row,list)\n \n data_row=[0]*len(header) #Formatted data row to be output and appeneded to 'data'\n \n for i in [0,1,11,13,14,15,16,17,19,20,21,28,31,45,46,47,48]: data_row[i]=row[i] #emptry string will NOT return None\n for i in [2,3,12,18]: data_row[i]=type_cast(lambda x: int(float(x)),row[i])\n for i in [6,7,8,9,10,23,24,25,26,27,29,30]: data_row[i]=type_cast(float,row[i])\n for i in [4,5,22]: data_row[i]=type_cast(datetime.strptime,row[i],'%Y-%m-%d %H:%M:%S')\n for i in range(32,45):\n if row[i]=='False': data_row[i]=False #bool('False') returns True!\n elif row[i]=='True': data_row[i]=True\n else: data_row[i]=None\n return data_row",
"def cast(val):\n\n for func in [int, float, lambda x: x.strip(), lambda x: x]:\n try:\n return func(val)\n except ValueError:\n pass",
"def parse_rows(self, rows):\r\n rows = [\r\n (row_id, parse_date(created), student_module_id)\r\n for row_id, created, student_module_id in rows\r\n ]\r\n return rows",
"def parse_row(self, response, row):\n raise NotImplementedError"
] | [
"0.7760424",
"0.7708113",
"0.5941967",
"0.5765646",
"0.5765646",
"0.5420369",
"0.53176457",
"0.513226",
"0.50360847",
"0.49477687",
"0.49304157",
"0.48991036",
"0.4804586",
"0.4744639",
"0.46920457",
"0.46880016",
"0.46772683",
"0.46717724",
"0.46683633",
"0.46663684",
"0.4642575",
"0.46287268",
"0.46163613",
"0.46159002",
"0.4541884",
"0.4533785",
"0.45284688",
"0.45172507",
"0.4511699",
"0.4507096"
] | 0.7862312 | 0 |
wrap a reader to apply the parsers to each of its rows | def parse_rows_with(reader, parsers):
for row in reader:
yield parse_row(row, parsers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row,parsers)",
"def parse_row(input_row, parsers):\n\n return [parser(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]",
"def fit_reader(self, reader):\n return self.fit(line for (_, line) in reader.readsents(silent=False))",
"def parse_row(self, response, row):\n raise NotImplementedError",
"def transform(self,reader,transformers):\n def transform_reader():\n for (X,seq_lengths,y) in reader:\n for transformer in transformers:\n X = transformer(X,seq_lengths)\n # 当y 用于分类是, y 必须是int型\n #y = y.long()\n yield (X,y)\n return transform_reader",
"def __iter__(self):\n if not self.resolved:\n self._resolve_reader()\n\n if isinstance(self.resolved, WriterType):\n raise TypeError('Writer is not iterable')\n\n def gen():\n for row in self.resolved:\n if self.header and self.rowklass:\n row = self.rowklass(*row)\n yield row\n\n return gen()",
"def parse_row(input_row, parsers):\n return [try_or_none(parser)(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]",
"def parse_row(input_row, parsers):\n\n return [try_or_none(parser)(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]",
"def parse_rows(self, response):\n\n for row in csviter(\n response, self.delimiter, self.headers, quotechar=self.quotechar\n ):\n ret = iterate_spider_output(self.parse_row(response, row))\n for result_item in self.process_results(response, ret):\n yield result_item",
"def parseRows(self, infile, columns, mapping, types):\n parser = make_parser()\n parser.setContentHandler(ParseData(columns, mapping, types,\n self.ns_repdata, self.output.write,\n self.form_line)) #, self.debug\n parser.setFeature(handler.feature_namespaces, 1)\n parser.parse(infile)",
"def _load_reading_row(self):\n\n raise NotImplementedError()",
"def _resolve_reader(self):\n self.fh = self.path.fs.open(self.path, 'rU')\n self.resolved = csv.reader(self.fh, delimiter=self.delimiter)",
"def parse_row(self, row):\n \n self.metadata = row",
"def iter_rows_raw(self, *args):\n\n for row in super().iter_rows_raw(*args):\n row[0] = row[1] # sequential catalog index not right in this case; overwrite to match finder id\n yield row",
"def parse(filehandle):\n for row in csv.DictReader(filehandle):\n yield cccalc.types.Fill(row)",
"def get_iter(self, reader: DataReader):\n\n if reader is None:\n return None\n\n xs, ys = get_dataset(reader)\n\n return self.prepare_dataset(xs, ys)",
"def parse(self):\n if not self.is_parsed:\n assert self._is_valid is not None, 'You need to call is_valid before parsing.'\n assert self._is_valid, 'File structure doesn\\'t correspond provided serializer. Check file content.'\n\n for row in self._iterator:\n mapped_row = {\n serializer_field: row.value for serializer_field, row in zip(\n self.serializer.Meta.fields, row[:self.slice_index]\n )\n }\n\n # some editors saves more than 1 million empty rows. So we stop parsing when find first empty row\n if not any(mapped_row.values()):\n break\n\n row_serializer = self.serializer(data=mapped_row)\n if row_serializer.is_valid():\n self.rows_imported += 1\n # for speed improvement we collect all objects and use bulk create\n # row_serializer.save()\n self.objects.append(self.serializer.Meta.model(**row_serializer.validated_data))\n else:\n self.rows_skipped += 1\n\n self.serializer.Meta.model.objects.bulk_create(self.objects)",
"def iter_rows_raw(self, *args):\n with open(self.filename) as f:\n header = self._read_column_names(f)\n cnt = 0\n ids = [0]\n for a in args:\n try:\n ids.append(header.index(a))\n except ValueError:\n ids.append(None)\n for l in f:\n if not l.startswith(\"#\"):\n col_data = self._get_values_for_columns(ids, l)\n col_data.insert(0, cnt+self._finder_offset_start)\n yield col_data\n cnt += 1",
"def rowgen(searchcursor_rows):\n rows = searchcursor_rows\n row = rows.next() \n while row:\n yield row\n row = rows.next()",
"def map(query, fh, skip_header_row, default_obj={}):\n\n # First, try the JsonRecordReader; then attempt the csv record reader\n reader = MetaRecordReader(default_obj)\n\n # Hack: append an 'else []' to queries that lack an else clause\n if \" if \" in query and not \" else \" in query:\n query = query + \" else []\"\n\n compiled_query = compile(query, 'STRING', 'eval')\n\n it = iter(fh)\n if skip_header_row:\n next(it)\n\n for line in it:\n obj, env = reader.get_record(line)\n obj_out = eval(compiled_query, env)\n if isinstance(obj_out, list):\n # Lists are treated as flatmap\n yield from obj_out\n else:\n yield obj_out",
"def clean_rows(reader):\n return [[a.strip() for a in row] for row in reader if row]",
"def un(source, wrapper=list, error_bad_lines=True):\n if isinstance(source, six.string_types):\n source = six.StringIO(source)\n\n # Prepare source lines for reading\n rows = parse_lines(source, error_bad_lines)\n\n # Get columns\n if is_namedtuple(wrapper):\n columns = wrapper._fields\n wrapper = wrapper._make\n else:\n columns = next(rows, None)\n if columns is not None:\n i, columns = columns\n yield wrapper(columns)\n\n # Get values\n for i, values in rows:\n if check_line_consistency(columns, values, i, error_bad_lines):\n yield wrapper(values)",
"def datagetter(cls):\n with open('myfile', 'rt') as f:\n rows = [r for r in csv.reader(f)]\n dothing = lambda _: [i for i, v in enumerate(_)]\n rows = [dothing(_) for _ in rows]\n raise NotImplementedError('You need to implement this yourlself!')\n return rows",
"def parse_records(self, handle, do_features=...): # -> Generator[SeqRecord, None, None]:\n ...",
"def _read_rows(geo_id: int, geo_type: str, *names: str) -> geo.GeoRecord:\n cls = geo.GeoMeta.registry[geo_type]\n record = geo.GeoRecord(geo_id, cls.from_row_record(*names))\n return record",
"def __next__(self):\n validation_errors = []\n def callback(error):\n \"\"\"\n Collect validation errors\n \"\"\"\n validation_errors.append(error)\n self.schema.callback = callback\n\n \"\"\"\n Read rows until we find an error (unless we're printing all rows)\n \"\"\"\n row = next(self.source)\n while row:\n if self.show_all or not self.schema.validateRow(row):\n # append error data to row\n error_row = copy(row)\n messages = \"\\n\".join(map(lambda e: e.message, validation_errors))\n tags = \"\\n\".join(map(lambda e: e.rule.hxlTag if e.rule else '', validation_errors))\n rows = \"\\n\".join(map(lambda e: str(e.row.sourceRowNumber) if e.row else '', validation_errors))\n columns = \"\\n\".join(map(lambda e: str(e.column.sourceColumnNumber) if e.column else '', validation_errors))\n error_row.values = error_row.values + [messages, tags, rows, columns]\n return error_row\n else:\n row = next(self.source)",
"def process_rows_in(reader):\n project_rows, other_rows = [], []\n for row in reader:\n if account_type(row) == Account.PROJECT:\n project_rows.append(row)\n else:\n other_rows.append(row)\n\n issue_map = IssueCache()\n logger = logging.getLogger('peacecorps.sync_accounting')\n for row in other_rows + project_rows:\n row = trim_row(row, logger)\n account = Account.objects.filter(code=row['PROJ_NO']).first()\n if account:\n logger.info(\n 'Updating %s, new balance: %s / %s', row['PROJ_NO'],\n row['UNIDENT_BAL'], row['PROJ_REQ'])\n update_account(row, account)\n else:\n logger.info('Creating %s', row['PROJ_NO'])\n create_account(row, issue_map)",
"def open_reader(self, **kw):\n return self.table.open_reader(str(self), **kw)",
"def svevent_reader(in_file):\n with open(in_file) as in_handle:\n while 1:\n line = in_handle.next()\n if line.startswith(\">\"):\n break\n header = line[1:].rstrip().split(\"\\t\")\n reader = csv.reader(in_handle, dialect=\"excel-tab\")\n for parts in reader:\n out = {}\n for h, p in zip(header, parts):\n out[h] = p\n yield out",
"def _get_load_iterators(options):\n \n #declare delimiters and field/line iterators\n field_delimiter = ','\n line_delimiter = '\\n'\n escape_char = '\\\\'\n \n line_pattern = re.compile('(?s)^(.*?)' + re.escape(line_delimiter) + '(.*)$')\n field_pattern = re.compile('(?s)^(.*?)' + re.escape(field_delimiter) + '(.*)$')\n \n def _line_iter(f):\n buffer = ''\n while True:\n next = f.read(1024 * 4)\n if next == '':\n if buffer:\n yield buffer\n \n return\n \n buffer += next\n while buffer:\n m = re.match(line_pattern, buffer)\n if m:\n yield m.group(1)\n buffer = m.group(2)\n else:\n break\n\n unescape_map = dict()\n if options['escape_eol_chars']:\n unescape_map['n'] = '\\n'\n unescape_map['t'] = '\\t'\n unescape_map['r'] = '\\r'\n\n def _unescape(s):\n in_escape = False\n for c in s:\n if in_escape:\n yield unescape_map.get(c, c)\n in_escape = False\n elif c == escape_char:\n in_escape = True\n else:\n yield c\n \n def _field_iter(s):\n while s:\n m = re.match(field_pattern, s)\n if m:\n yield ''.join(m.group(1))\n s = m.group(2)\n else:\n yield ''.join(_unescape(s))\n return\n \n return _line_iter, _field_iter"
] | [
"0.8287405",
"0.6207315",
"0.6118633",
"0.59529907",
"0.5909166",
"0.5875364",
"0.5849649",
"0.584668",
"0.5829653",
"0.5819698",
"0.57757884",
"0.569907",
"0.5674658",
"0.5655691",
"0.56322485",
"0.562984",
"0.5621524",
"0.56190336",
"0.55955404",
"0.5495519",
"0.54885215",
"0.54721564",
"0.5461641",
"0.54244566",
"0.5418254",
"0.5398987",
"0.5395498",
"0.53899807",
"0.53897244",
"0.5365261"
] | 0.8208408 | 1 |
wraps f to return None if f raises an exception; assumes f takes only one input | def try_or_none(f):
def f_or_none(x):
try: return f(x)
except: return None
return f_or_none | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def try_or_none(f):\n def f_or_none(x):\n try: return f(x)\n except: return None\n return f_or_none",
"def None_if_exception(f, val):\n def decorated_f(*args, **kwargs):\n try:\n x = f(*args, **kwargs)\n except Exception:\n return None\n else:\n return x\n return decorated_f",
"def safe_one_retval_wrapper(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n return None, f(*args, **kwargs)\n except Exception:\n return traceback.format_exc(), None\n\n return wrapper",
"def return_none_if_error(func):\n def wrapper(*args, **kwargs):\n try:\n value = func(*args, **kwargs)\n except Exception as e:\n value = None\n return value\n\n return wrapper",
"def try_or_none(arbitrary_function):\n\n def f_or_none(x):\n try:\n return arbitrary_function(x)\n except (ValueError, IndexError, KeyError, OSError):\n return None\n\n return f_or_none",
"def safe(f):\n def safe_f(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except:\n return float('inf') # this means infinity in python\n return safe_f",
"def safe(f):\n\n def safe_f(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except:\n return float('inf') # this means \"infinity\" in Python\n\n return safe_f",
"def optional_apply(f, value):\n if value is not None:\n return f(value)",
"def safe(f):\n def safe_f(*args, **kwargs):\n try:\n return f(*args, ** kwargs)\n except:\n return float('inf')\n return safe_f",
"def safe(f):\n\tdef safe_f(*args,**kwargs):\n\t\ttry:\n\t\t\treturn f(*args,**kwargs)\n\t\texcept:\n\t\t\treturn float('inf')\n\treturn safe_f",
"def optionalize(func: Callable, exc_callback: Callable = zero) -> Callable:\n @wraps(func)\n def optionally(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n exc_callback(e)\n return optionally",
"def arguments_not_none(func):\n def wrapper(*args, **kwargs):\n for arg in args:\n if arg is None:\n raise NullArgument()\n for arg, val in kwargs.iteritems():\n if val is None:\n raise NullArgument()\n try:\n return func(*args, **kwargs)\n except TypeError as ex:\n if 'takes exactly' in ex.args[0]:\n raise NullArgument('Wrong number of arguments provided: ' + str(ex.args[0]))\n else:\n raise\n\n return wrapper",
"def EOF_or_raise(f):\n try:\n f.next()\n except StopIteration:\n return\n else:\n raise Exception(str(f))",
"def maybe_ignore_exc(\n func: t.Callable[[S], T], exc: t.Type[BaseException], val: t.Optional[S]\n) -> t.Optional[T]:\n return maybe(partial(ignore_exc, func, exc), val)",
"def exceptNull(func): # -> (*args: Unknown, **kwargs: Unknown) -> Unknown:\n ...",
"def if_error(f, x, upon_error):\n try:\n return f(x)\n except:\n return upon_error",
"def maybe(f: Callable[P, B]) -> Callable[P, Maybe[B]]:\n @wraps(f)\n def dec(*args: P.args, **kwargs: P.kwargs) -> Maybe[B]:\n try:\n return Just(f(*args, **kwargs))\n except: # noqa\n return Nothing()\n\n return dec",
"def try_ex(func):\n\n try:\n return func()\n except KeyError:\n return None",
"def try_ex(func):\n\n try:\n return func()\n except KeyError:\n return None",
"def error_if_null_return(retval: Any, func: Callable, args: Tuple[Any]):\n if not retval:\n raise WinError()\n return retval",
"def ignore_exc(\n func: t.Callable[[S], T], exc: t.Type[BaseException], val: S\n) -> t.Optional[T]:\n try:\n return func(val)\n except exc:\n return None",
"def test_require_at_least_one_but_none_provided(self):\n from plone.api.exc import MissingParameterError\n _func = at_least_one_of('arg1', 'arg2')(undecorated_func)\n with self.assertRaises(MissingParameterError):\n _func()",
"def try_func(func):\n try:\n return func()\n except Exception as e:\n return e",
"def test_do_non_gf():\n f = lambda: None\n with raises(TypeError) as err_info:\n perf(do(f)())\n assert str(\n err_info.value\n ) == \"%r is not a generator function. It returned None.\" % (f,)",
"def lift(f, message):\n def lift_impl(x):\n if f(x):\n return return_(x)\n return Fail(message)\n\n return lift_impl",
"def function_with_wrong_return() -> None:\n return 42",
"def return_first(fn):\n def wrapped(*args, **kwargs):\n res = fn(*args, **kwargs)\n return res if _HVD.rank() == 0 else None\n return wrapped",
"def create_attempter(f):\n def attempt(fn, *args, **kwargs):\n if f.done():\n return\n\n try:\n fn(*args, **kwargs)\n except Exception as e:\n f.set_exception(e)\n\n return attempt",
"def exception(input_fn, *args, **kwargs):\n\n if hasattr(input_fn, \"exception\"):\n raise AttributeError(\"Cannot decorate input_fn because it already has and 'exception' attribute\")\n def new(*args, **kwargs):\n from sys import exc_info\n try :\n new.exception = None\n ret = input_fn(*args, **kwargs)\n except:\n new.exception = exc_info()\n raise\n return ret\n new.__dict__ = input_fn.__dict__\n new.exception = None\n return new",
"def wrap(config: Config, failures: List[Any], identifier: str, nullary_function: Callable, default: Any = None) -> Any:\n try:\n return nullary_function()\n except Exception:\n if config.stop_on_first_failure:\n raise\n failures.append((identifier, traceback.format_exc()))\n return default"
] | [
"0.7946012",
"0.77939826",
"0.73445934",
"0.72532094",
"0.7125534",
"0.66916907",
"0.6634186",
"0.66205657",
"0.6533325",
"0.65153587",
"0.64503604",
"0.64382523",
"0.63942015",
"0.6357486",
"0.63371694",
"0.63331324",
"0.61645675",
"0.6157735",
"0.6157735",
"0.60959184",
"0.6065215",
"0.5988695",
"0.5980415",
"0.59274256",
"0.5891608",
"0.5875871",
"0.5860637",
"0.58253676",
"0.5772984",
"0.575126"
] | 0.7956505 | 0 |
given a list of parsers (some of which may be None) apply the appropriate one to each element of the input row | def parse_row(input_row, parsers):
return [try_or_none(parser)(value) if parser is not None else value
for value, parser in zip(input_row, parsers)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_row(input_row, parsers):\n\n return [parser(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]",
"def parse_row(input_row, parsers):\n return [try_or_none(parser)(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]",
"def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row,parsers)",
"def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row, parsers)",
"def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row, parsers)",
"def whole(parsers):\n if len(parsers) == 0:\n return finished >> (lambda x: [])\n if len(parsers) == 1:\n return parsers[0] + finished >> (lambda x: x[:-1])\n return reduce(add, parsers) + skip(finished)",
"def parse_order(line, *line_parsers):\r\n for parser in line_parsers:\r\n try:\r\n return parser.parse(line)\r\n except ValueError:\r\n continue",
"def merge_one_row(self, row, combine):\n for index in range(0, len(row)):\n columns_to_combine = self.column_index_to_columns[index]\n if columns_to_combine is not None:\n yield combine(row, columns_to_combine)",
"def _try_parse(self, *parse_funcs: ParseFunc) -> Optional[node.NodeType]:\n for parse_func in parse_funcs:\n try:\n with self.tokens:\n return parse_func()\n except ParserException:\n pass\n return None",
"def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert",
"def parser(sent_list): #input: list of sentences",
"def parse(rows):\n compiled_data = []\n for row in grouper(rows, 5):\n row1_fields = ('year', 'id', 'name', 'age', 'sex', 'city', 'st', 'co')\n row2_fields = ('pace', 'proj_time', 'offl_time', 'place')\n split_fields = ('5k', '10k', '15k', '20k', 'Half', '25k',\n '30k', '35k', '40k', 'Pre-Finish', 'Finish')\n \n row1 = field_labels(row1_fields, row[0])\n row2 = field_labels(row2_fields, row[4][0:4])\n\n times = row[2] + [row2['offl_time'], row2['offl_time']]\n dists = [5, 10, 15, 20, 21.097494, 25, 30, 35, 40, 42.194, 42.195]\n split_data = zip(split_fields, times, dists)\n fmt_splits = {'splits': split_format(split_data)}\n datum = map_union(row1, row2, fmt_splits)\n compiled_data.append(datum)\n\n return compiled_data",
"def safe_apply(row, fn):\n if row:\n return fn(row)\n else:\n return row",
"def apply_rules(self, token_parse_list):\r\n return token_parse_list",
"def merge_rows(self, rows):\n for row in rows:\n yield tuple(self.merge_one_row(row, combine_measurements))",
"def _list_parser(self, old_list):\n for i, item in enumerate(old_list):\n if isinstance(item, dict):\n old_list[i] = Yaco(item)\n elif isinstance(item, list):\n old_list[i] = self._list_parser(item)\n else:\n pass\n return old_list",
"def _parse_rows(rows, header='infer'):\n if not rows:\n raise ValueError('rows={0} is invalid'.format(rows))\n rows = copy.copy(rows)\n label = rows[0][0].replace(' ', '_').lower()\n\n if header == 'infer':\n if len(rows) >= 3:\n if _infer_dtype(rows[1][-1]) != _infer_dtype(rows[2][-1]):\n header = True\n else:\n header = False\n else:\n header = False\n if header is True:\n colnames = rows[1]\n data_idx = 2\n else:\n colnames = None\n data_idx = 1\n\n data_dtypes = [_infer_dtype(val) for val in rows[data_idx]]\n if any(dd == 'pct' for dd in data_dtypes):\n label += '_pct'\n\n parsed_rows = []\n for row in rows[data_idx:]:\n vals = [_convert_val(val, dtype) for val, dtype in zip(row, data_dtypes)]\n if colnames:\n parsed_rows.append({colname:val for colname, val in zip(colnames, vals)})\n else:\n parsed_rows.append(vals)\n\n return label, parsed_rows",
"def parse(\n colstr,\n *,\n hex6=True,\n hex3=True,\n rgbfunc_int=True,\n rgbfunc_float=True,\n rgbfunc_percent=True,\n name_css=True,\n name_crayola=True,\n name_xkcd=True,\n name_meodai_best=True,\n name_meodai=True,\n):\n funcs = []\n if hex6:\n funcs.append(parse_hex6)\n if hex3:\n funcs.append(parse_hex3)\n if rgbfunc_int:\n funcs.append(parse_rgbfunc_int)\n if rgbfunc_float:\n funcs.append(parse_rgbfunc_float)\n if rgbfunc_percent:\n funcs.append(parse_rgbfunc_percent)\n if name_css:\n funcs.append(parse_name_css)\n if name_crayola:\n funcs.append(parse_name_crayola)\n if name_xkcd:\n funcs.append(parse_name_xkcd)\n if name_meodai_best:\n funcs.append(parse_name_meodai_best)\n if name_meodai:\n funcs.append(parse_name_meodai)\n\n res = None\n for func in funcs:\n try:\n res = func(colstr)\n except ValueError:\n pass\n if res is None:\n raise ValueError(f\"Could not find a working parser for {colstr!r}.\")\n return res",
"def process_fix_list(fix_list, fixes):\r\n for line in fix_list:\r\n yield process_fix_line(line, fixes)",
"def _postprocess(\n self,\n result: List[str],\n eojeols: List[str],\n poses: List[str],\n ):\n token_indices = []\n temp_group = []\n for i, res in enumerate(result):\n if (\"<\" in res) or (\">\" in res):\n continue\n if not temp_group:\n temp_group.append(i)\n else:\n if i == (temp_group[-1] + 1):\n temp_group.append(i)\n else:\n token_indices.append(temp_group)\n temp_group = [i]\n token_indices.append(temp_group)\n\n lucrative = 0\n for i, li_index in enumerate(token_indices):\n if poses:\n eojeol = eojeols[i].split(\"+\")\n pos = poses[i].split(\"+\")\n tagged = []\n for e, p in zip(eojeol, pos):\n tagged.append(f\"{e}/{p}\")\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [\"+\".join(tagged)]\n else:\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [eojeols[i]]\n lucrative += len(li_index) - 1\n\n return result",
"def ParseMultiple(\r\n cls,\r\n statements: List[\"Statement.ItemType\"],\r\n normalized_iter: NormalizedIterator,\r\n observer: \"Statement.Observer\",\r\n ignore_whitespace=False,\r\n\r\n # True to ensure that results are sorted to find the best possible match\r\n # (regardless of statement order). False will return the first statement\r\n # matched.\r\n sort_results=True,\r\n\r\n # True to execute all statements within a single thread\r\n single_threaded=False,\r\n ) -> Optional[\"Statement.ParseResult\"]:\r\n\r\n original_statements = statements\r\n if isinstance(original_statements, Statement.NamedItem):\r\n statements = original_statements.Item\r\n\r\n use_futures = not single_threaded and len(statements) != 1\r\n\r\n # ----------------------------------------------------------------------\r\n def Impl(statement):\r\n parser = cls._Parser(\r\n statement,\r\n normalized_iter.Clone(),\r\n observer,\r\n ignore_whitespace=ignore_whitespace,\r\n single_threaded=single_threaded,\r\n )\r\n\r\n success = parser.ParseItem(statement)\r\n if success is None:\r\n return None\r\n\r\n return Statement.ParseResult(success, parser.results, parser.normalized_iter)\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n if use_futures:\r\n futures = observer.Enqueue(\r\n [\r\n lambda statement=statement: Impl(statement)\r\n for statement in statements\r\n ],\r\n )\r\n\r\n results = []\r\n\r\n for future in futures:\r\n result = future.result()\r\n if result is None:\r\n return None\r\n\r\n results.append(result)\r\n\r\n else:\r\n results = []\r\n\r\n for statement in statements:\r\n result = Impl(statement)\r\n if result is None:\r\n return None\r\n\r\n results.append(result)\r\n\r\n if sort_results:\r\n # Stable sort according to the criteria:\r\n # - Success\r\n # - Longest matched content\r\n\r\n sort_data = [\r\n (\r\n index,\r\n 1 if result.Success else 0,\r\n result.Iter.Offset,\r\n )\r\n for index, result in enumerate(results)\r\n ]\r\n\r\n sort_data.sort(\r\n key=lambda value: value[1:],\r\n reverse=True,\r\n )\r\n\r\n result = results[sort_data[0][0]]\r\n\r\n else:\r\n result = None\r\n\r\n for potential_result in results:\r\n if potential_result.Success:\r\n result = potential_result\r\n\r\n break\r\n\r\n if result is None:\r\n result = results[0]\r\n\r\n if result.Success:\r\n return Statement.ParseResult(\r\n True,\r\n [\r\n Statement.StatementParseResultItem(\r\n original_statements,\r\n result.Results,\r\n ),\r\n ],\r\n result.Iter,\r\n )\r\n\r\n return_results: Statement.ParseResultItemsType = []\r\n max_iter: Optional[NormalizedIterator] = None\r\n\r\n for result in results:\r\n return_results += result.Results\r\n\r\n if max_iter is None or result.Iter.Offset > max_iter.Offset:\r\n max_iter = result.Iter\r\n\r\n return Statement.ParseResult(\r\n False,\r\n [\r\n Statement.StatementParseResultItem(\r\n original_statements,\r\n return_results,\r\n ),\r\n ],\r\n cast(NormalizedIterator, max_iter),\r\n )",
"def zip_values(row, widths, types):\n expanded_row = []\n for i, cell in enumerate(row):\n if isinstance(cell, basestring):\n expanded_row.append(tuple(split_text(cell, widths[i])))\n else:\n expanded_row.append((cell, ))\n for row in zip_longest(*expanded_row, fillvalue=empty):\n line = []\n for c, w, t in zip(row, widths, types):\n line.append((c, w, t))\n yield line",
"def preprocess_raw(df):\n nlp = spacy.load(\"en_core_web_sm\")\n\n print(\"Enriching data from dataframe...\")\n\n parse_cols = [\"s1\", \"s2\"]\n s1_docs = []\n s2_docs = []\n for col in parse_cols:\n parse_fail = 0\n\n for doc in nlp.pipe(df[col].values, batch_size=50, n_threads=4):\n if doc.is_parsed:\n if col == \"s1\":\n s1_docs.append(doc)\n else:\n s2_docs.append(doc)\n else:\n # Ensure parse lists have the same number of entries as the original\n # Dataframe regardless of parse failure\n parse_fail += 1\n if col == \"s1\":\n s1_docs.append(None)\n else:\n s2_docs.append(None)\n\n print(f\"{col.upper()} parse failures: {parse_fail}\")\n\n print()\n\n return list(zip(s1_docs, s2_docs))",
"def parse_lines(lines, packages):\n for line in lines:\n x = line.split(' ')\n cmd = x[0].upper()\n #LOG.debug(cmd)\n if 'LIST' in cmd:\n getattr(commands, cmd)(p)\n else:\n getattr(commands, cmd)(line, p)",
"def tag_translator(self, tag_dict, columns_list):\n # translate tags column by column\n for column in columns_list:\n # first deal with special cases (group_list and tag_list)\n if column[0] == 'group_list':\n for i in range(1, len(column)):\n group_li = column[i].split(',')\n # translate and replace the tag code in group list directly\n for j in range(len(group_li)):\n group_li[j] = tag_dict[column[0]].get(group_li[j],\n 'null')\n # now transfer the translated list to a string and write\n # the string back to the column to replace the current code\n # group; first create a str to replace each group\n group_translated_str = ''\n for tag in group_li:\n group_translated_str += tag + ','\n # now write the string back to the list, remove trailing\n # comma\n column[i] = group_translated_str[:-1]\n elif column[0] == 'tag_list':\n for i in range(1, len(column)):\n if '=' in column[i]:\n tag_prob_sep_list = column[i].split('=')\n # so the first entry is a str of tags, the 2nd entry is\n # a str of probability in the tag_prob_sep_list\n tag_code_list = tag_prob_sep_list[0].split(',')\n for j in range(len(tag_code_list)):\n tag_code_list[j] = tag_dict[column[0]].get(\n tag_code_list[j], 'null')\n # now transfer the translated list to a string and write\n # back to the column\n tag_list_translated_str = ''\n for tag in tag_code_list:\n tag_list_translated_str += tag + ','\n tag_list_translated_str = tag_list_translated_str[\n :-1] + '=' + tag_prob_sep_list[1]\n column[i] = tag_list_translated_str\n else: # for some mistakes without equal sign or empty\n mistake_list = column[i].split(',')\n for k in range(len(mistake_list)):\n mistake_list[k] = tag_dict[column[0]].get(\n mistake_list[k], 'null')\n mistake_list_str = ''\n for tag in mistake_list:\n mistake_list_str += tag + ','\n\n column[i] = mistake_list_str[:-1]\n\n # case 1 to 1 if this column needs translation, replace the tag\n # code(key) with the value in the dictionary\n elif column[0] in tag_dict.keys():\n for i in range(1, len(column)):\n column[i] = tag_dict[column[0]].get(column[i], 'null')\n else: # case that the column does not need translation\n pass\n return",
"def parse( self, exprlist ):\n t = self.prop[\"DSEC\"]\n E = self.prop[\"ELE\"]\n V = self.prop[\"VOL\"]\n T = self.prop[\"TEMP\"]\n C = self.prop[\"CONC\"]\n S = self.prop[\"SPEC\"]\n pH = E\n row = []\n for i in exprlist:\n i = i.replace(\"A\", \"self.absorbance\")\n exec 'row.append(' + i + ')'\n return row",
"def format_row(row):\n assert isinstance(row,list)\n \n data_row=[0]*len(header) #Formatted data row to be output and appeneded to 'data'\n \n for i in [0,1,11,13,14,15,16,17,19,20,21,28,31,45,46,47,48]: data_row[i]=row[i] #emptry string will NOT return None\n for i in [2,3,12,18]: data_row[i]=type_cast(lambda x: int(float(x)),row[i])\n for i in [6,7,8,9,10,23,24,25,26,27,29,30]: data_row[i]=type_cast(float,row[i])\n for i in [4,5,22]: data_row[i]=type_cast(datetime.strptime,row[i],'%Y-%m-%d %H:%M:%S')\n for i in range(32,45):\n if row[i]=='False': data_row[i]=False #bool('False') returns True!\n elif row[i]=='True': data_row[i]=True\n else: data_row[i]=None\n return data_row",
"def cast(val):\n\n for func in [int, float, lambda x: x.strip(), lambda x: x]:\n try:\n return func(val)\n except ValueError:\n pass",
"def parse_rows(self, rows):\r\n rows = [\r\n (row_id, parse_date(created), student_module_id)\r\n for row_id, created, student_module_id in rows\r\n ]\r\n return rows",
"def prepareForInsertion(parsed, values):\r\n for element in parsed:\r\n result = []\r\n for arg in Statements.insertRecord.args:\r\n name = arg.name\r\n if arg in values:\r\n result.append(values[arg])\r\n else:\r\n value = element[name] if name in element else None\r\n #Cast the value to the appropriate type when necessary\r\n result.append(arg.cast(value) if arg.cast and value else \r\n value)\r\n yield result"
] | [
"0.7860895",
"0.770681",
"0.5940243",
"0.5763972",
"0.5763972",
"0.5419355",
"0.5315998",
"0.51319647",
"0.5035327",
"0.4946088",
"0.49279934",
"0.4897018",
"0.48079637",
"0.47428817",
"0.46922794",
"0.4687607",
"0.4675127",
"0.46703136",
"0.46676153",
"0.46672013",
"0.4641849",
"0.46291497",
"0.461525",
"0.46142948",
"0.45436153",
"0.45313206",
"0.45287976",
"0.45187458",
"0.45105705",
"0.45055163"
] | 0.77591646 | 1 |
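As a quick illustration of the row above: a minimal, self-contained sketch of the parse_row pattern applied to a CSV-like row of strings. The try_or_none helper is only called, not defined, in that row, so the definition here is an assumed stand-in, and the sample row and parsers are made up.

def try_or_none(f):
    # assumed helper: wrap f so that parse failures return None instead of raising
    def f_or_none(x):
        try:
            return f(x)
        except Exception:
            return None
    return f_or_none

def parse_row(input_row, parsers):
    # apply each parser to its column; leave the value untouched when the parser is None
    return [try_or_none(parser)(value) if parser is not None else value
            for value, parser in zip(input_row, parsers)]

row = ["2014-06-20", "AAPL", "90.91", "n/a"]
parsers = [None, None, float, int]
print(parse_row(row, parsers))   # ['2014-06-20', 'AAPL', 90.91, None]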
try to parse a value using the appropriate function from the parser dict | def try_parse_field(field_name, value, parser_dict):
parser = parser_dict.get(field_name) # None if no such entry
if parser is not None:
return try_or_none(parser)(value)
else:
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def try_parse_field(field_name, value, parser_dict):\n parser = parser_dict.get(field_name) # None if no such entry\n if parser is not None:\n return try_or_none(parser)(value)\n else:\n return value",
"def _parse(value, function, fmt):\n try:\n return function(value)\n except ValueError as e:\n raise_from(ValueError(fmt.format(e)), None)",
"def _parse_value(value):\n # Check if it is a boolean, int, or float value\n try:\n value = json.loads(value.lower())\n return value\n except ValueError:\n return value",
"def __call__(self, value):\n with tf.name_scope('parser'):\n data = decode(value)\n return self._parse_fn(data)",
"def parser(self, value: Optional[Callable[[Mapping], Mapping]]) -> None:\n self._parse = value",
"def parse(self, value):\n raise NotImplementedError(\"Please implement the Class\")",
"def parse_value(cls, value):\n return value",
"def parse_value(cls, value):\n return value",
"def parse_value(cls, value):\n raise NotImplementedError(\"subclass must implement parse_value()\")",
"def value(self) -> ParsedT:\n ...",
"def _parse_and_validate(self, val):\n if self._is_parameter_type:\n val = self._parse(val) if isinstance(val, str) else val\n self._validate_or_throw(val)\n return val",
"def parse_value(self, value):\n\t\t\n\t\tif goodies.is_float(value):\n\t\t\treturn float(value)\n\t\telif goodies.is_int(value):\n\t\t\treturn int(value)\n\t\telif goodies.is_bool(value):\n\t\t\treturn bool(value.capitalize())\n\t\telse:\n\t\t\treturn value",
"def maybe_parse(val, parse_func):\n if val is None:\n return []\n if isinstance(val, (bytes, str)):\n return parse_func(val)\n if isinstance(val, dict):\n return list(val.items())\n if isinstance(val, (list, tuple)):\n return list(val)\n return val",
"def val_parser(parser, inputstring):\n\n inputstring = inputstring.strip()\n\n if float(inputstring) == 9.9e37:\n output = float('inf')\n else:\n output = float(inputstring)\n if parser == int:\n output = parser(output)\n\n return output",
"def _parse_value(\n value_expr: str, target_expr: str, ref_parts: List[str],\n a_type: mapry.Type, registry_exprs: Mapping[mapry.Class, str],\n auto_id: mapry.py.generate.AutoID, py: mapry.Py) -> str:\n # pylint: disable=too-many-branches\n if isinstance(a_type, mapry.Boolean):\n body = _parse_boolean(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n auto_id=auto_id)\n\n elif isinstance(a_type, mapry.Integer):\n body = _parse_integer(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n auto_id=auto_id)\n\n elif isinstance(a_type, mapry.Float):\n body = _parse_float(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n auto_id=auto_id)\n\n elif isinstance(a_type, mapry.String):\n body = _parse_string(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n auto_id=auto_id)\n\n elif isinstance(a_type, mapry.Path):\n body = _parse_path(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n auto_id=auto_id,\n py=py)\n\n elif isinstance(a_type, mapry.Date):\n body = _parse_date(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n auto_id=auto_id)\n\n elif isinstance(a_type, mapry.Datetime):\n body = _parse_date_time(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n auto_id=auto_id)\n\n elif isinstance(a_type, mapry.Time):\n body = _parse_time(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n auto_id=auto_id)\n\n elif isinstance(a_type, mapry.TimeZone):\n body = _parse_time_zone(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n auto_id=auto_id,\n py=py)\n\n elif isinstance(a_type, mapry.Duration):\n body = _parse_duration(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n auto_id=auto_id)\n\n elif isinstance(a_type, mapry.Array):\n body = _parse_array(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n registry_exprs=registry_exprs,\n auto_id=auto_id,\n py=py)\n\n elif isinstance(a_type, mapry.Map):\n body = _parse_map(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n registry_exprs=registry_exprs,\n auto_id=auto_id,\n py=py)\n\n elif isinstance(a_type, mapry.Class):\n body = _parse_instance_reference(\n value_expr=value_expr,\n target_expr=target_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n registry_expr=registry_exprs[a_type],\n auto_id=auto_id)\n\n elif isinstance(a_type, mapry.Embed):\n body = _parse_embed(\n target_expr=target_expr,\n value_expr=value_expr,\n ref_parts=ref_parts,\n a_type=a_type,\n registry_exprs=registry_exprs,\n auto_id=auto_id,\n py=py)\n\n else:\n raise NotImplementedError(\n \"Unhandled parsing of type: {}\".format(a_type))\n\n return body",
"def _parse_value(self, write_token=True, override=None):\n v_str = self.prior_token\n\n # Construct the complex string\n if v_str == '(':\n v_re = self.token\n\n self._update_tokens(write_token)\n assert self.token == ','\n\n self._update_tokens(write_token)\n v_im = self.token\n\n self._update_tokens(write_token)\n assert self.token == ')'\n\n self._update_tokens(write_token, override)\n v_str = '({0}, {1})'.format(v_re, v_im)\n\n recast_funcs = [int, pyfloat, pycomplex, pybool, pystr]\n\n for f90type in recast_funcs:\n try:\n # Unclever hack.. integrate this better\n if f90type == pybool:\n value = pybool(v_str, self.strict_logical)\n else:\n value = f90type(v_str)\n return value\n except ValueError:\n continue",
"def _parse(self):\n pass",
"def extract_value(k, d, f=''):\n if k in d:\n if f != '':\n p = f(d[k])\n else:\n p = d[k]\n\n if type(p) == str:\n v = unicode_decode(p)\n else:\n v = p\n else:\n v = unicode_decode('')\n return v",
"def parseValue(self, value):\n if self.isNumericVector():\n return map(self._pythonType, value.split(','))\n if self.typ == 'boolean':\n return _parseBool(value)\n return self._pythonType(value)",
"def _parse(self, val):\n if self.type == \"integer\":\n return int(val)\n elif self.type == \"number\":\n return float(val)\n elif self.type == \"boolean\":\n lower_val = str(val).lower()\n if lower_val not in {\"true\", \"false\"}:\n msg = \"Boolean parameter '{}' only accept True/False, got {}.\"\n raise ValidationException(\n message=msg.format(self.name, val),\n no_personal_data_message=msg.format(\"[self.name]\", \"[val]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n )\n return True if lower_val == \"true\" else False\n return val",
"def parse_value(string: str) -> Union[str, dict, bool, int, float]:\n unesc_str = unescape(string)\n stripped = string.strip()\n if REGEX_RE.match(stripped):\n return {\"regex\": unesc_str.strip()[7:-2]}\n elif BOOL_RE.match(stripped):\n return stripped.lower() == \"true\"\n elif INT_RE.match(stripped):\n return int(stripped)\n elif FLOAT_RE.match(stripped):\n return float(stripped)\n else:\n return unesc_str[1:-1]",
"def foundValue(key, value):\n value=value.lower().strip()\n d={}\n try:\n\thandlerfn=globals()['convert%s' % key.title()]\n\tresults=handlerfn(value)\n\tif type(results)==type({}):\n\t d.update(results)\n\telif type(results)==type(''):\n\t d[key]=results\n\telif type(results)==type(None):\n\t pass\n\telse:\n\t fatal(\"handler %s returned %s %s\" % (handlerfn, type(results), results))\n except KeyError:\n \td[key]=value\n\n ans=[]\n for k,v in d.items():\n\t#ans.append((k,v.replace(' ','_')))\n\tans.append((k,v))\n return ans",
"def _decode_value(data):\n\n if type(data) is tuple:\n data = data[0]\n\n # Key does not exist\n if data == '0' or data == \"\":\n return None\n \n elif data[0] == _PREFIX:\n\n encoding = data[:2]\n value = data[2:]\n\n if encoding == _TYPE_DOUBLE or encoding == _TYPE_DOUBLE_C:\n return float(value)\n elif encoding == _TYPE_STRING or encoding == _TYPE_STRING_C:\n return value\n elif encoding == _TYPE_INT or encoding == _TYPE_INT_C:\n return int(value)\n elif encoding == _TYPE_BOOL or encoding == _TYPE_BOOL_C:\n return value == \"true\"\n else:\n return data\n\n elif data.startswith(\"<elsystem.collections.vector>\"):\n return _decode_vector(data)\n elif data.startswith(\"<elsystem.collections.dictionary>\"):\n return _decode_dictionary(data)\n else:\n return data",
"def parse_metric_value(self, value):\n if isinstance(value, str):\n if value == \"\":\n return None\n\n # yes|true|on\n if self.is_true.match(value):\n return 1\n # no|false|off\n if self.is_false.match(value):\n return 0\n if self.is_null.match(value):\n return -1\n\n # anything else, try to convert it to a float\n try:\n r = float(value)\n return r\n except:\n pass\n\n return None\n\n return value",
"def mock_parser_fcn(s):",
"def _str_to_val(self, value):\n kind, value = value.split(': ', 1)\n\n # Lists and dictionaries are special case\n if kind in ('L', 'D'):\n return eval(value)\n\n if kind in TYPE_MAPPING.keys():\n if kind == 'B':\n if value != 'True':\n return False\n\n value = TYPE_MAPPING[kind](value)\n\n return value\n else:\n raise ValueError(\"An Unknown type of setting was found!\")",
"def parse(t):\n return t",
"def parse_value(tokens: deque) -> JSON:\n tk = tokens[0]\n\n if tk == \"[\":\n return parse_list(tokens)\n elif tk.type == \"NUMBER\":\n tokens.popleft() # É necessário consumir o 1o token\n return float(tk)\n \n # Complete com as outras regras de objeto, STRING, BOOL e NULL\n # ...\n else:\n raise SyntaxError(\"token inesperada em lista: %r\" % tk)",
"def process_value(self, value: str) -> Any:\n\n if not value:\n if self.data_required:\n raise ValueError('A value is required')\n return None\n return self.data_type(value)",
"def parse(self):\r\n for key, value in KLVParser(self.value, self.key_length):\r\n try:\r\n self.items[key] = self.parsers[key](value)\r\n except Exception:\r\n None"
] | [
"0.6987377",
"0.68641037",
"0.6722652",
"0.66606456",
"0.6572043",
"0.6551176",
"0.6439942",
"0.6439942",
"0.6251729",
"0.6146448",
"0.60798484",
"0.597589",
"0.59163487",
"0.5910115",
"0.5864216",
"0.5855629",
"0.58271813",
"0.5824096",
"0.58008903",
"0.5800582",
"0.5771732",
"0.57661396",
"0.57554907",
"0.57387114",
"0.57383966",
"0.5709823",
"0.5694827",
"0.5685944",
"0.566281",
"0.5649722"
] | 0.6986815 | 1 |
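A similar usage sketch for the try_parse_field row above, reusing the same assumed try_or_none stand-in; field names with no entry in the parser dict pass through unchanged, and the sample record is made up.

def try_or_none(f):
    # same assumed helper as in the earlier sketch
    def f_or_none(x):
        try:
            return f(x)
        except Exception:
            return None
    return f_or_none

def try_parse_field(field_name, value, parser_dict):
    parser = parser_dict.get(field_name)  # None if no such entry
    if parser is not None:
        return try_or_none(parser)(value)
    else:
        return value

parsers = {"closing_price": float, "date": str.strip}
record = {"symbol": "MSFT", "date": " 2014-06-20 ", "closing_price": "41.68"}
parsed = {k: try_parse_field(k, v, parsers) for k, v in record.items()}
print(parsed)   # {'symbol': 'MSFT', 'date': '2014-06-20', 'closing_price': 41.68}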
returns a function that picks a field out of a dict | def picker(field_name):
return lambda row: row[field_name] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_field(key, obj):\n\n val = obj\n\n for subkey in key.split('.'):\n val = val[subkey]\n\n return val",
"def getfield(value, arg):\n #import pdb; pdb.set_trace()\n if hasattr(value, \"fields\"):\n fields = getattr(value, \"fields\")\n if str(arg) in fields:\n return str(fields[str(arg)])",
"def lookup(self, name):\n return self.fieldDict[name]",
"def get_field(self, field):\n return self._dict.get(field)",
"def callableize(f_or_d):\n return f_or_d.get if isinstance(f_or_d,dict) else f_or_d",
"def lookup(model, field_name):\n return getattr(model, field_name)",
"def map_value(field):\n\n if is_map(field):\n return field\n return None",
"def getfield(form, fieldname):\n try:\n return form[fieldname]\n except KeyError:\n return None",
"def get_field_by_key(field, key, val, session):\n sql = select([field]).where(key == val)\n value = session.execute(sql).scalar()\n return value",
"def field(self, field):\n return self.__getitem__(field)",
"def _to_known_field(cls, field_name: str, value) -> (Column, dict):\n field_names = field_name.split(\".\", maxsplit=1)\n if len(field_names) == 2:\n for field in cls.__fields__:\n if field.name == field_names[0] and field.field_type == dict:\n return field, {field_names[1]: value}\n return None, None",
"def get_field(self, field):\n idx = self._keys.index(field)\n return self._data[idx]",
"def bind_function(field):\n t = field['type']\n try:\n if t == 'int':\n return lambda x: random.randint(field['min'], field['max'])\n elif t == 'float':\n return lambda x: field['min'] + ((field['max'] - field['min']) * random.random())\n elif t == 'string':\n return lambda x: ''.join(random.choice(string.letters) for i in xrange(random.randint(1, 15))).lower()\n elif t == 'seededstring':\n return lambda x: field['seed'] + ''.join(random.choice(string.letters) for i in xrange(random.randint(1, 15))).lower()\n elif t == 'ipsum':\n return lambda x: ipsum(field['lines'])\n elif t == 'choice':\n return lambda x: random.choice(field['values'])\n elif t == 'bool':\n return lambda x: bool(random.randint(0, 1))\n elif t == 'name':\n return lambda x: random.choice(names)\n elif t == 'date':\n return lambda x: randdate(field)\n elif t == 'fixed':\n return lambda x: field['value']\n elif t == 'nest':\n return lambda x: nest(field)\n elif t == 'counter':\n return lambda x: x * field.get(\"multiplier\", 1) + field.get(\"offset\", 0)\n else:\n print 'Unknown field type,' + t + ' exiting'\n sys.exit(1)\n except KeyError, k:\n print k\n sys.exit(1)",
"def get_field(entry, field):\n\n if field.name in entry.field_dict:\n if field.choices:\n return getattr(entry.object, \"get_%s_display\" % field.name)()\n return entry.field_dict[field.name]\n else:\n return settings.TEMPLATE_STRING_IF_INVALID",
"def get_dict_field_no_case(data_dict, field):\n if type(data_dict) != dict:\n raise TypeError(\"Not a dictionary\")\n\n for key, value in data_dict.items():\n if str(key).lower() == field:\n return value\n\n return None",
"def try_parse_field(field_name, value, parser_dict):\n parser = parser_dict.get(field_name) # None if no such entry\n if parser is not None:\n return try_or_none(parser)(value)\n else:\n return value",
"def try_parse_field(field_name, value, parser_dict):\n parser = parser_dict.get(field_name) # None if no such entry\n if parser is not None:\n return try_or_none(parser)(value)\n else:\n return value",
"def _lookup_wrapper(d):\n def _inner(key):\n return d[key]\n return _inner",
"def __call__(self, field_name):\n return getattr(self, field_name)",
"def __getattr__(self, key):\n return Field(key, self._mappings[key])",
"def callFuncBasedOnDict(func, argdict, **kwargs):\n if argdict is None:\n argdict = {}\n seldict = selectArgsFromDict(func, argdict)\n if kwargs is not None:\n seldict.update(kwargs)\n return func(**seldict)",
"def use_cached_field(anki: AnkiDeck, mid: int, field: int) -> FieldFunction[str]:\n return lambda sort_field: lookup_field(anki, mid, field, sort_field)",
"def __call__(self, field_name):\n try:\n return getattr(self, field_name)\n except Exception:\n return self._encoded_fields[field_name]",
"def get_by_field(self, field, value):\n for item in self.items:\n if item.__dict__[field] == value:\n return item\n return None",
"def __getitem__(self, key):\n return self.get_field(key)",
"def extractField(field: str, event: LogEvent) -> Any:\n keyFlattener = KeyFlattener()\n\n [[literalText, fieldName, formatSpec, conversion]] = aFormatter.parse(\n \"{\" + field + \"}\"\n )\n\n assert fieldName is not None\n\n key = keyFlattener.flatKey(fieldName, formatSpec, conversion)\n\n if \"log_flattened\" not in event:\n flattenEvent(event)\n\n return event[\"log_flattened\"][key]",
"def get_field_from_dict(example_dict, field_name, height_m_agl=None):\n\n check_field_name(field_name)\n\n if field_name in ALL_SCALAR_PREDICTOR_NAMES:\n height_m_agl = None\n field_index = example_dict[SCALAR_PREDICTOR_NAMES_KEY].index(field_name)\n data_matrix = example_dict[SCALAR_PREDICTOR_VALS_KEY][..., field_index]\n elif field_name in ALL_SCALAR_TARGET_NAMES:\n height_m_agl = None\n field_index = example_dict[SCALAR_TARGET_NAMES_KEY].index(field_name)\n data_matrix = example_dict[SCALAR_TARGET_VALS_KEY][..., field_index]\n elif field_name in ALL_VECTOR_PREDICTOR_NAMES:\n field_index = example_dict[VECTOR_PREDICTOR_NAMES_KEY].index(field_name)\n data_matrix = example_dict[VECTOR_PREDICTOR_VALS_KEY][..., field_index]\n else:\n field_index = example_dict[VECTOR_TARGET_NAMES_KEY].index(field_name)\n data_matrix = example_dict[VECTOR_TARGET_VALS_KEY][..., field_index]\n\n if height_m_agl is None:\n return data_matrix\n\n height_index = match_heights(\n heights_m_agl=example_dict[HEIGHTS_KEY],\n desired_height_m_agl=height_m_agl\n )\n\n return data_matrix[..., height_index]",
"def get_property_func(key):\n def get_it(obj):\n try:\n return getattr(obj, key)\n except AttributeError:\n return obj.tags.get(key)\n return get_it",
"def get_random_value_from_dict(d: dict):\n return d[get_random_key_from_dict(d)]",
"def _get(obj, name):\n try:\n # try to get value using dict's __getitem__ descriptor first\n return dict.__getitem__(obj, name)\n except TypeError:\n # if it's a dict, then preserve the TypeError\n if isinstance(obj, dict):\n raise\n # otherwise try one last time, relying on __getitem__ if any\n return obj[name]"
] | [
"0.66878",
"0.66624385",
"0.6654425",
"0.65902525",
"0.6499913",
"0.6372742",
"0.6320475",
"0.6256116",
"0.620161",
"0.6171656",
"0.61647815",
"0.6113857",
"0.6098898",
"0.6067647",
"0.60438067",
"0.6030453",
"0.6029466",
"0.5986976",
"0.5981687",
"0.5969282",
"0.5962245",
"0.59517175",
"0.5947724",
"0.59153426",
"0.5895796",
"0.58679634",
"0.582714",
"0.57951146",
"0.57623404",
"0.57545096"
] | 0.6674588 | 1 |
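For illustration, a minimal sketch of how a field picker like the one in the row above is typically combined with map, max, or sorted; the sample rows are made up.

def picker(field_name):
    return lambda row: row[field_name]

rows = [
    {"symbol": "AAPL", "closing_price": 90.91},
    {"symbol": "MSFT", "closing_price": 41.68},
    {"symbol": "FB", "closing_price": 64.5},
]
print(list(map(picker("closing_price"), rows)))   # [90.91, 41.68, 64.5]
print(max(rows, key=picker("closing_price")))     # {'symbol': 'AAPL', 'closing_price': 90.91}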
Send an email with the given information using sendgrid API. ``sender`` and ``recipient`` must be ``Contact`` instances. | def send_email(sender, recipient, subject, html_content, txt_content, api_key):
post_headers = SENDGRID_POST_HEADERS.copy()
post_headers['Authorization'] = 'Bearer {0}'.format(api_key)
data = {
'content': [
{
'type': 'text/plain',
'value': txt_content,
},
{
'type': 'text/html',
'value': html_content,
}
],
'from': {
'email': sender.email,
'name': sender.name,
},
'personalizations': [
{
'to': [
{
'email': recipient.email,
'name': recipient.name,
}
],
}
],
'subject': subject,
}
response = requests.post(
'{api_url}/mail/send'.format(api_url=SENDGRID_API_URL),
headers=post_headers,
data=json.dumps(data, ensure_ascii=False).encode('utf-8')
)
response.raise_for_status() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_email(email_subject, recipient, message, config = None):\n try:\n config = current_app.config\n except:\n config = config\n\n sender = sendgrid.SendGridClient(config['SENDGRID_API_KEY'])\n\n email = sendgrid.Mail()\n\n email.set_subject(email_subject)\n email.add_to(recipient)\n email.set_from(config['FROM_EMAIL'])\n email.set_from_name(config['FROM_NAME'])\n email.set_replyto(config['FROM_NAME'])\n email.set_html(message)\n\n status, msg = sender.send(email)\n\n return status, msg",
"def send(self, from_email, to_list, cc_list, bcc_list, subject, text):\n\n to_address_list = []\n\n if len(to_list) > 0:\n for to_address in to_list:\n to_address_list.append(\n {\n \"email\": to_address,\n \"type\": \"to\"\n }\n )\n\n if len(cc_list) > 0:\n for cc_address in cc_list:\n to_address_list.append(\n {\n \"email\": cc_address,\n \"type\": \"cc\"\n }\n )\n\n if len(bcc_list) > 0:\n for bcc_address in bcc_list:\n to_address_list.append(\n {\n \"email\": bcc_address,\n \"type\": \"bcc\"\n }\n )\n\n sendgrid_data = {\n \"key\": sendgrid_key,\n \"message\": {\n \"text\": text,\n \"subject\": subject,\n \"from_email\": from_email,\n \"to\": to_address_list\n },\n \"async\": False,\n }\n\n response = requests.post(\n sendgrid_url,\n data=json.dumps(sendgrid_data)\n )\n\n if response.ok:\n status = 0\n else:\n status = 1\n\n message = str(response.content)\n\n return status, message",
"def send_mail(sender_domain, sender_name, sender_account, recipient, subject, text=None, html=None):\n msg_data = {\n 'from': '{} <{}@{}>'.format(sender_name, sender_account, sender_domain),\n 'to': [recipient],\n 'subject': subject\n }\n if text:\n msg_data['text'] = text\n elif html:\n msg_data['html'] = html\n else:\n raise ValueError('Expected html or text body')\n\n resp = requests.post(\n 'https://api.mailgun.net/v3/{}/messages'.format(sender_domain),\n auth=(\"api\", mailgun_key),\n data=msg_data\n )\n resp.raise_for_status()\n\n return 'mailgun: ' + resp.json()['id']",
"def send_api(recipient, subject, text):\n mailer = Mail(\n from_email=settings.MAIL_FROM_ADDRESS,\n to_emails=recipient,\n subject=subject,\n html_content=text)\n try:\n sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\n response = sg.send(mailer)\n return response.status_code\n except Exception as e:\n print(e)",
"def send_email(to_email, to_name, from_email, subject, text_msg, html_msg):\n params = {'api_user': SENDGRID_API_USERNAME, 'api_key': SENDGRID_API_KEY, 'to': to_email, 'toname': to_name, 'subject': subject}\n params['from'] = from_email\n if text_msg != \"\":\n params['text'] = text_msg\n if html_msg != \"\":\n params['html'] = html_msg\n\n resp = requests.post(SENDGRID_WEB_API_SEND_EMAIL_URL, params)\n print(resp.text)\n json_resp = json.loads(resp.text)\n if 'message' in json_resp and json_resp['message'] == \"success\":\n return True\n else:\n return False",
"def send(self, address_to, message, emailSubject = \"Automated Email\", attachmentFilePath = None):\r\n\t\tmail = self._createEmail(address_to, message, emailSubject)\r\n\t\tif attachmentFilePath != None:\r\n\t\t\tmail.attachment = self._createAttachment(attachmentFilePath)\r\n\t\tsg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\r\n\t\tresponse = sg.send(mail)\r\n\t\tif response.status_code == 202:\r\n\t\t\tprint(\"Email sent\")\r\n\t\telse:\r\n\t\t\tprint(\"Email not sent. Please check error codes below - \")\r\n\t\t\tprint(response.status_code)\r\n\t\t\tprint(response.headers)",
"def send_message():\n # @todo validation & error handling.\n sg = SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))\n log(\"Message generated and sent at {}\".format(strftime('%x %H:%M:%S')))\n sg.client.mail.send.post(request_body=build_message())",
"def send_email(subject, sender, recipients, html_body):\n\n try:\n # Create a new SendGrid Mail object with the arguments given\n message = Mail(\n from_email=sender,\n to_emails=recipients,\n subject=subject,\n html_content=html_body)\n\n # We prepare a new Thread here to send the email in the background. This takes in the send_async_email\n # function as its target and runs the function with the parameters passed through args.\n Thread(target=send_async_email,\n args=(current_app._get_current_object(), message)).start()\n\n except Exception as e:\n print(e)\n # FIXME: should do some type of error handling here or allow error to bubble up",
"def send_email(self, to_address, subject, body, cc_recipients=[]):\n\n # Build and send message\n msg = Message(\n account=self.account,\n folder=self.account.sent,\n subject=subject,\n body= HTMLBody(body),\n to_recipients=[Mailbox(email_address=to_address)],\n cc_recipients=[(Mailbox(email_address=x)) for x in cc_recipients]\n )\n\n msg.send_and_save()\n print(\"Message to {} sent.\".format(to_address))",
"def send_email(recipient, subject, message):\n from_email = os.getenv(\"EMAIL_SENDER\")\n status = send_mail(subject, message, from_email, [recipient])\n return status",
"def send_email(from_email, to_emails, subject, text, smtp_domain):\n if smtp_domain not in config[\"GUN_MAIL\"] or not config[\"GUN_MAIL\"].get(\n smtp_domain\n ).get(\"smtp_password\"):\n raise NotFound(\n \"SMTP Domain '{}' does not exist in configuration for GUN_MAIL or \"\n \"smtp_password was not provided. \"\n \"Cannot send email.\".format(smtp_domain)\n )\n\n api_key = config[\"GUN_MAIL\"][smtp_domain].get(\"api_key\", \"\")\n email_url = config[\"GUN_MAIL\"][smtp_domain].get(\"api_url\", \"\") + \"/messages\"\n\n return requests.post(\n email_url,\n auth=(\"api\", api_key),\n data={\"from\": from_email, \"to\": to_emails, \"subject\": subject, \"text\": text},\n )",
"def _send_smtp(message, subject, to, to_name, sender, sender_name):\n host = app.config.get('MAIL_HOST')\n\n if not host:\n raise MailFailure('SMTP Server Not Configured')\n\n try:\n server = smtplib.SMTP(host)\n except (smtplib.SMTPConnectError, socket.error) as ex:\n app.logger.error('Unable to send mail: %s', str(ex))\n raise MailFailure('Error connecting to SMTP server.')\n\n msg = text.MIMEText(message)\n msg['Subject'] = subject\n msg['To'] = email.utils.formataddr((to_name, to))\n msg['From'] = email.utils.formataddr((sender_name, sender))\n\n try:\n if app.debug:\n server.set_debuglevel(True)\n server.sendmail(sender, [to], msg.as_string())\n except (smtplib.SMTPException, socket.error) as ex:\n app.logger.error('Unable to send mail: %s', str(ex))\n raise MailFailure('Error sending mail to SMTP server.')\n finally:\n try:\n server.quit()\n except smtplib.SMTPException:\n pass",
"def send_email(from_email, from_name, subject, body, to_address):\n\n # Initializing important data from environment\n mg_domain = os.environ.get('MAILGUN_DOMAIN_NAME', None)\n mg_key = os.environ.get('MAILGUN_API_KEY', None)\n\n # Preparing the data to be sent as email\n url = 'https://api.mailgun.net/v3/{}/messages'.format(mg_domain)\n auth = ('api', mg_key)\n data = {\n 'from': '{} <{}>'.format(from_name, from_email),\n 'to': to_address,\n 'subject': subject,\n 'text': body,\n }\n\n # Sending the email\n response = requests.post(url, auth=auth, data=data)\n return response.status_code",
"def send_email(self, email_from, email_to, message):\n logging.info(\"Attempting to send email from \" + email_from + \" to \" + email_to)\n self.conn.sendmail(email_from, email_to, message)\n logging.info(\"Email sent\")",
"def send_mail(from_email=None, to_email=None, subject=None, body=None):\n\n mandrill_cli = mandrill.Mandrill(MANDRILL_API_KEY)\n\n message = {\n 'from_email': from_email,\n 'headers': {'Reply-To': from_email},\n 'html': body,\n 'metadata': {'website': WEBSITE_URL},\n 'subject': subject,\n 'to': to_email\n }\n\n mandrill_cli.messages.send(message=message, async=True)",
"def send_email(self):\n message = MIMEText(self.email_body, 'plain', 'utf-8')\n\n message['Subject'] = self.email_subject\n message['From'] = gmail_user\n message['To'] = ', '.join(self.recipients)\n\n try:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.ehlo()\n\n server.login(gmail_user, gmail_password)\n\n server.sendmail(message['From'], self.recipients, message.as_string())\n\n server.close()\n\n print('Email sent!')\n except Exception as err:\n # TODO Write error to log file\n raise err",
"def send(self, recipient, template_path, context, subject, bcc_email=[]):\n\n body = self.email_render(template_path, context)\n self.send_email(recipient, subject, body, bcc_email)",
"def _send_email(\n recipient_id, sender_id, intent, email_subject, email_html_body,\n sender_email, bcc_admin=False, sender_name=None, reply_to_id=None):\n\n if sender_name is None:\n sender_name = EMAIL_SENDER_NAME.value\n\n _require_sender_id_is_valid(intent, sender_id)\n\n recipient_email = user_services.get_email_from_user_id(recipient_id)\n cleaned_html_body = html_cleaner.clean(email_html_body)\n if cleaned_html_body != email_html_body:\n log_new_error(\n 'Original email HTML body does not match cleaned HTML body:\\n'\n 'Original:\\n%s\\n\\nCleaned:\\n%s\\n' %\n (email_html_body, cleaned_html_body))\n return\n\n raw_plaintext_body = cleaned_html_body.replace('<br/>', '\\n').replace(\n '<br>', '\\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\\n<p>')\n cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)\n\n if email_models.SentEmailModel.check_duplicate_message(\n recipient_id, email_subject, cleaned_plaintext_body):\n log_new_error(\n 'Duplicate email:\\n'\n 'Details:\\n%s %s\\n%s\\n\\n' %\n (recipient_id, email_subject, cleaned_plaintext_body))\n return\n\n def _send_email_in_transaction():\n \"\"\"Sends the email to a single recipient.\"\"\"\n sender_name_email = '%s <%s>' % (sender_name, sender_email)\n\n email_services.send_mail(\n sender_name_email, recipient_email, email_subject,\n cleaned_plaintext_body, cleaned_html_body, bcc_admin,\n reply_to_id=reply_to_id)\n email_models.SentEmailModel.create(\n recipient_id, recipient_email, sender_id, sender_name_email, intent,\n email_subject, cleaned_html_body, datetime.datetime.utcnow())\n\n transaction_services.run_in_transaction(_send_email_in_transaction)",
"def send_email(form_instance, **kwargs):\n cleaned_data = form_instance.cleaned_data\n\n try:\n from_email = cleaned_data.pop(kwargs[\"from_email_field\"])\n except KeyError:\n raise exceptions.MissingActionParam(\"send_email\", \"from_email_field\")\n try:\n to_email = cleaned_data.pop(kwargs[\"to_email_field\"])\n except KeyError:\n raise exceptions.MissingActionParam(\"send_email\", \"to_email_field\")\n try:\n subject = cleaned_data.pop(kwargs[\"subject_field\"])\n except KeyError:\n raise exceptions.MissingActionParam(\"send_email\", \"subject_field\")\n\n if \"uuid\" in cleaned_data:\n del cleaned_data[\"uuid\"]\n\n if \"form_id\" in cleaned_data:\n del cleaned_data[\"form_id\"]\n\n email_body = \"\".join([\n \"%s: %s\\n\\r\" % (get_label(form_instance, label), value)\n for label, value in cleaned_data.items()\n ])\n send_mail(subject, email_body, from_email, [to_email])",
"def send_email(self, source, subject, body, to_addresses, cc_addresses=None,\r\n bcc_addresses=None, format='text', reply_addresses=None,\r\n return_path=None, text_body=None, html_body=None):\r\n format = format.lower().strip()\r\n if body is not None:\r\n if format == \"text\":\r\n if text_body is not None:\r\n raise Warning(\"You've passed in both a body and a text_body; please choose one or the other.\")\r\n text_body = body\r\n else:\r\n if html_body is not None:\r\n raise Warning(\"You've passed in both a body and an html_body; please choose one or the other.\")\r\n html_body = body\r\n\r\n params = {\r\n 'Source': source,\r\n 'Message.Subject.Data': subject,\r\n }\r\n\r\n if return_path:\r\n params['ReturnPath'] = return_path\r\n\r\n if html_body is not None:\r\n params['Message.Body.Html.Data'] = html_body\r\n if text_body is not None:\r\n params['Message.Body.Text.Data'] = text_body\r\n\r\n if(format not in (\"text\",\"html\")):\r\n raise ValueError(\"'format' argument must be 'text' or 'html'\")\r\n\r\n if(not (html_body or text_body)):\r\n raise ValueError(\"No text or html body found for mail\")\r\n\r\n self._build_list_params(params, to_addresses,\r\n 'Destination.ToAddresses.member')\r\n if cc_addresses:\r\n self._build_list_params(params, cc_addresses,\r\n 'Destination.CcAddresses.member')\r\n\r\n if bcc_addresses:\r\n self._build_list_params(params, bcc_addresses,\r\n 'Destination.BccAddresses.member')\r\n\r\n if reply_addresses:\r\n self._build_list_params(params, reply_addresses,\r\n 'ReplyToAddresses.member')\r\n\r\n return self._make_request('SendEmail', params)",
"def send_email(subject, sender, recipients, text_body, html_body):\n\t\tmsg = Message(subject, sender=sender, recipients=recipients)\n\t\tmsg.body = text_body\n\t\tmsg.html = html_body\n\t\tmail.send(msg)",
"def _send_mailjet(message, subject, to, to_name, sender, sender_name):\n api_key = app.config.get('MJ_APIKEY_PUBLIC')\n api_secret = app.config.get('MJ_APIKEY_PRIVATE')\n if not api_key or not api_secret:\n app.logger.error('Missing MJ_APIKEY_PUBLIC/MJ_APIKEY_PRIVATE!')\n return\n # Note the data structures we use are api v3.1\n client = mailjet_rest.Client(\n auth=(api_key, api_secret),\n api_url='https://api.mailjet.com/',\n version='v3.1')\n from_obj = {\n \"Email\": sender,\n }\n if sender_name:\n from_obj[\"Name\"] = sender_name\n to_obj = [{\n \"Email\": to,\n }]\n if to_name:\n to_obj[0][\"Name\"] = to_name\n message = {\n \"From\": from_obj,\n \"To\": to_obj,\n \"Subject\": subject,\n \"TextPart\": message,\n }\n result = client.send.create(data={'Messages': [message]})\n if result.status_code != 200:\n app.logger.error(\n 'Error sending via mailjet: (%d) %r',\n result.status_code, result.text)\n raise MailFailure('Error sending via mailjet!')\n try:\n j = result.json()\n except Exception:\n app.logger.error('Error sending via mailjet: %r', result.text)\n raise MailFailure('Error sending via mailjet!')\n if j['Messages'][0]['Status'] != 'success':\n app.logger.error('Error sending via mailjet: %r', j)\n raise MailFailure('Error sending via mailjet!')",
"def send_email(subject, sender, recipients, text_body, html_body):\n msg = Message(subject=subject, sender=sender, recipients=recipients)\n msg.body = text_body\n msg.html = html_body\n mail.send(msg)",
"def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'",
"def send_email_via_api(self, to, subject, message):\n\n return self.mail.send(to, subject, message)",
"def send(self, to, context={}, template=None, from_email=None, bcc=[],\n connection=None, attachments=[], headers={}, cc=[], reply_to=[], fail_silently=False):\n email_message = self.get_email_message(to, context, template, from_email, bcc, connection, attachments, headers, cc, reply_to)\n return email_message.send(fail_silently=fail_silently)",
"def mail(note,\n sender,\n recipients,\n cc_recipients=[],\n attachments=[],\n subject = '',\n verbosity = 0):\n if verbosity > 1:\n msgb(\"SENDING EMAIL\")\n note = [x.rstrip() for x in note]\n body = '\\n'.join(note)\n att = []\n for attachment in attachments:\n att.append( (attachment, os.path.basename(attachment)) )\n try:\n _send_email(recipients,\n sender,\n subject,\n body,\n att,\n cc_recipients,\n verbosity)\n except:\n die(\"Sending email failed\")\n return 0",
"def send_mail(email_name, subject, recipients, request=None, **kwargs):\n\n # Make recipients iterable if it is not already (allow caller to pass a\n # single recipient, or a list.\n if isinstance(recipients, str):\n recipients = (recipients,)\n\n # A text template is required, if we can't load it, fail.\n try:\n text_template = select_template([\n 'main/email/{}.txt'.format(email_name),\n 'main/email/{}.text'.format(email_name),\n ])\n except TemplateDoesNotExist:\n raise ValueError('No template for email: %s' % email_name)\n\n # An HTML template is optional.\n try:\n html_template = get_template('main/email/{}.html'.format(email_name))\n except TemplateDoesNotExist:\n html_template = None\n\n # Produce our message body(s) from our templates using supplied context\n # (if any).\n message = text_template.render(context=kwargs, request=request)\n\n if html_template:\n html_message = html_template.render(context=kwargs, request=request)\n else:\n html_message = None\n\n # Build the from address.\n email_from = '%s <%s>' % settings.EMAIL_FROM\n\n # Send the email using the Django send_mail() function.\n _send_mail(subject, message, email_from, recipients,\n html_message=html_message)",
"def send_mail(to, subject, html, text, reply_to=None, sender=SENDER):\n\n from caravel.utils import principals\n\n # Verify that we are not sending spam to people.\n if not (isinstance(to, principals.Principal) and to.valid):\n raise ValueError(\"{!r} does not consented to email.\".format(to))\n\n # Verify that we are not sending spam from people.\n if reply_to:\n if not (isinstance(reply_to, principals.Principal) and reply_to.valid):\n raise ValueError(\"{!r} has not consented to send email.\"\n .format(reply_to))\n\n # Actually send the message to the user.\n _send_raw_mail(\n to=to.email,\n subject=subject,\n html=html,\n text=text,\n reply_to=reply_to.email if reply_to else None,\n sender=sender\n )",
"def send_email(self, to_addr, cc_addr, bcc_addr, topic, text, callback):\n\n\t\tmail_data = {\n\t\t\t\"key\": self.key,\n\t\t\t\"message\": self._prepare_message(to_addr, cc_addr, bcc_addr, topic, text)\n\t\t}\n\t\tbody = tornado.escape.json_encode(mail_data)\n\n\t\trequest = HTTPRequest(\n\t\t\turl=config.MANDRILL_URL + \"/messages/send.json\",\n\t\t\tconnect_timeout=config.TIMEOUT, request_timeout=config.TIMEOUT,\n\t\t\tbody=body, method='POST', validate_cert = False)\n\n\t\tresponse = yield tornado.gen.Task(\n\t\t\tself.http_client.fetch, request)\n\n\t\tbody = json.loads(response.body)\n\t\tif (int(response.code) == config.RESPONSE_OK\n\t\t\t\tand body[0]['status'] == 'sent'):\n\t\t\t# Each sent email gets assigned a different id. First (To address) used.\n\t\t\temail_id = body[0]['_id']\n\t\t\tcallback(config.SEND_STATUS.SENT, email_id)\n\t\t\treturn\n\t\telse:\n\t\t\tcallback(config.SEND_STATUS.FAILED, None)\n\t\t\treturn"
] | [
"0.7199089",
"0.6802284",
"0.66871035",
"0.6629718",
"0.6578772",
"0.65338874",
"0.63848585",
"0.62755436",
"0.62045836",
"0.61931556",
"0.61594427",
"0.6107116",
"0.6099107",
"0.60616297",
"0.59760606",
"0.59718096",
"0.595867",
"0.59301484",
"0.59264135",
"0.59",
"0.5880816",
"0.5862126",
"0.58566284",
"0.58559823",
"0.5849173",
"0.58274806",
"0.58268696",
"0.58162516",
"0.5811251",
"0.58080477"
] | 0.7293909 | 0 |
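A hedged usage sketch for the SendGrid-backed send_email in the row above. The Contact type is only referenced there, so a namedtuple stand-in is assumed here, and the actual call is left commented out because it would POST to the live SendGrid API with a real key.

from collections import namedtuple

# assumed stand-in for the Contact class referenced by send_email
Contact = namedtuple("Contact", ["name", "email"])

sender = Contact(name="Price Alerts", email="alerts@example.com")
recipient = Contact(name="Jane Doe", email="jane@example.com")

# The call below matches the signature in the row; a real key should come from
# configuration or the environment, never from source code.
# send_email(sender, recipient,
#            subject="Price drop detected",
#            html_content="<p>The price fell below your alert level.</p>",
#            txt_content="The price fell below your alert level.",
#            api_key="SG.xxxxxxxx")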
this checks for 'ie' in the words that are passed in as arguments; it also takes into consideration the fact that after 'c' it should be 'ei', which is done by evaluating 'cie' in the parameter (word) | def check(word):
if 'ie' in word:
print('{} doesn\'t follow the rule'.format(word))
elif 'cie' in word:
print('{} doesn\'t follow the rule'.format(word))
else:
print('{} does follow the rule'.format(word)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search(self, word):",
"def basic_check(word):\n if word[-1] == \"b\" or word[-1] == \"g\":\n return False\n consonant_counter = 0\n for char in word:\n if char in VOWELS:\n consonant_counter = 0\n else:\n consonant_counter += 1\n if consonant_counter >= 3:\n return False\n return True",
"def check_word(self, gair):\n\n gair = ''.join(gair)\n try:\n r = requests.post(self.url, data={\"search_word\": gair})\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n print(e)\n return None\n ans = self.parse(r)\n if \"non trouvé, orthographe incorrecte.\" in ans:\n return False\n return True",
"def check_word(words, word):\r\n if word in words:\r\n return True\r\n else:\r\n return False",
"def find_words_using_all_vowels():\n pass",
"def search_single_word(word):\n # YOUR CODE HERE #\n pass # delete this when you write your code",
"def find_vowels(s):\n \"*** YOUR CODE HERE ***\"",
"def main():\n word = input(\"Give me a word! \\n\\n\")\n vowels = ['a', 'e', 'i', 'o', 'u']\n if word[0].lower() in vowels:\n print(f\"\\n\\nPig latin: {word}way\")\n else:\n print(f\"\\n\\nPig latin: {word[1:]}{word[0]}ay\")",
"def usedWord(afz, word, output=True):\n count = 0\n for msg in msgs:\n if msg.afz == afz:\n if word.lower() in msg.msg.lower():\n count = count + 1\n print afz, 'heeft', count, 'keer het woord', word, 'gebruikt.'",
"def search_multiple_words(words):\n # YOUR CODE HERE #\n pass # delete this when you write your code",
"def search4vowels(word):\n vowels = set('aeiou')\n found = vowels.intersection(set(word))\n #return found\n for vowels in found:\n print(vowels)",
"def test_find_word(self):\n self.assertEqual(find_word('GREEN'), [(1, 1), (1, 1), (0, 9)])\n self.assertEqual(find_word('ABSENT'), [])\n self.assertEqual(find_word('PW'), [(1, 7), (3, 7), (0, 8)])",
"def step1c(self, word):\r\n\r\n if word.endswith('y'):\r\n result = word.rfind('y')\r\n base = word[:result]\r\n if self.containsVowel(base):\r\n word = base\r\n word += 'i'\r\n return word",
"def checkWord(self, word):\n\t\treturn self.root.checkString(u' ' + word);",
"def search_clues(self):\r\n print(\"\\n************Searching Clues************\\n\")\r\n for word_id in self.words.keys():\r\n if not self.words[word_id].see and not self.words[word_id].wth:\r\n clue = pop_backslash(self.words[word_id].clue)\r\n temp = word_domain(\"allintext:\" + clue +' -crossword',self.words[word_id].length)\r\n temp2 = temp + word_domain(clue +' -crossword',self.words[word_id].length)\r\n domain = temp2 + data_muse(clue, self.words[word_id].length)\r\n unique_list = []\r\n for x in domain: \r\n y = x.upper()\r\n # check if exists in unique_list or not \r\n if y not in unique_list: \r\n unique_list.append(y) \r\n \r\n self.words[word_id].assign_word_domain(unique_list)\r\n print(\"\\nSearch is done...\")",
"def check_strings(aword, anotherword):\n if aword == anotherword:\n return True\n else:\n return False",
"def x_ian(x, word):\n if x == \"\":\n return True\n else:\n letWordLoc = word.find(x[0])\n if (letWordLoc != -1):\n return x_ian(x[1:], word[(letWordLoc + 1):])\n else:\n return False",
"def isValidWord(word, hand, wordList):\n # TO DO ... <-- Remove this comment when you code this function\n #for line in fil:\n # print line\n if word not in w:\n w.append(word)\n #print w\n else:\n #print \"already have a word inside\"\n return False\n for z in word:\n print z\n if z in hand:\n #if p != word:\n #print hand[c]\n if hand[z] > 0:\n hand[z] -= 1\n \n else:\n return False\n else:\n return False\n if word in wordList:\n print \"here is true\"\n return True\n else:\n return False",
"def fry(word):\n\n # looks for a Y or y which will be (captured) followed and ended by an 'ou'\n match_you = re.match('([Yy])ou$', word)\n\n # First group will be the (captured) group so either 'Y' or 'y'\n if match_you:\n return match_you.group(1) + \"'all\"\n\n # looks for anyword ending in 'ing'\n match_ing = re.search('(.+)ing$', word)\n\n # checks if vowel exists before the 'ing'\n if match_ing:\n vowel_check = re.search('[aeiouy]', match_ing.group(1))\n # First group will be the (captured) group so everything before the 'ing'\n if vowel_check:\n return match_ing.group(1) + \"in'\"\n\n return word",
"def choose_word():\n pass",
"def search(self, word: str) -> bool:\n cur = self.root\n for letter in word:\n if letter not in cur:\n return False\n cur = cur[letter]\n if \"isWord\" not in cur:\n return False\n return True",
"def uses_all(word, required):\n pass",
"def search4vowels(phrase:str) -> set:\n vowels = set('aeiou')\n #word = input('Enter a word to search for vowels:')\n found = vowels.intersection(set(phrase))\n #for vowel in found:\n #print(vowel)\n\n return (found)",
"def IsIt(rule,cstr):\n try:\n if(oilcc_isIt[rule].search(cstr)):\n return True\n else:\n return False\n except:\n return False",
"def checkWord(word):\r\n check = word in cachedWordList\r\n if check:\r\n print(word + \" spelt correctly\")\r\n else:\r\n print(word + \" not found in dictionary\")\r\n return check",
"def check(self,word):\n if self.pre:\n def sub_word(chars):\n if re.match('^'+chars+'.*',word):\n return word[len(chars):]\n else:\n return None\n else:\n def sub_word(chars):\n if re.match('^.*'+chars+'$',word):\n return word[:-len(chars)]\n else:\n return None\n\n if word == '':\n return self\n for chars in self.branches.keys():\n res = sub_word(chars)\n if res:\n return self.branches[chars].check(res)\n elif res == '':\n return self.branches[chars]\n return None",
"def is_valid_word(word, hand, word_list):\n failure=True\n word=word.lower()\n if word not in word_list:\n failure=False\n for i in word:\n w=hand.get(i,0)\n if w==0:\n failure=False\n break\n return failure",
"def search_word(word : str = typer.Argument(..., help=\"Searches the trie if the word exists\")):\n response_url = url + \"/search/\" + word\n response = requests.get(response_url)\n typer.echo(response.json()[\"status\"])",
"def isvalid_word1(word, hand, word_list1):\n # TO DO ... <-- Remove this comment when you code this function\n for i in word:\n if i not in hand:\n return False\n i = i\n return word in word_list1",
"def contains (self,phrase,chars):\r\n\r\n for x in chars:\r\n\r\n if x in phrase:\r\n return True\r\n return False"
] | [
"0.6563026",
"0.61535275",
"0.6104765",
"0.60968226",
"0.5950032",
"0.59073645",
"0.5877695",
"0.5859684",
"0.5853565",
"0.5838554",
"0.5829444",
"0.582781",
"0.5826885",
"0.57807684",
"0.5766645",
"0.5766633",
"0.57525086",
"0.5734965",
"0.57145435",
"0.56988156",
"0.5696558",
"0.56959814",
"0.56807196",
"0.5674118",
"0.56667036",
"0.56634426",
"0.56615037",
"0.5652758",
"0.5646736",
"0.5631402"
] | 0.80172086 | 0 |
If the price difference is positive, accept the change. Else, use chance to sometimes accept a negative change. | def check_price(self, price_diff):
chance = exp(price_diff / self.T)
if price_diff < 0 and not chance > random():
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compare_price(self):\n if self.__product_price < self.__alert_price:\n #print(\"price drop...\")\n self.__alert_client = True\n self.__price_difference = self.__product_price - self.__alert_price\n else:\n #print(\"Price not reduced...\")\n self.__alert_client = False\n self.__price_difference = self.__product_price - self.__alert_price",
"def test_accept_change():\n from numpy import sqrt, count_nonzero, exp\n\n energy = MagicMock\n mc = MonteCarlo(energy, [1, 1, 1], temperature=100.0)\n # Should always be true.\n # But do more than one draw,\n # in case randomness incorrectly crept into\n # implementation\n for i in range(10):\n assert mc.accept_change(0.5, 0.4)\n assert mc.accept_change(0.5, 0.5)\n\n # This should be accepted only part of the time,\n # depending on exponential distribution\n prior, successor = 0.4, 0.5\n accepted = [mc.accept_change(prior, successor) for i in range(10000)]\n assert count_nonzero(accepted) / float(len(accepted)) \\\n == approx(exp(-(successor - prior) / mc.temperature), 3e0 / sqrt(len(accepted)))",
"def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")",
"def check_required_change(drink, amount):\n if (drink == \"espresso\" and amount > MENU[drink][\"cost\"]) or (drink == \"latte\" and amount > MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount > MENU[drink][\"cost\"]):\n return amount - MENU[drink][\"cost\"]\n else:\n return 0.00",
"def is_sufficient(money_received, price):\n if price <= money_received:\n change = round(money_received - price, 2)\n print(f\"Here is your {option}.Enjoy!\\nHere us £{change} in change\")\n global profit\n profit += price\n return True\n else:\n print(f\"Sorry not enough money\")\n return False",
"def test_amount_not_enough(self):\n item, change, _ = give_item_and_change('coke', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)",
"def calcul_risk(self):\n if (self.take_profit - self.buy_price) >= (\n self.buy_price - self.stop_loss\n ) * self.risk:\n return True\n else:\n return False",
"def _toss_fair_coin() -> bool:\n return random.random() > 0.5",
"def stealability(self):\n stealability_score = float(self.price) / float(self.weight)\n print (stealability_score)\n\n if stealability_score < 0.5:\n return 'Not so stealable...'\n elif stealability_score >= 0.5 and stealability_score < 1.0:\n return 'Kinda stealable.'\n else:\n return 'Very stealable!'",
"def check_price():\n global NUMBER_OF_TOTAL_COINS, BEVERAGE_PRICE\n\n if NUMBER_OF_TOTAL_COINS == BEVERAGE_PRICE:\n return True\n elif NUMBER_OF_TOTAL_COINS < BEVERAGE_PRICE:\n return False\n else:\n return \"FATAL\"",
"def check_min_value(self, tracked_price):\n if tracked_price < self.min_value and self.warning_calls <= 2:\n print(f'Warning! Price dropeed under {self.min_value} pln {tracked_price}')\n self.make_phone_call()\n self.warning_calls += 1\n elif tracked_price < self.min_value and self.warning_calls == 3:\n self.send_a_message(\n f'This is a warning message. Price of EUR/PLN dropped under critical value!'\n f' {self.min_value} pln')\n print(f'Called 3 times! Price dropeed under {self.min_value} pln {tracked_price}')\n self.warning_calls = 0\n else:\n print(f\"Current price for Euro in PLN is {tracked_price}\")",
"def update_price_model(self, good, order_type, is_successful, clearing_price=0):\n\n SIGNIFICANT = 0.25 # 25% more or less is \"significant\"\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1 # 10% of ideal inventory = \"LOW\"\n HIGH_INVENTORY = 2.0 # 200% of ideal inventory = \"HIGH\"\n MIN_PRICE = 0.01 # lowest allowed price of a Good\n\n if is_successful:\n # add this trade to the observed trading range\n self.observed_trading_range[good].append(clearing_price)\n\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05 # the degree which the Pop should bid outside the belief\n\n # how different the public mean price is from the price belief\n delta_to_mean = mean - public_mean_price\n\n if is_successful:\n if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:\n # this Pop overpaid, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n # this Pop underpaid!, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # increase the belief's certainty\n belief.low += wobble * mean\n belief.high -= wobble * mean\n\n else:\n # shift towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # check for inventory special cases\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n\n # if we're buying and inventory is too low\n # meaning we're desperate to buy\n if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:\n wobble *= 2\n\n # if we're selling and inventory is too high\n # meaning we're desperate to sell\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n # all other cases\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n\n # TODO: figure out why this is sometimes 0\n if sells + buys > 0:\n\n supply_vs_demand = (sells - buys) / (sells + buys)\n\n if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:\n # too much supply? lower bid lower to sell faster\n # too much demand? raise price to buy faster\n\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n\n # shift the price belief to the new price mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n\n # decrease belief's certainty since we've just changed it (we could be wrong)\n belief.low -= wobble * mean\n belief.high += wobble * mean\n\n # make sure the price belief doesn't decrease below the minimum\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE",
"def process_payment(money_received, drink_cost):\n if money_received >= drink_cost:\n change = round(money_received - drink_cost, 2)\n print(f\"Here is ${change} in change.\")\n global profit\n profit += drink_cost\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n return False",
"def test_NegativePriceCheck(self):\n # Basic price check\n self.log.info(\"Price checking Negative Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Negative Item\")\n \n # Confirm the right item, at the right price\n # NOTE: Price check returns negative prices as possitive. Legacy defect deemed 'Will Not Fix'\n self.read_price_check(\"Negative Item\", \"$5.00\")\n # Add the item\n pos.click(\"Sell Item\")\n \n # Confirm we are in a transaction\n if not self.in_transaction():\n self.tc_fail(\"POS did not start a transaction; can not confirm item was added\")\n else:\n self.log.info(\"Confirmed we are in a transaction\")\n \n # Confirm we added the item, and that it was negative\n ret = self.confirm_line(-1, \"Negative Item\", \"-$5.00\")\n if ret == True:\n self.log.info(\"Confirmed item added\")\n else:\n self.tc_fail(ret)\n \n # Setup for next test\n self.recover()",
"def bet(self, amount):\r\n\r\n if self.players[self.active_player].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[(self.active_player + 1) %\r\n len(self.players)].name)\r\n self.game_message.emit(message)\r\n self.restart()\r\n if self.players[(self.active_player + 1) % len(self.players)].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[self.active_player].name)\r\n self.game_message_warning.emit(message)\r\n self.restart()\r\n\r\n if amount == 0:\r\n message = \"Raises must be larger than zero!\"\r\n self.game_message_warning.emit(message)\r\n\r\n elif self.previous_bet + amount > self.players[self.active_player].credits:\r\n message = \"Not enough money!\"\r\n self.game_message_warning.emit(message)\r\n else:\r\n self.pot += amount\r\n self.new_pot.emit()\r\n\r\n self.players[self.active_player].credits -= (self.previous_bet + amount)\r\n self.new_credits.emit()\r\n\r\n output_text = \"{} bet ${} and raised ${}\".format(self.players[self.active_player].name, self.previous_bet,\r\n amount)\r\n\r\n self.previous_bet = (self.previous_bet + amount)\r\n self.actions += 1\r\n\r\n self.new_output.emit(output_text)\r\n\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n\r\n # Update the players to hide their cards when it is not their turn\r\n for player in self.players:\r\n player.flip_cards()\r\n\r\n self.progress_game()",
"def make_payment(self, cost):\n self.process_coins()\n if self.money_received >= cost:\n change = round(self.money_received - cost, 2)\n print(f\"Here is {self.CURRENCY}{change} in change.\")\n self.profit += cost\n self.money_received = 0\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n self.money_received = 0\n return False",
"def price_check(cash, price, shares):\n affordable = (cash - (price * shares)) > 0\n\n if affordable:\n return affordable\n\n else:\n return False",
"def check_restrictions(self):\n from .signals import determine_availability\n\n responses = determine_availability.send(\n self.item.event, item=self.item,\n variations=[self.to_variation_dict()], context=None,\n cache=self.item.event.get_cache()\n )\n price = self.default_price if self.default_price is not None else self.item.default_price\n for receiver, response in responses:\n if 'available' in response[0] and not response[0]['available']:\n return False\n elif 'price' in response[0] and response[0]['price'] is not None and response[0]['price'] < price:\n price = response[0]['price']\n return price",
"def __call__(self, auctioneer):\n possible_bid = self.bid_increase_perc * auctioneer.get_highest_bid()\n if possible_bid < self.budget and random.random() <= self.bid_probability:\n self.highest_bid = possible_bid\n auctioneer.accept_bid(possible_bid, self)",
"def test_give_correct_change(self):\n item, change, _ = give_item_and_change('coke', 1)\n self.assertEqual(item, 'coke')\n self.assertEqual(change, [.20, .05, .02])",
"def success_chance(dc,modifier=0,adv=False,disadv=False):\r\n if adv:\r\n return 1-((dc-modifier-1)/20)**2\r\n elif disadv:\r\n return (1-(dc-modifier-1)/20)**2\r\n return 1-(dc-modifier-1)/20",
"def check_transaction(coins_inserted, cost_drink, machine_balance):\n if coins_inserted < cost_drink:\n return False\n else:\n if coins_inserted > cost_drink:\n change_given = coins_inserted - cost_drink\n print(f\"Here is ${change_given:0.2f} in change.\")\n return machine_balance + cost_drink",
"def calc_new_bid_price_after_failure( self, cheapest_price ):\n\n new_bid_price = cheapest_price * 1.1\n return str(new_bid_price)",
"def approves(self):\n # verify trailing stop-loss threshold has been met\n thresholdMet = self.analysis.trailing_percentage >= constants.PERCENT_TRAILING_CLOSE_THRESHOLD\n\n # verify price has reverted back to the mean\n if self.analysis.initial_order_type == \"buy\":\n meanReverted = self.analysis.current_price >= self.analysis.current_volume_weighted_average_price\n else:\n meanReverted = self.analysis.current_price <= self.analysis.current_volume_weighted_average_price\n\n # return approval\n _approval = thresholdMet or meanReverted\n if _approval:\n self.logger.log(self.analysis.__dict__)\n self.logger.log(\"%s close approved!\" % self.ticker)\n return _approval",
"def test_not_enough_change(self):\n item, change, _ = give_item_and_change('apple', '.2')\n self.assertIsNone(item)\n self.assertEqual(change, 0.2)",
"def trade_offer(self, price):\n if self.is_sold:\n raise AlreadySoldError(\"Electricity product already sold\")\n\n if self.remaining_slots <= 0:\n raise LeadtimePassedError(\"Lead time passed\")\n\n succesful_trade = random.random() < self.selling_chance(price)\n self.remaining_slots -= 1\n\n if succesful_trade:\n profit = price - self.product_price\n self.is_sold = True\n elif self.remaining_slots == 0:\n profit = -self.product_price\n else:\n profit = 0\n\n return (profit, self.is_sold)",
"def test_callPrice(self):\n call_price1 = calculator.BlackScholes.call_price(**self.params_1)\n call_price2 = calculator.BlackScholes.call_price(**self.params_2)\n self.assertAlmostEqual(call_price1,10.45,delta=0.01)\n self.assertAlmostEqual(call_price2,7.965,delta=0.01)",
"def check_price(self):\n return self.day*self.price",
"def charge(self, price):\n '''try:\n type(price) == int or type(price) == float\n except ValueError: \n print 'Not a number!'\n \n if type(price) != int or type(price) != float:\n raise ValueError(\"Not a number!\")\n '''\n if price < 0:\n return False\n elif price + self._balance > self._limit:\n return False\n else:\n self._balance += price\n return True",
"def accept_reject(self, energy_new, energy_old):\n with np.errstate(invalid='ignore'):\n # The energy values being fed to Metropolis are 1-length arrays, and if\n # they are equal, their difference is 0, which gets multiplied by beta,\n # which is inf, and array([0]) * float('inf') causes\n #\n # RuntimeWarning: invalid value encountered in multiply\n #\n # Ignore this warning so so when the algorithm is on a flat plane, it always\n # accepts the step, to try to move off the plane.\n prod = -(energy_new - energy_old) * self.beta\n w = math.exp(min(0, prod))\n\n rand = self.random_gen.uniform()\n return w >= rand"
] | [
"0.681601",
"0.66513777",
"0.6579652",
"0.65598977",
"0.64981097",
"0.640799",
"0.6377233",
"0.63660145",
"0.6304649",
"0.6303129",
"0.63005",
"0.625263",
"0.621822",
"0.6180557",
"0.6165783",
"0.6160765",
"0.6143225",
"0.6127727",
"0.60954076",
"0.6059905",
"0.6056775",
"0.6055967",
"0.60322034",
"0.6014373",
"0.60113895",
"0.601084",
"0.6010746",
"0.5982734",
"0.5980307",
"0.5968364"
] | 0.8071096 | 0 |
Test case for liechtensteinsettlements_get | def test_liechtensteinsettlements_get(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_liechtensteinsettlements_id_get(self):\n pass",
"def test_austriansettlements_get(self):\n pass",
"def test_austriansettlements_id_get(self):\n pass",
"def test_generate_single_element_get(self):\n pass",
"def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'instructions_html', 'sourceUrl', 'typeSource', 'poster', 'annotation_storage']:\r\n self.assertIn(key, context)",
"def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'instructions_html', 'annotation_storage', 'token', 'tag', 'openseadragonjson']:\r\n self.assertIn(key, context)",
"def test_gettesttools_html(self):\n pass",
"def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'tag', 'source', 'instructions_html', 'content_html', 'annotation_storage', 'token']:\r\n self.assertIn(key, context)",
"def test_base(self):\n driver = self.driver\n\n # LANDING PAGE\n driver.get(\"http://www.google.com/\")\n\n # google's home page has a link to their privacy policy\n privacy = driver.find_elements_by_xpath(\n \"//*[contains(text(), 'Privacy')]\")\n\n # it should return a list with something in it\n assert len(privacy) == 1\n\n # get the element\n element = privacy[0]\n # it should be displayed\n assert element.is_displayed()\n # it should have nonzero size\n assert element.size['height'] > 10\n assert element.size['width'] > 10\n\n log.debug('All Checks Succeeded!')",
"def test_get_elements(self):\n\n xml_file = get_test_config('test_files/subscriptions.xml')\n self.assertEqual(xml_utilities.get_elements('foo', xml_file),\n [],\n 'Got invalid elements')\n subscriptions = xml_utilities.get_elements('subscription', xml_file)\n self.assertEqual(len(subscriptions),\n 2,\n 'Got wrong number of elements')\n tag_names = [x.tagName for x in subscriptions]\n self.assertEqual(['subscription', 'subscription'],\n tag_names,\n 'Got wrong elements')",
"def test_elements(self):\n self.assertIsInstance(self.analytics.suites[testReportSuite].elements, omniture.utils.AddressableList)",
"def test_get_instructions(self):\r\n xmltree = etree.fromstring(self.sample_xml)\r\n\r\n expected_xml = u\"<div><p>Helper Test Instructions.</p></div>\"\r\n actual_xml = get_instructions(xmltree)\r\n self.assertIsNotNone(actual_xml)\r\n self.assertEqual(expected_xml.strip(), actual_xml.strip())\r\n\r\n xmltree = etree.fromstring('<annotatable>foo</annotatable>')\r\n actual = get_instructions(xmltree)\r\n self.assertIsNone(actual)",
"def test_v1_alert_list_get(self):\n pass",
"def test_get_services_html(self):\n pass",
"def get_element_list(self):\n pass",
"def extract_element(self):\n self.skipWhiteSpace()\n segment = self.test_script_source.current_segment()\n # special symbols\n if segment in TestScriptElementType.script_special_symbols:\n self.current_elmt = ScriptSpecialSymbolElement(self.test_script_source)\n # reserved words\n elif segment in TestScriptElementType.script_reserved_words:\n self.current_elmt = ScriptReservedWordsElement(self.test_script_source)\n # variable\n elif segment.startswith('$'):\n self.current_elmt = ScriptVariableElement(self.test_script_source)\n if self.test_script_source.current_pos == 0:\n segval = TestScriptSymbolTable.get_value_from_sym_tab(segment, TestScriptSymbolTable.test_script_sym_tab)\n if segval is not None and segval != \"\":\n for key in TestScriptElementType.script_testbed_devices:\n if re.search(key, segval, re.I):\n self.current_elmt = TestbedDeviceElement(self.test_script_source)\n self.current_elmt.segment_text = segval\n self.current_elmt.element_type = TestScriptElementType.get_element_type(segval)\n break\n else:\n # capi commands\n for cmd in TestScriptElementType.script_capi_commands:\n if re.search(cmd, segment, re.I): \n self.current_elmt = CAPICommandElement(self.test_script_source)\n self.test_script_source.line_attr = \"CAPI\"\n\n self.next_segment = self.test_script_source.next_segment()\n return self.next_segment\n\n is_testbed_device_elem = False\n \n # use regular expression to search the testbed device vendor name\n if not self.test_script_source.test_script_source_name == common.GlobalConfigFiles.dut_info_file:\n for key in TestScriptElementType.script_testbed_devices:\n if re.search(key, segment, re.I): \n # skip the variable such as $DUT_IF which causes it is recognized as testbed DUT\n if self.test_script_source.current_pos == self.test_script_source.total_num_seg - 1 and key != segment:\n break\n\n self.current_elmt = TestbedDeviceElement(self.test_script_source)\n is_testbed_device_elem = True\n break\n\n if not is_testbed_device_elem:\n # based on the name of the file - DUTInfo.txt, we classify the current element into Feature element\n if self.test_script_source.test_script_source_name == common.GlobalConfigFiles.dut_info_file and (segment in TestScriptElementType.test_feature_key_words):\n self.current_elmt = TestFeatureElement(self.test_script_source)\n elif segment == \"EOL\": # for return variable\n self.current_elmt = EndOfLineElement(self.test_script_source)\n elif self.test_script_source.current_pos == self.test_script_source.total_num_seg - 1 and self.test_script_source.line_attr == \"CAPI\":\n self.current_elmt = CAPIReturnElement(self.test_script_source)\n elif segment == \"EOF\":\n self.next_segment = \"EOF\"\n return\n else:\n self.current_elmt = ScriptStringElement(self.test_script_source)\n #self.current_elmt = ScriptErrorElement(self.test_script_source)\n #raise LexicaError\n\n self.next_segment = self.test_script_source.next_segment()\n return self.next_segment",
"def test_get_instructions(self):\n xmltree = etree.fromstring(self.sample_xml)\n\n expected_xml = \"<div><p>Helper Test Instructions.</p></div>\"\n actual_xml = get_instructions(xmltree)\n assert actual_xml is not None\n assert expected_xml.strip() == actual_xml.strip()\n\n xmltree = etree.fromstring('<annotatable>foo</annotatable>')\n actual = get_instructions(xmltree)\n assert actual is None",
"def get_elements_supporting_js_event(self, js_event_type):\n query = f'//*[@demod_{js_event_type}][@{REACHABLE_ATT_NAME}=\"true\"]'\n els = self.query_xpath(query)\n return els",
"def getElementProperties():",
"def test_extractMenu(self):\n doc = lunchr.parseHtml(self.html)\n self.assertEquals(self.menu, lunchr.extractMenu(doc))",
"def _get_elements(cls):\n raise NotImplementedError()",
"def test_get_html(self):\r\n _html = self.peer_grading.get_html()",
"def applicants_skills(driver):\n try:\n raw_skills = driver.find_elements_by_css_selector(\"span.pill\")\n skills = [skill.text for skill in raw_skills] \n return skills\n except Exception as e:\n print(\"error acquiring applicant skills\")\n print(e)\n return []",
"def test_llist_get_element_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n TestLList.llist_integer.get()",
"def entitlements(self) -> Entitlements:\n return self.__entitlements",
"def test_get_software_set(self):\n pass",
"def test_functionality(self):\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True) \n \n self.get_ServicesPage(\"\",\"Firmware_update_Template\")\n \n self.logout()",
"def test_extract_instructions(self):\r\n xmltree = etree.fromstring(self.sample_xml)\r\n\r\n expected_xml = u\"<div><p>Video Test Instructions.</p></div>\"\r\n actual_xml = self.mod._extract_instructions(xmltree) # pylint: disable=W0212\r\n self.assertIsNotNone(actual_xml)\r\n self.assertEqual(expected_xml.strip(), actual_xml.strip())\r\n\r\n xmltree = etree.fromstring('<annotatable>foo</annotatable>')\r\n actual = self.mod._extract_instructions(xmltree) # pylint: disable=W0212\r\n self.assertIsNone(actual)",
"def test_fax_inbound_automation_get(self):\n pass",
"def test_getMenu(self):\n urllib2.urlopen = self.urlopen_mock\n self.assertEquals(u'\\xc4RTSOPPA & PANNKAKOR m. sylt & gr\\xe4dde# SEJ m. \\xe4gg- & persiljes\\xe5s samt kokt potatis* \\xa4 KYCKLINGFAJITASm. paprika, salsa & tortillas* VEG: QUORNFAJITASm. paprika, salsa & tortillas', lunchr.getMenu('http://www.example.com', 3))"
] | [
"0.7831955",
"0.7138092",
"0.5891819",
"0.5717251",
"0.56602263",
"0.56293994",
"0.5599581",
"0.5564111",
"0.541159",
"0.5313962",
"0.52749103",
"0.51254624",
"0.5107244",
"0.5106074",
"0.50819343",
"0.50544786",
"0.5054461",
"0.503181",
"0.5025818",
"0.5009385",
"0.49504754",
"0.4923747",
"0.49214306",
"0.4901251",
"0.49005988",
"0.4899019",
"0.48800784",
"0.48695928",
"0.48558116",
"0.4852753"
] | 0.93017524 | 0 |
Test case for liechtensteinsettlements_id_get | def test_liechtensteinsettlements_id_get(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_austriansettlements_id_get(self):\n pass",
"def test_liechtensteinsettlements_get(self):\n pass",
"def test_solareclipses_id_get(self):\n pass",
"def test_intercommunalitys_id_get(self):\n pass",
"def test_prefectures_id_get(self):\n pass",
"def test_variablepresentations_id_get(self):\n pass",
"def test_installments_id_get(self):\n pass",
"def getID():",
"def test_id():\n with expected_protocol(\n DCXS,\n [(\"?\", \"DCXS750-4\"), ],\n ) as inst:\n assert inst.id == \"DCXS750-4\"",
"def test_get_id():\n vc = vtec.parse(EX1)\n assert vc[0].get_id(2005) == \"2005-KJAN-TO-W-0130\"",
"def test_metrostations_id_get(self):\n pass",
"def test_plays_id_get(self):\n pass",
"def test_user_id_identities_get(self):\n pass",
"def test_meme_meme_id_get(self):\n pass",
"def test_brains_id_get(self):\n pass",
"def test_variables_id_get(self):\n pass",
"def test_austriansettlements_get(self):\n pass",
"def getID(self) -> int:\n ...",
"def getId(self):",
"def test_comicscreators_id_get(self):\n pass",
"def test_id(self):\n result = self.test_client.id\n\n assert result == \"86576599\"",
"def test_workflows_id_get(self):\n pass",
"def test_getItineraryFromId(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + date['date'])\n invuid = '00000000000000000000000'\n\n rv = self.json_get('/getItineraryFromId/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getItineraryFromId/alex', {'uid': invuid})\n assert 'Itinerary not found' in str(rv.data)\n\n rv = self.json_get('/getItineraryFromId/alex', {'uid': uid})\n assert uid in str(rv.data)",
"def test_getId(self):\n cases = [\n (self.test_eac + 'NE00401.xml','NE00401'),\n (self.test_eac + 'NE00101.xml','NE00101'),\n (self.test_eac + 'NE00915.xml','NE00915'),\n (self.test_eac + 'NE01001.xml','NE01001'),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com/metadata.xml', 'http://www.example.com/presentation.html')\n result = doc.getRecordId()\n self.assertNotEqual(doc, None)\n self.assertEquals(result, expected)",
"def test_poets_id_get(self):\n pass",
"def identifier(self):\n return self.element.xpath('./@Id')",
"def test_coupledmodels_id_get(self):\n pass",
"def id(self) -> Optional[str]:\n return self.elem.get('id')",
"def test_get_chain_by_id(self):\n pass",
"def test_id(self):\n result = self.test_client.id\n\n assert result == \"10423098\""
] | [
"0.82076895",
"0.7358317",
"0.6770782",
"0.6580353",
"0.6308794",
"0.6245422",
"0.6235614",
"0.6187761",
"0.60393614",
"0.6010711",
"0.60008794",
"0.5999302",
"0.5997519",
"0.5997346",
"0.59716815",
"0.5962987",
"0.59535784",
"0.59419966",
"0.59416705",
"0.5927985",
"0.59233904",
"0.59162366",
"0.5902242",
"0.5893645",
"0.58864164",
"0.5847122",
"0.5817824",
"0.5808202",
"0.5805722",
"0.5777499"
] | 0.94481444 | 0 |
Return an iterator over snapshots for a timevarying component attribute with values for all nontimevarying components filled in with the default values for the attribute. | def get_switchable_as_iter(network, component, attr, snapshots, inds=None):
df = network.df(component)
pnl = network.pnl(component)
index = df.index
varying_i = pnl[attr].columns
fixed_i = df.index.difference(varying_i)
if inds is not None:
inds = pd.Index(inds)
index = inds.intersection(index)
varying_i = inds.intersection(varying_i)
fixed_i = inds.intersection(fixed_i)
# Short-circuit only fixed
if len(varying_i) == 0:
return repeat(df.loc[fixed_i, attr], len(snapshots))
def is_same_indices(i1, i2):
return len(i1) == len(i2) and (i1 == i2).all()
if is_same_indices(fixed_i.append(varying_i), index):
def reindex_maybe(s):
return s
else:
def reindex_maybe(s):
return s.reindex(index)
return (
reindex_maybe(df.loc[fixed_i, attr].append(pnl[attr].loc[sn, varying_i]))
for sn in snapshots
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def with_time(self):\n if self.time_slices is None:\n raise FeatureError(\"Feature has no time reference.\")\n\n for i, datum in enumerate(self.data[self.name]):\n yield (self.time_slices[i], datum)",
"def with_time(self):\n key = list(self.keys())[0]\n length = len(self[key])\n time_slices = self[key].time_slices\n\n if time_slices is None:\n raise FeatureError(\"FeatureCollection has no time reference.\")\n\n for i in range(length):\n res = {}\n for key, feature in self.items():\n res[key] = feature.data[feature.name][i]\n yield (time_slices[i], res)",
"def test_file_write_attributes_for_each(self):\n\n with OrthoMultiTs(self.testfilename, n_loc=3, mode=\"w\") as dataset:\n n_data = 5\n locations = np.array([1, 2, 3])\n data = {\n \"test\": np.arange(n_data * 3).reshape(3, n_data),\n \"test2\": np.arange(n_data * 3).reshape(3, n_data)\n }\n base = datetime(2007, 1, n_data)\n dates = np.array(\n [base + timedelta(hours=i) for i in range(n_data)])\n descriptions = np.repeat([str(\"station\")], 3).tolist()\n\n dataset.write_all(locations,\n data,\n dates,\n loc_descrs=descriptions,\n lons=np.arange(3),\n lats=np.arange(3),\n alts=np.arange(3),\n attributes={\n \"test\": {\n \"testattribute\": \"teststring\"\n },\n \"test2\": {\n \"testattribute2\": \"teststring2\"\n }\n })\n\n with OrthoMultiTs(self.testfilename) as dataset:\n data = dataset.read_all(2)\n nptest.assert_array_equal(data[\"test\"], np.arange(5) + 5)\n assert dataset.dataset.variables[\n \"test\"].testattribute == \"teststring\"\n assert dataset.dataset.variables[\n \"test2\"].testattribute2 == \"teststring2\"\n test_dates = []\n for n_data in [5]:\n base = datetime(2007, 1, n_data)\n test_dates.append(\n np.array(\n [base + timedelta(hours=i) for i in range(n_data)]))\n dates = np.concatenate(test_dates)\n nptest.assert_array_equal(data[\"time\"], dates)",
"def _series_date_value_iter(data_points: List[dict]) -> Generator:\n for data_point in data_points:\n yield data_point[\"generic:ObsDimension\"][\"@value\"], data_point[\"generic:ObsValue\"][\"@value\"]",
"def getMatrixAttributeInTimeRange(node, attribute, timeRange=None):\n fn = mUtils.getFn(node)\n plug = fn.findPlug(attribute, 0)\n if plug.isArray:\n plug.evaluateNumElements()\n plug = plug.elementByPhysicalIndex(0)\n if not timeRange:\n timeRange=getCurrentAnimRange()\n result = list()\n for x in range(int(timeRange[0]), int(timeRange[1])):\n timeContext = om.MDGContext(om.MTime(x, TIMEUNIT))\n matrixO = plug.asMObject(timeContext)\n fnMat = om.MFnMatrixData(matrixO)\n matrix = fnMat.matrix()\n result.append(matrix)\n return result",
"def get_switchable_as_dense(network, component, attr, snapshots=None, inds=None):\n df = network.df(component)\n pnl = network.pnl(component)\n\n index = df.index\n\n varying_i = pnl[attr].columns\n fixed_i = df.index.difference(varying_i)\n\n if inds is not None:\n index = index.intersection(inds)\n varying_i = varying_i.intersection(inds)\n fixed_i = fixed_i.intersection(inds)\n if snapshots is None:\n snapshots = network.snapshots\n\n vals = np.repeat([df.loc[fixed_i, attr].values], len(snapshots), axis=0)\n static = pd.DataFrame(vals, index=snapshots, columns=fixed_i)\n varying = pnl[attr].loc[snapshots, varying_i]\n\n res = pd.concat([static, varying], axis=1, sort=False).reindex(columns=index)\n res.index.name = \"snapshot\" # reindex with multiindex does not preserve name\n\n return res",
"def extraireTemps(self, noeud=\"Turn\", attributs=(\"startTime\", \"endTime\")):\n temps = self.entree.find_all(noeud)\n\n for debut, fin in map(lambda x: (x.get(attributs[0]), x.get(attributs[1])), temps):\n yield debut, fin",
"def test_multiple_time_coverage_attrs(self, tmpdir):\n files = [\n self.netcdf_file(tmpdir, \"f1.nc\", values=[1], global_attrs={\n \"start_time\": \"200001010745Z\",\n \"stop_time\": \"20000101T120000Z\",\n\n \"start_date\": \"01-JAN-2000 07:45:00.000000\",\n \"stop_date\": \"01-JAN-2000 12:00:00.000000\",\n }),\n self.netcdf_file(tmpdir, \"f2.nc\", values=[2], global_attrs={\n \"start_time\": \"20000101T120000Z\",\n \"stop_time\": \"200001041200Z\",\n\n \"start_date\": \"04-JAN-2000 00:00:00.000000\",\n \"stop_date\": \"04-JAN-2000 12:00:00.000000\",\n })\n ]\n agg = CCIAggregationCreator(\"time\").create_aggregation(\"drs\", \"t.ac.uk\", files)\n\n attrs_dict = self.get_attrs_dict(agg)\n expected_attrs = [\n \"start_time\", \"stop_time\",\n \"start_date\", \"stop_date\",\n # Still expect to find time_coverage_{start,end} even though\n # they're not in the source files\n \"time_coverage_start\", \"time_coverage_end\",\n \"time_coverage_duration\"\n ]\n for attr in expected_attrs:\n assert attr in attrs_dict\n\n assert attrs_dict[\"time_coverage_start\"][\"value\"] == \"20000101T074500Z\"\n assert attrs_dict[\"start_time\"][\"value\"] == \"20000101T074500Z\"\n # Note that ISO date is still used in output even though input format\n # is different for {start,stop}_date\n assert attrs_dict[\"start_date\"][\"value\"] == \"20000101T074500Z\"\n\n assert attrs_dict[\"time_coverage_end\"][\"value\"] == \"20000104T120000Z\"\n assert attrs_dict[\"stop_time\"][\"value\"] == \"20000104T120000Z\"\n assert attrs_dict[\"stop_date\"][\"value\"] == \"20000104T120000Z\"\n\n assert attrs_dict[\"time_coverage_duration\"][\"value\"] == \"P3DT4H15M\"",
"def grid_iterator(self):\n n_values = [len(v) for v in self.table.values()]\n for i in xrange(self.slice_start, self.slice_end):\n yield self.variable_base(i, n_values)",
"def static_trajectory(Tinit, n):\n for i in xrange(n):\n yield Tinit",
"def time_x_values(city='Los Angeles', parameter='pm25'):\r\n status, body = api.measurements(city='Los Angeles', parameter='pm25')\r\n values = []\r\n for result in body['results']:\r\n time = result['date']['utc']\r\n value = result['value']\r\n time_x_value.append((time, value))",
"def __iter__(self):\n return self._timeseriesData.__iter__()",
"def filter_time_range(start: int, end: int, time_slots: Optional[Container[int]] = None) -> Iterator[int]:\n if time_slots is None:\n time_slots = range(24)\n for time in range(start, end):\n if time in time_slots:\n yield time",
"def unscaled_variables_generator(blk, descend_into=True, include_fixed=False):\n for v in blk.component_data_objects(pyo.Var, descend_into=descend_into):\n if v.fixed and not include_fixed:\n continue\n if get_scaling_factor(v) is None:\n yield v",
"def values(self):\n for ts in self:\n yield self[ts]",
"def value_iterator(self):\n return _osgAnimation.mapVertexInfluence_value_iterator(self)",
"def variableIter(self):\n for (para, start), variable in self.variables.iteritems():\n yield para, start, variable",
"def get_attribs(self, attribs: List[str]) -> Iterable[Tuple[str, str]]:\n binfo = self.build_info\n for attrib in attribs:\n try:\n val = self._get_attrib(attrib, binfo)\n except Exception as e:\n logger.error(f'could not get attribute {attrib}: {e}')\n raise e\n if self.type_strict and not isinstance(val, str):\n raise ValueError(f'wrong value found for attribute: {attrib}')\n if val is not None:\n yield ((attrib, val))\n elif self.exist_strict:\n raise ValueError(f'no such attribute: {attrib}')",
"def _iter_items(data_sequence):\n for time, element in data_sequence:\n for item in element:\n yield time, item",
"def iter_components(self):\n for iv in range(len(self._var_names)):\n yield self._var_names[iv], self._vals[iv]",
"def __iter__(self) -> Tuple[str, Any]:\n for attr_name, attr_val in self.__dict__.items():\n yield attr_name, attr_val",
"def test_n_loc(self):\n dates = pd.date_range(start=\"2007-01-01\", end=\"2007-02-01\")\n\n ts = pd.DataFrame(\n {\n \"var1\": np.arange(len(dates)),\n \"var2\": np.arange(len(dates))\n },\n index=dates)\n\n dataset = GriddedNcContiguousRaggedTs(self.testdatapath,\n self.grid,\n mode=\"w\")\n fill_values = {\"var1\": 5, \"var2\": 5}\n\n for gpi in self.gpis:\n dataset.write(gpi, ts, fill_values=fill_values)",
"def getLinIterTimes( self, var, index = 0 ):\n\n values = self.getLinIterData( var, index )\n return values[1]",
"def create_regressor_attributes(df, attribute, list_of_prev_t_instants) :\n \n list_of_prev_t_instants.sort()\n start = list_of_prev_t_instants[-1] \n end = len(df)\n df['datetime'] = df.index\n df.reset_index(drop=True)\n\n df_copy = df[start:end]\n df_copy.reset_index(inplace=True, drop=True)\n\n for attribute in attribute :\n foobar = pd.DataFrame()\n\n for prev_t in list_of_prev_t_instants :\n new_col = pd.DataFrame(df[attribute].iloc[(start - prev_t) : (end - prev_t)])\n new_col.reset_index(drop=True, inplace=True)\n new_col.rename(columns={attribute : '{}_(t-{})'.format(attribute, prev_t)}, inplace=True)\n foobar = pd.concat([foobar, new_col], sort=False, axis=1)\n\n df_copy = pd.concat([df_copy, foobar], sort=False, axis=1)\n \n df_copy.set_index(['datetime'], drop=True, inplace=True)\n return df_copy",
"def all(self):\n datapoint_params = self._make_datapooint_param_iter()\n if datapoint_params is None:\n return iter([])\n params_list = list(datapoint_params) # construct param list\n return self._gen(params_list)",
"def get_itds_v2(timestamps, ears, types, max_itd=800e-6, save_to_file=None, verbose=False, return_attributes=False):\n ears = ears.astype(np.bool)\n itds_to_return, timestamps_to_return, ears_to_return, types_to_return = [], [], [], []\n\n timestamps_dict = {}\n timestamp_indices_dict = {}\n for ear in np.unique(ears):\n timestamps_dict[ear] = {}\n timestamp_indices_dict[ear] = {}\n for type_of_event in np.unique(types):\n timestamps_dict[ear][type_of_event] = []\n timestamp_indices_dict[ear][type_of_event] = []\n\n for idx, (timestamp, ear, type_of_event) in enumerate(zip(timestamps, ears, types)):\n timestamps_dict[ear][type_of_event].append(timestamp)\n timestamp_indices_dict[ear][type_of_event].append(idx)\n\n if verbose:\n print('Initialized the timestamp lists.')\n\n bar = progressbar.ProgressBar() if verbose else lambda x: x\n\n for type_of_event in bar(np.unique(types)):\n timestamps_left = np.array(timestamps_dict[True][type_of_event])\n timestamp_indices_left = timestamp_indices_dict[True][type_of_event]\n timestamps_right = np.array(timestamps_dict[False][type_of_event])\n timestamp_indices_right = timestamp_indices_dict[False][type_of_event]\n\n for ts_right, ts_idx_right in zip(timestamps_right, timestamp_indices_right):\n matched_indices = np.where((timestamps_left >= ts_right - max_itd) &\n (timestamps_left < ts_right + max_itd))[0]\n for matched_index in matched_indices:\n matched_itd = ts_right - timestamps_left[matched_index]\n itds_to_return.append(matched_itd)\n timestamps_to_return.append(ts_right)\n ears_to_return.append(False)\n types_to_return.append(type_of_event)\n\n for ts_left, ts_idx_left in zip(timestamps_left, timestamp_indices_left):\n matched_indices = np.where((timestamps_right >= ts_left - max_itd) &\n (timestamps_right < ts_left + max_itd))[0]\n for matched_index in matched_indices:\n matched_itd = timestamps_right[matched_index] - ts_left\n itds_to_return.append(matched_itd)\n timestamps_to_return.append(ts_left)\n ears_to_return.append(True)\n types_to_return.append(type_of_event)\n\n indices = np.argsort(timestamps_to_return)\n timestamps_to_return = np.array(timestamps_to_return, dtype=np.float32)[indices]\n itds_to_return = np.array(itds_to_return, dtype=np.float32)[indices]\n types_to_return = np.array(types_to_return, dtype=np.int16)[indices]\n ears_to_return = np.array(ears_to_return, dtype=np.int8)[indices]\n\n if save_to_file is not None:\n np.savez(save_to_file, timestamps=timestamps_to_return, ears=ears_to_return,\n types=types_to_return, itds=itds_to_return)\n\n if return_attributes:\n return itds_to_return, timestamps_to_return, ears_to_return, types_to_return\n\n return itds_to_return",
"def it(t, variant=0, min_q=3, max_q=sage.all.infinity, primes_only=False):\n for q in sage.all.primes(min_q, max_q) if primes_only else prime_powers(min_q, max_q):\n yield NormGraph(q, t, variant)",
"def _create_val_iterator(self, patch_locations):\n dataset_locations = patch_locations['valid_locations_val']\n # NOTE: Repeat dataset so that we can have 40k iterations.\n dataset_locations = dataset_locations.repeat(2, axis=0)\n\n dataset = tf.data.Dataset.from_tensor_slices(dataset_locations)\n dataset = dataset.map(self._parse_function)\n batched_dataset = dataset.batch(self._settings.batch_size)\n iterator = batched_dataset.make_one_shot_iterator()\n\n return iterator",
"def __iter__(self) -> Iterable[MemoryVariable]:\n return iter(self.variables)",
"def simulation_attributes_generator(sim_attr, attr_skel_list, default_attr):\n#TODO don't forget id_name\n#SOLVED\n none_attr_list = none_simulation_attributes(sim_attr)\n id_name = sim_attr.id_name\n if len(attr_skel_list) == 0:\n sim_attr.fill(default_attr)\n yield sim_attr\n set_simulation_attributes(sim_attr,none_attr_list,None)\n sim_attr.id_name = id_name\n raise StopIteration()\n for attr_skel in attr_skel_list:\n#? Ugly support for analyses\n sim_attr._analyses = attr_skel.analyses\n for skel_attr in attr_skel.generate_simulation_attributes():\n sim_attr.fill(skel_attr)\n#? code repetition\n sim_attr.fill(default_attr)\n yield sim_attr\n set_simulation_attributes(sim_attr,none_attr_list,None)\n sim_attr.id_name = id_name"
] | [
"0.50482535",
"0.4856219",
"0.4846035",
"0.48322827",
"0.47165158",
"0.47063917",
"0.470118",
"0.46948507",
"0.46752623",
"0.466266",
"0.46456906",
"0.4583182",
"0.4576654",
"0.45431003",
"0.4538435",
"0.44855666",
"0.44833434",
"0.4461226",
"0.4460401",
"0.4453483",
"0.44275883",
"0.4398761",
"0.4395316",
"0.43835968",
"0.4369583",
"0.43563935",
"0.43512827",
"0.43448967",
"0.4344356",
"0.4334785"
] | 0.5498514 | 0 |
Pandas 0.21.0 changes sum() behavior so that the result of applying sum over an empty DataFrame is NaN. Meant to be set as pd.Series.zsum = zsum. | def zsum(s, *args, **kwargs):
return 0 if s.empty else s.sum(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sum(self):\n # skipna == True\n # only_numerical == True\n # skipna == True\n return self._lift(\"sum\")",
"def row_sums(\n x: DataFrame,\n na_rm: bool = False,\n # dims: int = 1,\n # weights = None,\n # freq = None,\n # n = None\n) -> Iterable[NumericType]:\n return x.agg(sum, axis=1, na_rm=na_rm)",
"def col_sums(\n x: DataFrame,\n na_rm: bool = False,\n # dims: int = 1,\n # weights = None,\n # freq = None,\n # n = None\n) -> Iterable[NumericType]:\n return x.agg(sum, na_rm=na_rm)",
"def _get_sum(self):\r\n try:\r\n return self._sum\r\n except AttributeError:\r\n self._sum = self.no_nan.sum()\r\n # The following 2 lines are needede as in Python 3.3 with NumPy\r\n # 1.7.1, numpy.ndarray and numpy.memmap aren't hashable.\r\n if type(self._sum) is numpy.memmap:\r\n self._sum = numpy.asarray(self._sum).item()\r\n if self.has_nan and self.no_nan.mask.all():\r\n # In this case the sum is not properly computed by numpy.\r\n self._sum = 0\r\n if numpy.isinf(self._sum) or numpy.isnan(self._sum):\r\n # NaN may happen when there are both -inf and +inf values.\r\n if self.has_nan:\r\n # Filter both NaN and Inf values.\r\n mask = self.no_nan.mask + numpy.isinf(self[1])\r\n else:\r\n # Filter only Inf values.\r\n mask = numpy.isinf(self[1])\r\n if mask.all():\r\n self._sum = 0\r\n else:\r\n self._sum = numpy.ma.masked_array(self[1], mask).sum()\r\n # At this point there should be no more NaN.\r\n assert not numpy.isnan(self._sum)\r\n return self._sum",
"def matrix_sum(data, axis=None, ignore_nan=False):\n sum_fn = _nansum if ignore_nan else np.sum\n if axis not in [0, 1, None]:\n raise ValueError(\"Expected axis in [0, 1, None]. Got {}\".format(axis))\n if isinstance(data, pd.DataFrame):\n if is_SparseDataFrame(data):\n if axis is None:\n sums = sum_fn(data.to_coo())\n else:\n index = data.index if axis == 1 else data.columns\n sums = pd.Series(\n np.array(sum_fn(data.to_coo(), axis)).flatten(), index=index\n )\n elif is_sparse_dataframe(data):\n if axis is None:\n sums = sum_fn(data.sparse.to_coo())\n else:\n index = data.index if axis == 1 else data.columns\n sums = pd.Series(\n np.array(sum_fn(data.sparse.to_coo(), axis)).flatten(), index=index\n )\n elif axis is None:\n sums = sum_fn(data.to_numpy())\n else:\n sums = sum_fn(data, axis)\n else:\n sums = sum_fn(data, axis=axis)\n if isinstance(sums, np.matrix):\n sums = np.array(sums).flatten()\n return sums",
"def sum(self) -> \"Stream[float]\":\n return self.agg(np.sum).astype(\"float\")",
"def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:\n return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)",
"def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)",
"def quadsum(data):\n return np.nansum(data, 0)",
"def sum(self) -> FrameLike:\n return super().sum()",
"def sum(self) -> FrameLike:\n return super().sum()",
"def sum(self) -> FrameLike:\n return super().sum()",
"def sum(self) -> FrameLike:\n return super().sum()",
"def test_nan_keyword(self):\n # If array has any nan's then the output will return all zeros\n array = self.array1.copy()\n array[0,0] = numpy.nan\n byt = bytscl(array, nan=True)\n total = numpy.sum(byt)\n self.assertTrue(total != 0)",
"def test_csum_ignore_nans():\n source = [np.zeros((16,), dtype=float) for _ in range(10)]\n source.append(np.full((16,), fill_value=np.nan))\n summed = csum(source, ignore_nan=True)\n assert np.allclose(summed, np.zeros_like(summed))",
"def sum(self):\n return self.aggregate(np.sum)",
"def test_local_sum_sum_dtype(self):\r\n x = tensor.tensor3(dtype='int8')\r\n y = x.sum(axis=0, dtype='int32').sum(axis=1, dtype='int64')\r\n backup = config.on_opt_error\r\n config.on_opt_error = 'raise'\r\n try:\r\n # This compilation would fail prior to fix.\r\n f = theano.function([x], y)\r\n finally:\r\n config.on_opt_error = backup",
"def sum(self) -> float:\n return sum(self.values)",
"def rowsums (self):\n return self.values.sum (axis=0)",
"def colsums (self):\n return self.values.sum (axis=1)",
"def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())",
"def sum(self):\n return sum(self.values)",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"sum\",\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum (self):\n return self.values.sum ()",
"def sum (self):\n return self.values.sum ()",
"def sum(self, values: pdarray, skipna: bool = True) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"sum\", skipna)\n return k, cast(pdarray, v)",
"def fsum(iterable):\n return 0.0",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n keep_attrs=keep_attrs,\n **kwargs,\n )",
"def sum(\n self,\n dim: Dims = None,\n *,\n skipna: bool | None = None,\n min_count: int | None = None,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.sum,\n dim=dim,\n skipna=skipna,\n min_count=min_count,\n numeric_only=True,\n keep_attrs=keep_attrs,\n **kwargs,\n )"
] | [
"0.6400161",
"0.6269071",
"0.622541",
"0.5976757",
"0.5800932",
"0.57864934",
"0.57507914",
"0.57166976",
"0.5702603",
"0.5651831",
"0.5651831",
"0.5651831",
"0.5651831",
"0.56051767",
"0.5582573",
"0.55817443",
"0.55799365",
"0.54326",
"0.54056394",
"0.5402983",
"0.5398725",
"0.5383667",
"0.53635687",
"0.53635687",
"0.5333476",
"0.5333476",
"0.5318804",
"0.53173244",
"0.5304497",
"0.52776724"
] | 0.70001817 | 0 |
Helper function to quickly expand a series to a dataframe with according column axis and every single column being the equal to the given series. | def expand_series(ser, columns):
return ser.to_frame(columns[0]).reindex(columns=columns).ffill(axis=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def series_from_dataframe(df, index_column: str, value_column: str=None):\n\n if len(df.columns) > 2:\n df = df[[index_column, value_column]].copy()\n else:\n df = df.copy()\n df.set_index(index_column, inplace=True)\n sr = df.squeeze()\n sr.name = value_column\n return sr",
"def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame:\n return pd.concat(serieses, axis=\"columns\").T",
"def expand_df(df, column):\n expanded2 = pd.DataFrame({\n col: np.repeat(df[col].values, df[column].str.len())\n for col in df.columns.drop(column)}\n ).assign(**{column: list(np.concatenate(df[column].values))})\n return expanded2",
"def sample_series(self, series, append_frame=None):\n\n columns, values = self.get_readings(series)\n\n dataframe = DataFrame(values, columns=columns)\n dataframe = self.format_index(dataframe, self.ENERGY_DB_INDEX)\n\n # https://pandas.pydata.org/pandas-docs/stable/merging.html\n if append_frame is not None:\n # dataframe = pandas.concat([dataframe, input_frame], axis=1, join='inner', join_axes=[input_frame.index])\n dataframe = pandas.merge(append_frame, dataframe, on=['time', 'time'])\n # print(dataframe)\n\n return dataframe",
"def SweepFrame(*args, **kwargs):\n underride(kwargs, dtype=float)\n return pd.DataFrame(*args, **kwargs)",
"def data_structure():\n\n items = [1.0, 2.0, 3.0, 4.0, 5.0 ]\n s = pd.Series(items, index=['a', 'b', 'c', 'd', 'e'])\n # s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])\n print s\n s = pd.Series(items)\n print s\n\n d= {'one': [1.0, 2.0, 3.0, 4.0], 'two': [4.0, 3.0, 2.0, 1.0]}\n\n df = pd.DataFrame(d)\n print df\n df = pd.DataFrame(d, index=['a', 'b', 'c', 'd'])\n print df\n\n\n data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]\n df = pd.DataFrame(data2)\n\n print df",
"def make_series(x, y, **options):\n underride(options, name='values')\n if isinstance(y, pd.Series):\n y = y.values\n series = pd.Series(y, index=x, **options)\n series.index.name = 'index'\n return series",
"def binarize(series):\n name = series.name\n df = pd.DataFrame()\n for category in series.value_counts().index:\n df[category] = (series == category)\n return df",
"def _timeseries_to_dataframe_value(timeseries, name):\n # Column headers\n columns = [\n [name],\n [timeseries.instance_or_contract_dataframe_column_header()],\n ['']\n ]\n # Convert a time series of (date, value)\n df = pd.DataFrame.from_records(\n ((v.value,) for v in timeseries),\n columns=columns,\n index=[v.date for v in timeseries],\n )\n df.index.name = 'date'\n return df",
"def _convert_df_to_series(df):\n if isinstance(df, pd.DataFrame) and df.shape[1] == 1:\n return df.iloc[:, 0]\n elif isinstance(df, pd.DataFrame) and df.shape[1] > 1:\n raise TypeError('DataFrame cannot be converted to a Series as it contains more than 1 column.')\n return df",
"def better_add_column(values, df=None):\n # Update the function to create a default DataFrame\n if df is None:\n df = pandas.DataFrame()\n df['col_{}'.format(len(df.columns))] = values\n return df",
"def index_reformat(series: pd.Series, preserve_order: bool) -> pd.DataFrame:\n series = series.copy()\n series = rewrite_index(series)\n series.index = remove_constant_levels(series.index)\n series.index.names = [LEVEL_NAMES.get(name, name) for name in series.index.names]\n series = series.rename(index=pretty_rewrite)\n\n # Preserve order of inputs\n df = series.unstack(\"Target\")\n if preserve_order:\n df = df.reindex(columns=series.index.get_level_values(\"Target\").unique())\n for level in series.index.names:\n kwargs = {}\n if isinstance(df.index, pd.MultiIndex):\n kwargs = dict(level=level)\n if level != \"Target\":\n df = df.reindex(index=series.index.get_level_values(level).unique(), **kwargs)\n else:\n df = df.sort_index()\n return df",
"def CombineSeries(*args):\r\n df = pd.concat([*args], axis=1)\r\n\r\n return df",
"def to_scalar_df(df: pd.DataFrame) -> pd.DataFrame:\n scalar_df = df\n column_ordering = []\n for c, s in df.items():\n if s.dtype == \"object\":\n s_list = s.to_list()\n try:\n ncols = s_list[0].shape[0]\n split_cols = [f\"{c}_{k}\" for k in range(ncols)]\n sdf = pd.DataFrame(s_list, columns=split_cols)\n scalar_df = pd.concat([scalar_df, sdf], axis=1)\n column_ordering += split_cols\n except AttributeError as e:\n raise ValueError(f\"Expected series of lists, but found {s_list[0]}\") from e\n else:\n column_ordering.append(c)\n return scalar_df[column_ordering]",
"def allocate_series_dataframes(network, series):\n for component, attributes in series.items():\n df = network.df(component)\n pnl = network.pnl(component)\n\n for attr in attributes:\n pnl[attr] = pnl[attr].reindex(\n columns=df.index,\n fill_value=network.components[component][\"attrs\"].at[attr, \"default\"],\n )",
"def to_series(self) -> pd.Series:\n df = self.to_dataframe(\"* values *\")\n dims = self.dims_list\n if len(dims) == 1:\n dims = dims[0]\n return df.set_index(dims)[\"* values *\"]",
"def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n from pyspark.pandas import DataFrame\n\n groupby = self._groupby\n psdf = groupby._psdf\n\n # Here we need to include grouped key as an index, and shift previous index.\n # [index_column0, index_column1] -> [grouped key, index_column0, index_column1]\n new_index_scols: List[Column] = []\n new_index_spark_column_names = []\n new_index_names = []\n new_index_fields = []\n for groupkey in groupby._groupkeys:\n index_column_name = SPARK_INDEX_NAME_FORMAT(len(new_index_scols))\n new_index_scols.append(groupkey.spark.column.alias(index_column_name))\n new_index_spark_column_names.append(index_column_name)\n new_index_names.append(groupkey._column_label)\n new_index_fields.append(groupkey._internal.data_fields[0].copy(name=index_column_name))\n\n for new_index_scol, index_name, index_field in zip(\n psdf._internal.index_spark_columns,\n psdf._internal.index_names,\n psdf._internal.index_fields,\n ):\n index_column_name = SPARK_INDEX_NAME_FORMAT(len(new_index_scols))\n new_index_scols.append(new_index_scol.alias(index_column_name))\n new_index_spark_column_names.append(index_column_name)\n new_index_names.append(index_name)\n new_index_fields.append(index_field.copy(name=index_column_name))\n\n if groupby._agg_columns_selected:\n agg_columns = groupby._agg_columns\n else:\n # pandas doesn't keep the groupkey as a column from 1.3 for DataFrameGroupBy\n column_labels_to_exclude = groupby._column_labels_to_exclude.copy()\n if isinstance(groupby, DataFrameGroupBy):\n for groupkey in groupby._groupkeys: # type: ignore[attr-defined]\n column_labels_to_exclude.add(groupkey._internal.column_labels[0])\n agg_columns = [\n psdf._psser_for(label)\n for label in psdf._internal.column_labels\n if label not in column_labels_to_exclude\n ]\n\n applied = []\n for agg_column in agg_columns:\n applied.append(agg_column._with_new_scol(func(agg_column.spark.column))) # TODO: dtype?\n\n # Seems like pandas filters out when grouped key is NA.\n cond = groupby._groupkeys[0].spark.column.isNotNull()\n for c in groupby._groupkeys[1:]:\n cond = cond | c.spark.column.isNotNull()\n\n sdf = psdf._internal.spark_frame.filter(cond).select(\n new_index_scols + [c.spark.column for c in applied]\n )\n\n internal = psdf._internal.copy(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in new_index_spark_column_names],\n index_names=new_index_names,\n index_fields=new_index_fields,\n column_labels=[c._column_label for c in applied],\n data_spark_columns=[\n scol_for(sdf, c._internal.data_spark_column_names[0]) for c in applied\n ],\n data_fields=[c._internal.data_fields[0] for c in applied],\n )\n\n return groupby._handle_output(DataFrame(internal))",
"def _wrap_in_pandas_container(\n data_to_wrap,\n *,\n columns,\n index=None,\n):\n if issparse(data_to_wrap):\n raise ValueError(\"Pandas output does not support sparse data.\")\n\n if callable(columns):\n try:\n columns = columns()\n except Exception:\n columns = None\n\n pd = check_pandas_support(\"Setting output container to 'pandas'\")\n\n if isinstance(data_to_wrap, pd.DataFrame):\n if columns is not None:\n data_to_wrap.columns = columns\n return data_to_wrap\n\n return pd.DataFrame(data_to_wrap, index=index, columns=columns, copy=False)",
"def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n pass",
"def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:\n pass",
"def add_column(values, df=None):\n if df is None:\n df=pandas.DataFrame()\n df['col_{}'.format(len(df.columns))] = values\n return df",
"def to_dataframe(self, value_column=\"* values *\") -> pd.DataFrame:\n index, vals = self.vector.to_values()\n df = self.schema.decode_many(index, self.dims_list)\n df[value_column] = vals\n return df",
"def add_column(values, df=pandas.DataFrame()):\n df['col_{}'.format(len(df.columns))] = values\n return df",
"def create_pandas_dataframe(data, index, columns):\n return pd.DataFrame(data, index, columns)",
"def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:",
"def frame(something, name = None):\n \n if isinstance(something, dict):\n res = pd.DataFrame.from_dict(something, orient='index')\n else:\n res = pd.DataFrame(something)\n number_of_columns = len(res.columns)\n if name != None:\n if isinstance(name, list):\n if len(name) >= number_of_columns:\n res.columns = name[:number_of_columns]\n else:\n res.columns = name + list(range(len(name), number_of_columns))\n else:\n res.columns = [name] + list(range(1, number_of_columns))\n return res",
"def from_series(cls, s: pd.Series, schema: Schema) -> \"Flat\":\n if isinstance(s.index, pd.MultiIndex):\n dims = s.index.names\n else:\n if not s.index.name:\n err_msg = (\n \"Series index does not have a name. Unable to infer dimension.\\n\"\n \"When creating the series, ensure the index has a name:\\n\"\n \"s = pd.Series([1, 2, 3], index=pd.Index(['S', 'M', 'L'], name='size'))\"\n )\n raise TypeError(err_msg)\n dims = [s.index.name]\n df = s.to_frame(\"* value *\").reset_index()\n return cls.from_dataframe(df, schema, dims, \"* value *\")",
"def create_dataframe(self, dictionary_to_convert, cols):\n\n dataframe_converted = pd.DataFrame.from_dict(dictionary_to_convert, orient='index', columns = cols)\n dataframe_converted = dataframe_converted.reset_index()\n dataframe_converted = dataframe_converted.drop(columns=['index'])\n\n return dataframe_converted",
"def coords_to_df(coords, columns=None):\n nb_dim = coords.shape[1]\n if columns is None:\n if nb_dim == 2:\n columns = ['x', 'y']\n elif nb_dim == 3:\n columns = ['x', 'y', 'z']\n else:\n columns = ['x'+str(i) for i in range(nb_dim)]\n \n nodes = pd.DataFrame(data=coords, columns=columns)\n return nodes",
"def _timeseries_to_dataframe_mean_and_scenarios(timeseries, name):\n width = timeseries.total_values_per_item()\n # Column headers\n columns = [\n [name] * width,\n [timeseries.instance_or_contract_dataframe_column_header()] * width,\n [''] + timeseries.scenario_names\n ]\n # Convert a time series of (date, scenarios[])\n df = pd.DataFrame.from_records(\n ((v.value, *v.scenarios) for v in timeseries.data),\n columns=columns,\n index=[v.date for v in timeseries],\n )\n df.index.name = 'date'\n return df"
] | [
"0.64476883",
"0.64365065",
"0.6100705",
"0.6061576",
"0.5981239",
"0.5914129",
"0.58018684",
"0.5774947",
"0.57282746",
"0.5726346",
"0.5709844",
"0.5703783",
"0.5690615",
"0.56714773",
"0.5614371",
"0.55539507",
"0.55334795",
"0.5530608",
"0.5498409",
"0.5498409",
"0.5496547",
"0.54573244",
"0.5442797",
"0.54358816",
"0.5432699",
"0.54265887",
"0.5395007",
"0.5380347",
"0.5380338",
"0.53780454"
] | 0.737047 | 0 |
Getter function. Get the index of extendable elements of a given component. | def get_extendable_i(n, c):
idx = n.df(c)[lambda ds: ds[nominal_attrs[c] + "_extendable"]].index
return idx.rename(f"{c}-ext") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def element_index(self):\n return self._index",
"def InterfaceIndex(self) -> int:",
"def InterfaceIndex(self) -> int:",
"def get_list_index(self):\r\n return self.n",
"def get_index(self):\n return self.disk.partitions.index(self)",
"def get_index(self):\r\n i = 0\r\n for container in self.settings[\"containers\"]:\r\n if container[\"name\"] == self.container[\"name\"]:\r\n return i\r\n i += 1",
"def getIndex(condition='', component=''):\n if component == 'IC2' or component == 'IC14':\n index = '.nii[0]'\n elif component == 'IC7' or component == 'IC29':\n index = '.nii[1]'\n elif component == 'IC25':\n index = '.nii[2]'\n elif component == 'IC31':\n index = '.nii[3]'\n elif component == 'IC39':\n index = '.nii[4]'\n else:\n index = '.nii'\n\n return index",
"def isect_index(self):\n return self._lazy_isect_index()",
"def index(self) -> int:",
"def index_in_tag(self):\n if hasattr(self, '_m_index_in_tag'):\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None\n\n self._m_index_in_tag = (self.tag - 35)\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None",
"def components(self) -> int:\n return self._components",
"def enumerate(self):\r\n return enumerate(self, 1)",
"def getSectionIndex(self) -> int:\n ...",
"def get_instance_index(self):\n return np.unique([tp[0] for tp in self._innercontainer])",
"def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes",
"def get_multi_index(self):\n return self.basis.elements",
"def get_non_extendable_i(n, c):\n idx = n.df(c)[lambda ds: ~ds[nominal_attrs[c] + \"_extendable\"]].index\n return idx.rename(f\"{c}-fix\")",
"def index(self, elem):\n return _coconut.len(self.iter) - self.iter.index(elem) - 1",
"def component(self, index):\n return self.components[index]",
"def GetIndexOfItem(self, item):\n \n parent = self._window.GetItemParent(item)\n if parent:\n parentIndices = self.GetIndexOfItem(parent)\n ownIndex = self.GetItemChildren(parent).index(item)\n return parentIndices + (ownIndex,)\n else:\n return ()",
"def __getitem__(self, index):\n return self.components[index]",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def getIndex(self, *args):\n return _libsbml.XMLAttributes_getIndex(self, *args)",
"def _index(self,key):\n index=0\n for item in self._item:\n if item.key==key:\n return index\n index+=1\n return -1",
"def idx(self):\n return self._idx",
"def get_component():\n\t\tcomponentConsumed = consumed\n\t\tfor i in range(len(components)):\n\t\t\tif componentConsumed < len(components[-i]):\n\t\t\t\treturn components[-i][:-componentConsumed if componentConsumed else None]\n\t\t\telse:\n\t\t\t\tcomponentConsumed -= len(components[-i])\n\t\treturn []",
"def get_list_index(self):\r\n return self._api.get_list_index()",
"def index(self, x) -> int:\n pass",
"def get_index_from_well(self, well):\n pass",
"def get_element_index(el, elements):\n for idx, element in enumerate(elements):\n diff = torch.sum(torch.abs(el - element))\n if diff.item() < 1e-8:\n return idx\n return None"
] | [
"0.66678977",
"0.6171629",
"0.6171629",
"0.5992239",
"0.59366125",
"0.5837213",
"0.5818474",
"0.57752585",
"0.5760597",
"0.5744115",
"0.57399565",
"0.5736708",
"0.5734644",
"0.57214147",
"0.57179445",
"0.56879526",
"0.5640134",
"0.5638468",
"0.5628843",
"0.56254864",
"0.56226295",
"0.56162435",
"0.5603926",
"0.5601009",
"0.5598603",
"0.5596347",
"0.5589648",
"0.5576282",
"0.5566776",
"0.5545547"
] | 0.6495712 | 1 |
Getter function. Get the index of nonextendable elements of a given component. | def get_non_extendable_i(n, c):
idx = n.df(c)[lambda ds: ~ds[nominal_attrs[c] + "_extendable"]].index
return idx.rename(f"{c}-fix") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def element_index(self):\n return self._index",
"def get_instance_index(self):\n return np.unique([tp[0] for tp in self._innercontainer])",
"def get_list_index(self):\r\n return self.n",
"def index(self, elem):\n return _coconut.len(self.iter) - self.iter.index(elem) - 1",
"def get_component():\n\t\tcomponentConsumed = consumed\n\t\tfor i in range(len(components)):\n\t\t\tif componentConsumed < len(components[-i]):\n\t\t\t\treturn components[-i][:-componentConsumed if componentConsumed else None]\n\t\t\telse:\n\t\t\t\tcomponentConsumed -= len(components[-i])\n\t\treturn []",
"def InterfaceIndex(self) -> int:",
"def InterfaceIndex(self) -> int:",
"def enumerate(self):\r\n return enumerate(self, 1)",
"def innulo(self):\n for i in range(self.n):\n if not comozero(self[i]):\n return i\n return None",
"def get_unprescribed_indexes(self):\n total_dof = DOF_PER_NODE_STRUCTURAL * len(self.preprocessor.nodes)\n all_indexes = np.arange(total_dof)\n return np.delete(all_indexes, self.prescribed_indexes)",
"def isect_index(self):\n return self._lazy_isect_index()",
"def index_in_tag(self):\n if hasattr(self, '_m_index_in_tag'):\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None\n\n self._m_index_in_tag = (self.tag - 35)\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None",
"def component(self, index):\n return self.components[index]",
"def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0",
"def getNodalIndex(self, iVertex):\n node = self.gr.getMeshPoint(iVertex)\n no = node.getNo()\n \n return no",
"def GetIndexOfItem(self, item):\n \n parent = self._window.GetItemParent(item)\n if parent:\n parentIndices = self.GetIndexOfItem(parent)\n ownIndex = self.GetItemChildren(parent).index(item)\n return parentIndices + (ownIndex,)\n else:\n return ()",
"def idx(self):\n return self._idx",
"def get_unlabeled_idx(X_train, labeled_idx):\n return np.arange(X_train.shape[0])[np.logical_not(np.in1d(np.arange(X_train.shape[0]), labeled_idx))]",
"def __getitem__(self, index):\n return self.components[index]",
"def indices(self):\n return range(len(self))",
"def _index(self) -> int:\n return -1",
"def get_index(self):\n return self.disk.partitions.index(self)",
"def getIndex(condition='', component=''):\n if component == 'IC2' or component == 'IC14':\n index = '.nii[0]'\n elif component == 'IC7' or component == 'IC29':\n index = '.nii[1]'\n elif component == 'IC25':\n index = '.nii[2]'\n elif component == 'IC31':\n index = '.nii[3]'\n elif component == 'IC39':\n index = '.nii[4]'\n else:\n index = '.nii'\n\n return index",
"def index(self) -> int:",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def listNonDegenerate(self):\n return arange(self.nelems())[~self.testDegenerate()]",
"def get_index_from_well(self, well):\n pass",
"def slot_for_container(self, table, index):\n\n i = 0\n for t in self.metalist:\n l = len(t)\n if t is table:\n if l == 0 or l <= index:\n return -1\n else:\n i += index\n return i\n else:\n i += l\n return -1",
"def index(self, item):\n\t\ti = 0\t\t\n\t\tif not self.len:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\treturn i\n\t\tactual = self.prim\n\t\twhile actual and actual.dato != item:\n\t\t\tactual = actual.prox\n\t\t\ti += 1\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\treturn i",
"def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices"
] | [
"0.6539576",
"0.61909676",
"0.6013466",
"0.5934518",
"0.58452624",
"0.58125824",
"0.58125824",
"0.5780629",
"0.5724768",
"0.5723052",
"0.57140875",
"0.56976855",
"0.5628068",
"0.56127214",
"0.5611799",
"0.5598618",
"0.55968726",
"0.55942404",
"0.5589706",
"0.55723184",
"0.5564802",
"0.5564367",
"0.555377",
"0.5548023",
"0.55424976",
"0.5534537",
"0.55308545",
"0.55276823",
"0.5516302",
"0.55147535"
] | 0.6343805 | 1 |
Getter function. Get True values for elements of component c which are active at a given investment period. These are calculated from lifetime and the build year. | def get_active_assets(n, c, investment_period):
periods = np.atleast_1d(investment_period)
active = {}
for period in periods:
if period not in n.investment_periods:
raise ValueError("Investment period not in `network.investment_periods`")
active[period] = n.df(c).eval("build_year <= @period < build_year + lifetime")
return pd.DataFrame(active).any(axis=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def periodCheck(data):",
"def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]",
"def operable_ages(self, acode, period):\n if acode not in self.oper_expr: # action not defined for this development type\n return None\n if acode not in self.operability: # action not compiled yet...\n if self.compile_action(acode) == -1: return None # never operable\n #print ' '.join(self.key), acode, period, self.operability, self.oper_expr\n if period not in self.operability[acode]:\n return None\n else:\n lo, hi = self.operability[acode][period]\n return list(set(range(lo, hi+1)).intersection(list(self._areas[period].keys())))",
"def active (self, after = None, before = None):\n\n active = ActivityList()\n active.list = [actor for actor in self.list\n if (after == None or\n actor[\"period\"].end >= after) and\n (before == None or\n actor[\"period\"].start <= before)]\n return active",
"def get_active(self):\n return self.get_challenges().filter(status='A')",
"def is_always_active(self) -> bool:\n if len(self.active_periods) == 0:\n return True\n\n if len(self.active_periods) == 1:\n period = self.active_periods[0]\n if period.lower == 0 and period.upper == 24000:\n return True\n\n return False",
"def getIntegralConditions(self):\n return self._getConditions(restrict=['CI'])",
"def get_applicable_components(device, components, component_bitmap_bit_length):\n applicable_components_list = device[\"ApplicableComponents\"]\n applicable_components = bitarray(\n component_bitmap_bit_length, endian=\"little\"\n )\n applicable_components.setall(0)\n for component_index in applicable_components_list:\n if 0 <= component_index < len(components):\n applicable_components[component_index] = 1\n else:\n sys.exit(\"ERROR: Applicable Component index not found.\")\n return applicable_components",
"def get_state_in_period(course_key, from_date, to_date):\n enrollment_stat = (\n EnrollmentTabCache.objects\n .filter(course_id=course_key, created__range=(from_date, to_date))\n .values('unenroll', 'enroll', 'total', 'created')\n .order_by('created')\n )\n return enrollment_stat",
"def Complete(self):\n return self.century is not None and self.day is not None",
"def get_city_artif_per_year(self):\n qs = CommuneDiff.objects.filter(city__in=self.cities.all()).filter(\n year_old__gte=self.analyse_start_date, year_new__lte=self.analyse_end_date\n )\n results = collections.defaultdict(dict)\n for commune in qs:\n results[commune.city.name][commune.period] = commune.net_artif\n return results",
"def active_budgeted_expenses_by_period(ledger_id: int, year: int, period: int):\n active_budget = ActiveBudgetModel.find_by_year(ledger_id, year)\n if not active_budget:\n raise NotFound(\"No active budget found\")\n return budgeted_expenses_by_period(active_budget.budget_id, period)",
"def compute_competition_open(df):\n # competition open?\n missing_mask = (\n df[\"competition_open_since_year\"].isna()\n | df[\"competition_open_since_month\"].isna()\n )\n competition_open = (\n pd.to_datetime(\n df[\"competition_open_since_year\"].astype(\"Int64\").astype(\"str\")\n + \"-\"\n + df[\"competition_open_since_month\"]\n .astype(\"Int64\")\n .astype(str)\n .str.zfill(2)\n + \"-01\",\n errors=\"coerce\",\n )\n .le(df[\"date\"])\n .astype(int)\n )\n return competition_open.where(~missing_mask).astype(\"category\")",
"def spans_year(self, key):\n startDate = string_date(self.periods[key]['dates'][0], self.year)\n endDate = string_date(self.periods[key]['dates'][1], self.year)\n startDay = pd.date_range(startDate, periods=1).dayofyear\n endDay = pd.date_range(endDate, periods=1).dayofyear\n return(endDay[0] - startDay[0] < 0)",
"def _get_conditions(self):\n return self.__conditions",
"def _get_conditions(self):\n return self.__conditions",
"def _get_conditions(self):\n return self.__conditions",
"def test_get_active_cco(session):\n business = factory_business('BC1234567')\n filing_dict = copy.deepcopy(FILING_HEADER)\n filing_dict['filing']['consentContinuationOut'] = copy.deepcopy(CONSENT_CONTINUATION_OUT)\n filing = factory_completed_filing(business, filing_dict)\n\n expiry_date = get_cco_expiry_date(filing.effective_date)\n\n consent_continuation_out = ConsentContinuationOut()\n consent_continuation_out.foreign_jurisdiction = 'CA'\n consent_continuation_out.foreign_jurisdiction_region = 'AB'\n consent_continuation_out.expiry_date = expiry_date\n consent_continuation_out.business_id = business.id\n consent_continuation_out.filing_id = filing.id\n consent_continuation_out.save()\n\n cco = consent_continuation_out.get_active_cco(business.id, filing.effective_date)\n assert cco\n cco = consent_continuation_out.get_active_cco(business.id, expiry_date)\n assert cco\n cco = consent_continuation_out.get_active_cco(business.id, expiry_date, 'CA', 'AB')\n assert cco\n\n cco = consent_continuation_out.get_active_cco(business.id, expiry_date + datedelta.datedelta(days=1))\n assert not cco",
"def test_get_component_ON(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/foo': 'bar',\n })\n c = self.u.get_component('a')\n self.assertEqual({'name': 'a', 'activated': True, 'foo': 'bar'}, c)",
"def getSectorialConditions(self):\n return self._getConditions(restrict=['CS'])",
"def get_obs(self, state: EnvState) -> chex.Array:\n return jnp.array([state.weekday, *state.stock])",
"def get_available_companies(team):",
"def eligible(CGPA:float, Year:int, program:str) -> bool:\n return CGPA >= 2 and Year == (2 or 3) and program == \"CS\"",
"def get_service_values(self):\n \t\t current_vals = self.d.get_values_for_service(['onIntervalAlways'],self._service_name)",
"def get_values(self,\n env,\n only_state=False\n ):\n if only_state:\n con_values = np.concatenate([con.get_value(env) for con in self.state_constraints])\n else:\n con_values = np.concatenate([con.get_value(env) for con in self.constraints])\n return con_values",
"def active_temporal_constraints(self,assignments):\n return [t for t in self.temporal_constraints if t.is_active(assignments)]",
"def check(self):\n # Determine which services to test\n # TODO: use a smarter algorithm to detect which services to check\n max_lag = max(service.lag for service in self.services)\n now = datetime.utcnow()\n services = [ service\n for service in self.services\n if service.next_update_in(now) <= max_lag\n ]\n if not services:\n return 0, []\n\n period = max(service.period for service in services)\n\n # Test them\n service_states = self._check_services(services)\n\n # Report\n return int(period), service_states",
"def any_open_valve():\n for dev in station_valve_dict:\n if station_valve_dict[dev]:\n return True\n return False",
"def milestones_active(self, within_component=None):\n if within_component is not None:\n if isinstance(within_component, str):\n within_component = self.components(\"identifier = %s\" % within_component)[0]\n predicate = \"\"\"\n (StartDate == nil || StartDate < NOW()) \n AND \n (EndDate == nil || EndDate > NOW()) \n AND\n (component.identifier == nil OR %s BEGINSWITH component.fullName)\n \"\"\"\n return self.milestones(predicate % (_obj_id(within_component), within_component[\"fullName\"]))\n else:\n predicate = \"\"\"\n (StartDate == nil || StartDate < NOW()) \n AND \n (EndDate == nil || EndDate > NOW())\n \"\"\"\n return self.milestones(predicate)",
"def xbrl_years(self):\n return [year for year in self.years if year >= 2021]"
] | [
"0.5174939",
"0.4845245",
"0.4821024",
"0.47831455",
"0.4737875",
"0.47003165",
"0.46910465",
"0.469043",
"0.46834463",
"0.46755636",
"0.4660154",
"0.4653018",
"0.462037",
"0.46168467",
"0.4614364",
"0.4614364",
"0.4614364",
"0.45948458",
"0.45939475",
"0.45936975",
"0.45795774",
"0.45559177",
"0.4528401",
"0.4523222",
"0.45208547",
"0.45186",
"0.45098868",
"0.4508137",
"0.4505191",
"0.4496334"
] | 0.7044306 | 0 |
Getter function. Get a boolean array with True values for elements of component c which are active at a specific snapshot. If the network is in multi_investment_period mode (given by n._multi_invest), these are calculated from lifetime and the build year. Otherwise all values are set to True. | def get_activity_mask(n, c, sns=None, index=None):
if sns is None:
sns = n.snapshots
if getattr(n, "_multi_invest", False):
_ = {period: get_active_assets(n, c, period) for period in n.investment_periods}
res = pd.concat(_, axis=1).T.reindex(n.snapshots, level=0).loc[sns]
else:
res = pd.DataFrame(True, sns, n.df(c).index)
if index is not None:
res = res.reindex(columns=index)
res.index.name = "snapshot"
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_active_assets(n, c, investment_period):\n periods = np.atleast_1d(investment_period)\n active = {}\n for period in periods:\n if period not in n.investment_periods:\n raise ValueError(\"Investment period not in `network.investment_periods`\")\n active[period] = n.df(c).eval(\"build_year <= @period < build_year + lifetime\")\n return pd.DataFrame(active).any(axis=1)",
"def all(self):\n for v in self.sects.values():\n if not np.all(v):\n return False\n if self.is_full():\n return True\n else:\n return np.all(self.defval)",
"def is_asset_based_activity(self):\n return bool(self._my_map['assetIds'])",
"def any(self):\n for v in self.sects.values():\n if np.any(v):\n return True\n if self.is_full():\n return False\n else:\n return np.any(self.defval)",
"def get_observation_verbose(self):\n state = {}\n for grid_id, grid in self.grids.items():\n o = grid.get_active_orders(self.city_time)\n d = list(grid.get_idle_drivers().values())\n state[grid_id] = [o,d]\n return state",
"def __bool__(self):\n return self.fam.c_nonzero(self)",
"def test03(self):\n a = np.arange(1, 11) > 5\n b = bcolz.carray(a)\n wt = a.nonzero()[0].tolist()\n cwt = [i for i in b.wheretrue()]\n # print \"numpy ->\", a.nonzero()[0].tolist()\n # print \"where ->\", [i for i in b.wheretrue()]\n self.assertTrue(wt == cwt, \"wheretrue() does not work correctly\")",
"def getObservation(self):\n res = zeros(4)\n all = self.env.getSensors()\n res[0] = all[3]\n res[1] = all[1]\n res[2] = all[3] and all[1]\n res[3] = not all[3] and not all[1]\n return res",
"def test01(self):\n a = np.arange(1, 11) < 0\n b = bcolz.carray(a)\n wt = a.nonzero()[0].tolist()\n cwt = [i for i in b.wheretrue()]\n # print \"numpy ->\", a.nonzero()[0].tolist()\n # print \"where ->\", [i for i in b.wheretrue()]\n self.assertTrue(wt == cwt, \"wheretrue() does not work correctly\")",
"def get_observation(self):\n return np.array(self.env.last_action_is_valid, dtype=np.int32)",
"def is_active(self):\n for unit in self.units:\n if unit.is_alive():\n return True\n return False",
"def hasActiveConfiguration(self):\n cal = self.request.get('form.widgets.calendarConfig')\n if cal is not None:\n if cal == ['non actif'] or cal == ['bloque']:\n return False\n else:\n return True\n wrapper = getSAWrapper('gites_wallons')\n session = wrapper.session\n for heb in getHebergementsForProprio(self.context, session):\n return (heb.heb_calendrier_proprio != 'non actif')",
"def test00(self):\n a = np.arange(1, 11) > 0\n b = bcolz.carray(a)\n wt = a.nonzero()[0].tolist()\n cwt = [i for i in b.wheretrue()]\n # print \"numpy ->\", a.nonzero()[0].tolist()\n # print \"where ->\", [i for i in b.wheretrue()]\n self.assertTrue(wt == cwt, \"wheretrue() does not work correctly\")",
"def test02(self):\n a = np.arange(1, 1e5) < 0\n b = bcolz.carray(a)\n wt = a.nonzero()[0].tolist()\n cwt = [i for i in b.wheretrue()]\n # print \"numpy ->\", a.nonzero()[0].tolist()\n # print \"where ->\", [i for i in b.wheretrue()]\n self.assertTrue(wt == cwt, \"wheretrue() does not work correctly\")",
"def _isCached (self):\n\t\tself.props['ncjobids'] = range(self.length)\n\t\tif self.cache == False:\n\t\t\tself.log ('Not cached, because proc.cache is False', 'debug')\n\t\t\treturn False\n\t\t\n\t\tif self.cache == True:\n\t\t\tfor depend in self.depends:\n\t\t\t\tif depend.cached: continue\n\t\t\t\tself.log ('Not cached, my dependent \"%s\" is not cached.' % depend._name(), 'debug')\n\t\t\t\treturn False\n\t\t\n\t\ttrulyCachedJids = []\n\t\texptCachedJids = []\n\t\tself.props['ncjobids'] = []\n\t\tfor i, job in enumerate(self.jobs):\n\t\t\tjob = self.jobs[i]\n\t\t\tif job.isTrulyCached ():\n\t\t\t\ttrulyCachedJids.append(i)\n\t\t\telif job.isExptCached ():\n\t\t\t\texptCachedJids.append (i)\n\t\t\telse:\n\t\t\t\tself.props['ncjobids'].append (i)\n\t\t\t\t\n\t\tself.log ('Truely cached jobs: %s' % (trulyCachedJids if len(trulyCachedJids) < self.length else 'ALL'), 'debug')\n\t\tself.log ('Export cached jobs: %s' % (exptCachedJids if len(exptCachedJids) < self.length else 'ALL'), 'debug')\n\t\t\n\t\tif self.ncjobids:\n\t\t\tif len(self.ncjobids) < self.length:\n\t\t\t\tself.log ('Partly cached, only run non-cached %s job(s).' % len(self.ncjobids), 'info')\n\t\t\t\tself.log ('Jobs to be running: %s' % self.ncjobids, 'debug')\n\t\t\telse:\n\t\t\t\tself.log ('Not cached, none of the jobs are cached.', 'info')\n\t\t\treturn False\n\t\telse:\n\t\t\tself.log (self.workdir, 'info', 'CACHED')\n\t\t\treturn True",
"def test04(self):\n a = np.arange(1, 11) > 5\n b = bcolz.carray(a)\n wt = a.nonzero()[0].tolist()[:3]\n cwt = [i for i in b.wheretrue(limit=3)]\n # print \"numpy ->\", a.nonzero()[0].tolist()[:3]\n # print \"where ->\", [i for i in b.wheretrue(limit=3)]\n self.assertTrue(wt == cwt, \"wheretrue() does not work correctly\")",
"def buildC(self, outputstates='Ti', debug=False):\n\n if not isinstance(outputstates, list):\n C = np.zeros([1, len(self.sta)])\n j = 0\n assert outputstates in list(self.sta.keys()), '{} not in states'.format(\n outputstates)\n for rcstate in self.sta:\n if debug:\n print(self.sta[rcstate].name)\n if self.sta[rcstate].name == outputstates:\n C[0, j] = 1\n j += 1\n else:\n C = np.zeros([len(outputstates), len(self.sta)])\n i = 0\n for outpstate in outputstates:\n j = 0\n\n if debug:\n print(outpstate)\n assert isinstance(outpstate,\n str), 'output should be given as string'\n assert outpstate in list(self.sta.keys()), '{} not in states'.format(\n outpstate)\n for rcstate in self.sta:\n if debug:\n print(self.sta[rcstate].name)\n if self.sta[rcstate].name == outpstate:\n C[i, j] = 1\n j += 1\n i += 1\n return C, list(self.sta.keys())",
"def component_status_constraint(index):\n i, t = index\n return component_status[i, t] == pulp.lpSum(component_status_k[i, t, RANGE])",
"def is_active(self) -> bool:\n return not any((self.is_ancillary, self.is_removed, self.is_system))",
"def conditional_component_covs(self):\n return np.array([d.conditional_cov() for d in self.conditionalMVNs])",
"def concurrent_builds(self):\n return self._concurrent_builds",
"def is_almost_active(self,\n env\n ):\n flag = any([con.is_almost_active(env) for con in self.constraints])\n return flag",
"def _check_attribute_in_list(self, check_attribs, component_attribs):\n getattr = attrgetter('attribute_id')\n for key, group in groupby(component_attribs, getattr):\n if set(check_attribs).intersection([x.id for x in group]):\n return True\n return False",
"def check_sa_ea_for_each_branch(self, conn_components):\n parallel_cut_sa = list(set(self.initial_start_activities).union(\n infer_start_activities_from_prev_connections_and_current_dfg(self.initial_dfg, self.dfg, self.activities,\n include_self=False)).intersection(\n self.activities))\n parallel_cut_ea = list(set(self.initial_end_activities).union(\n infer_end_activities_from_succ_connections_and_current_dfg(self.initial_dfg, self.dfg, self.activities,\n include_self=False)).intersection(\n self.activities))\n\n if conn_components is None:\n return False\n\n for comp in conn_components:\n comp_sa_ok = False\n comp_ea_ok = False\n\n for sa in parallel_cut_sa:\n if sa in comp:\n comp_sa_ok = True\n break\n for ea in parallel_cut_ea:\n if ea in comp:\n comp_ea_ok = True\n break\n\n if not (comp_sa_ok and comp_ea_ok):\n return False\n\n return True",
"def independent_components(self) -> bool:\n return bool(self.GetIndependentComponents())",
"def any_open_valve():\n for dev in station_valve_dict:\n if station_valve_dict[dev]:\n return True\n return False",
"def cfdCheckIfCavity(self):\r\n \r\n self.foundPatch=False\r\n \r\n for patch, value in self.cfdBoundaryPatchesArray.items():\r\n \r\n if value['type'] == 'inlet' or 'outlet':\r\n self.foundPatch =True\r\n break",
"def test05(self):\n a = np.arange(1, 11) > 5\n b = bcolz.carray(a)\n wt = a.nonzero()[0].tolist()[2:]\n cwt = [i for i in b.wheretrue(skip=2)]\n # print \"numpy ->\", a.nonzero()[0].tolist()[2:]\n # print \"where ->\", [i for i in b.wheretrue(skip=2)]\n self.assertTrue(wt == cwt, \"wheretrue() does not work correctly\")",
"def has_activations(self):\n # pylint: disable=not-an-iterable\n for _ in self.activations:\n return True\n return False",
"def state_update_aggregation_modes(self):\n return self._state_update_aggregation_modes"
] | [
"0.65424186",
"0.47628143",
"0.4661641",
"0.4639534",
"0.4568647",
"0.4535897",
"0.45355004",
"0.45338643",
"0.45336056",
"0.452387",
"0.45226544",
"0.44983009",
"0.4493752",
"0.4489318",
"0.44724113",
"0.4471887",
"0.4463261",
"0.44590876",
"0.4454553",
"0.4453565",
"0.44474912",
"0.44419938",
"0.44386578",
"0.4433974",
"0.4425467",
"0.44239682",
"0.4420891",
"0.4410832",
"0.44064838",
"0.44059873"
] | 0.48905343 | 1 |
Getter function to retrieve the per unit bounds of a given component for given snapshots and possible subset of elements (e.g. nonextendables). Depending on the attr, you can further specify the bounds of the variable you are looking at, e.g. p_store for storage units. | def get_bounds_pu(n, c, sns, index=None, attr=None):
min_pu_str = nominal_attrs[c].replace("nom", "min_pu")
max_pu_str = nominal_attrs[c].replace("nom", "max_pu")
max_pu = get_switchable_as_dense(n, c, max_pu_str, sns)
if c in n.passive_branch_components:
min_pu = -max_pu
elif c == "StorageUnit":
min_pu = pd.DataFrame(0, max_pu.index, max_pu.columns)
if attr == "p_store":
max_pu = -get_switchable_as_dense(n, c, min_pu_str, sns)
if attr == "state_of_charge":
max_pu = expand_series(n.df(c).max_hours, sns).T
min_pu = pd.DataFrame(0, *max_pu.axes)
else:
min_pu = get_switchable_as_dense(n, c, min_pu_str, sns)
if index is None:
return min_pu, max_pu
else:
return min_pu.reindex(columns=index), max_pu.reindex(columns=index) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bounds():\n return [0.00], [1.00]",
"def get_bounds(self):\n return ([self.t_min] * self.dim,[self.t_max] * self.dim)",
"def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages",
"def get_geo_extents(nc, possible_units, std_name, axis_name, short_name):\n\n geo_extent_vars = {}\n geo_extent_units = []\n\n # variables must have units\n for var in nc.get_variables_by_attributes(units=lambda x: x is not None):\n \n geo_extent_vars[var.name] = 0\n # units in this set\n if var.units in possible_units:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n\n # standard name\n if hasattr(var, 'standard_name') and var.standard_name == std_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n # axis of \"X\"\n if hasattr(var, 'axis') and var.axis == axis_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n \n\n if var.name == std_name or var.name == short_name:\n geo_extent_vars[var.name] += 1\n geo_extent_units.append(var.units)\n\n if len(geo_extent_vars) == 0:\n return\n\n # filter out any zero scores\n geo_extent_vars = dict(filter(lambda x: x[1]>0, geo_extent_vars.items()))\n\n # sort by criteria passed\n final_geo_vars = sorted(geo_extent_vars, key=lambda x: geo_extent_vars[x], reverse=True)\n\n obs_mins = [np.nanmin(nc.variables[var]) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n obs_maxs = [np.nanmax(nc.variables[var]) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n\n # Let's just pick one\n geo_vals = nc.variables[final_geo_vars[0][:]]\n if geo_vals.size == 1:\n obs_res = [0.0]\n else:\n obs_res = [np.nanmean(np.diff(nc.variables[var])) for var in final_geo_vars if not np.isnan(nc.variables[var]).all()]\n\n geo_min = round(float(min(obs_mins)), 5)\n geo_max = round(float(max(obs_maxs)), 5)\n geo_extent_units = [nc.variables[k].units for k, v in geo_extent_vars.items()][0]\n geo_res = \"{} {}\".format(round(float(abs(np.mean(obs_res))), 5), geo_extent_units)\n\n print('<attribute name=\"geospatial_{}_min\" value=\"{}\" />'.format(short_name, geo_min))\n print('<attribute name=\"geospatial_{}_max\" value=\"{}\" />'.format(short_name, geo_max))\n print('<attribute name=\"geospatial_{}_resolution\" value=\"{}\" />'.format(short_name, geo_res))\n print('<attribute name=\"geospatial_{}_units\" value=\"{}\" />'.format(short_name, geo_extent_units))",
"def compute_bounds(self, space):\n bounds = np.zeros((len(space), 2))\n\n for idx, param in enumerate(space):\n\n if TYPE[param[\"type\"]] is TYPE.FLOAT or \\\n TYPE[param[\"type\"]] is TYPE.INTEGER:\n bounds[idx] = (param[\"min\"], param[\"max\"])\n\n elif TYPE[param[\"type\"]] is TYPE.DISCRETE or \\\n TYPE[param[\"type\"]] is TYPE.DISCRETE:\n bounds[idx] = (0, len(param['values']))\n\n return bounds",
"def get_bounds():\n bounds = [\n (0.1, 0.5), # Omega_m\n (0.05, 0.15) # beta\n ]\n return np.array(bounds)",
"def get_bounds(p_state, idx_image=-1, idx_chain=-1):\n _min = (3*ctypes.c_float)()\n _max = (3*ctypes.c_float)()\n _Get_Bounds(ctypes.c_void_p(p_state), _min, _max,\n ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n return [_min[i] for i in range(3)], [_max[i] for i in range(3)]",
"def axis_bounds(pc, axis):\n try:\n bounds = pc.bounds\n except AttributeError:\n bounds = pc\n \n return tuple([getattr(bounds, b + axis) for b in ('min', 'max')])",
"def unit_bounds(dimension):\n\n return [-1.0, 1.0] * dimension",
"def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)",
"def get_bounds(self):\n log.debug(str(inspect.stack()[1][3]) + \"--> OC.get_bounds()\")\n\n # TODO: Move the operation out of here.\n\n xmin = Inf\n ymin = Inf\n xmax = -Inf\n ymax = -Inf\n\n # for obj in self.object_list:\n for obj in self.get_list():\n try:\n gxmin, gymin, gxmax, gymax = obj.bounds()\n xmin = min([xmin, gxmin])\n ymin = min([ymin, gymin])\n xmax = max([xmax, gxmax])\n ymax = max([ymax, gymax])\n except Exception as e:\n log.warning(\"DEV WARNING: Tried to get bounds of empty geometry. %s\" % str(e))\n\n return [xmin, ymin, xmax, ymax]",
"def _get_prod_bounds(self, comp):\n cap_res = comp.get_capacity_var() # name of resource that defines capacity\n maximum = comp.get_capacity(None, None, None, None)[0][cap_res]\n # TODO minimum!\n # producing or consuming the defining resource?\n if maximum > 0:\n return 0, maximum, pyo.NonNegativeReals\n else:\n return maximum, 0, pyo.NonPositiveReals",
"def bounds(self): # -> tuple[()]:\n ...",
"def GetBounds(self):\n ...",
"def GetBounds(self):\n ...",
"def GetBounds(self):\n ...",
"def GetBounds(self):\n ...",
"def GetBounds(self):\n ...",
"def GetBounds(self):\n ...",
"def GetBounds(self):\n ...",
"def GetBounds(self):\n ...",
"def get_physical_bounds(dim):\n dim = basename(dim)\n\n if dim == \"coszen\":\n trunc_low = -1.\n trunc_high = 1.\n\n elif dim == \"energy\":\n trunc_low = 0.\n trunc_high = None\n\n elif dim == \"azimuth\":\n trunc_low = 0.\n trunc_high = 2*np.pi\n\n else:\n raise ValueError(\"No physical bounds for dimension '%s' available.\"%dim)\n\n return trunc_low, trunc_high",
"def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages",
"def getFluidAttr(*args, attribute: AnyStr=\"\", lowerFace: bool=True, xIndex: int=0, xvalue:\n bool=True, yIndex: int=0, yvalue: bool=True, zIndex: int=0, zvalue: bool=True,\n **kwargs)->None:\n pass",
"def test_b2_calc_bounds_column(self):\n type_of = \"c\"\n self.assert_calc_bounds(type_of)\n\n\n\n # config.NR",
"def bounds(self):\n return self.substrates.bounds",
"def _with_attr0(self, attr, val=None):\r\n\r\n if attr in ['x', 'y', 'z']:\r\n TOL = 1.0E-8\r\n\r\n if isinstance(val,list):\r\n tmp = RealList(val, TOL)\r\n return CollectionIp(ip for ip in self if getattr(ip,attr) in tmp)\r\n\r\n if isinstance(val, tuple):\r\n start = val[0]\r\n end = val[1]\r\n return CollectionIp(ip for ip in self if getattr(ip,attr)>start-TOL and getattr(ip,attr)<end+TOL)\r\n\r\n return CollectionIp(ip for ip in self if abs(getattr(ip,attr)-val)<TOL)\r\n\r\n assert False",
"def bounds(self):\n \n return self.osmdb.bounds()",
"def getPropertyOptimalRange(self, identifier):\n if identifier in self.properties.keys():\n return self.layer.getProperty(identifier).getOptimalRange()\n return (None, None)",
"def get_bbox(self, primitive):\n accessor = primitive.attributes.get(\"POSITION\")\n return accessor.min, accessor.max"
] | [
"0.57321733",
"0.5713874",
"0.5637598",
"0.5618561",
"0.559979",
"0.5563755",
"0.5507351",
"0.55036366",
"0.5473687",
"0.5466363",
"0.5459925",
"0.5447227",
"0.5392807",
"0.53789955",
"0.53789955",
"0.53789955",
"0.53789955",
"0.53789955",
"0.53789955",
"0.53789955",
"0.53789955",
"0.5373344",
"0.5357414",
"0.53367597",
"0.5333067",
"0.532073",
"0.5247782",
"0.5154242",
"0.5128195",
"0.51157314"
] | 0.6485828 | 0 |
dump model for later prediction | def dump_model(self): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")",
"def savemodel(self, fname):\n if not fname.endswith('.gz'):\n fname += '.gz'\n D = {'clf':self.clf, 'vocab':self.vocab,\n 'idxlabelmap':self.labelmap}\n with gzip.open(fname, 'w') as fout:\n dump(D, fout)\n print 'Save model into file: {}'.format(fname)",
"def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored('model.joblib saved locally', 'green'))",
"def save_model(self):\n pickle.dump(self, open(\"Logistic_Regression_Model.pkl\", \"wb\"))",
"def dump_model(model, filename):\n import pickle\n logging.info(\"Dumping model into model.pkl\")\n with open(filename, 'w') as dump_file:\n pickle.dump(model, dump_file)",
"def save_model(model, model_filepath):\n dump(model, model_filepath)",
"def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")\n # self.Model_claim.save(\"Model_claim.h5\")\n # Model_made = self.Model_made\n # Model_claim = self.Model_claim\n # self.Model_made = None\n # self.Model_claim = None\n with open('pricing_model.p', 'wb') as target:\n pickle.dump(self, target)\n\n # self.Model_made = Model_made\n # self.Model_claim = Model_claim\n\n # zipObj = ZipFile(\"model.zip\",\"w\")\n # zipObj.write(\"Model_made.h5\")\n # zipObj.write(\"Model_claim.h5\")\n # zipObj.write(\"pricing_model.p\")\n # zipObj.close()",
"def save_model(self):\n torch.save(self.get_params(), 'code/lr-model.pt')",
"def serialize_model(model, X, y):\n\n # Fitting the model to the full dataset\n model.fit(X, y)\n # Pickling\n pkl_filename = 'rehosp_model.pkl'\n with open(pkl_filename, 'wb') as file:\n pickle.dump(model, file)\n\n return",
"def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")",
"def save_model(self):\n\n print('Save model')\n self.feature_extractor.save_weights(\n self.path_save_model + self.name_model + '.h5')\n\n print('Mean and std')\n np.save(self.path_save_model + 'mean.npy', self.mean)\n np.save(self.path_save_model + 'std.npy', self.std)",
"def save_model(self, filename):\n\t\tpickle.dump(self, open(filename, 'wb'))\n\t\tprint('Model saved in',filename)",
"def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)",
"def save_model(model, model_filepath, protocol=0):\n # using pickle to store trained classifier\n #pickle.dump(model,open(model_filepath,'wb'))\n \n file = gzip.GzipFile(model_filepath, 'wb')\n file.write(pickle.dumps(model, protocol))\n file.close()\n \n pass",
"def dump(self, model_path):\n pickle.dump(self.scaler, gzip.open(os.path.join(model_path, 'scaler.pkl.gz'), 'w'),\n protocol=pickle.HIGHEST_PROTOCOL)\n# pickle.dump(self.mapper, gzip.open(os.path.join(model_path, 'mapper.pkl.gz'),'w'),\n# protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.batcher, gzip.open(os.path.join(model_path, 'batcher.pkl.gz'), 'w'),\n protocol=pickle.HIGHEST_PROTOCOL)",
"def save_model(self, step):\n\n # file_name = params['name']\n # pickle.dump(self, gzip.open(file_name, 'wb'))",
"def save_model(model, filepath):\n try:\n dump(model, filepath)\n except Exception as e:\n print(e)\n print('Failed to pickle model.')",
"def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)",
"def save_model(model, filename):\n with open(filename, 'wb') as f:\n joblib.dump(model, f)",
"def save_model(self, filename='model.pt'):\n checkpoint = {\n 'input_size': self.linear_layers[0].in_features,\n 'output_size': self.linear_layers[-1].out_features,\n 'hidden_layers': [layer.out_features for layer in self.linear_layers[:-1]],\n 'state_dict': self.state_dict()}\n torch.save(checkpoint, filename)",
"def save_model(self, name): \n torch.save(dict(params=self.model.encoder.state_dict()), osp.join(self.args.save_path, name + '.pth'))",
"def save_model(model, model_path):\n pickle.dump(model.best_estimator_,open(model_path,'wb'))",
"def save(self, model_name = 'mr-senti'):\n\n\t\tjoblib.dump(self.classifier, os.path.join('model', model_name + '.pkl'))",
"def save_pipeline(model_to_persist):\n\n save_file_name = 'model.pkl'\n save_path = configuracion.TRAINED_MODEL_DIR / save_file_name\n joblib.dump(model_to_persist, save_path)\n\n print('saved pipeline')",
"def save_model(self, filename) -> None:\n #t.save(self, filename)\n traced=t.jit.script(self)\n t.jit.save(traced,filename)",
"def save_model(model, filename=\"model.json\"):\n model_json = model.to_json()\n with open(filename, \"w\") as json_file:\n json_file.write(model_json)\n model.save_weights(\"model.h5\")\n print(\"Saved model to disk\")",
"def save_model(self, model_file):\n net_params = self.get_policy_param() # get model params\n # torch.save(net, save_epoch + '.pkl') # 保存整个网络\n torch.save(net_params, model_file)",
"def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name, model in self.models.items():\n print(\"MODEL NAME = {}\".format(model_name))\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n to_save = model.state_dict()\n if model_name == 'encoder':\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.height\n to_save['width'] = self.width\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.model_optimizer.state_dict(), save_path)",
"def saveModel(self):\n with open(self.modelSaveFile, 'wb') as f:\n pickle.dump(self.values, f, pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.policy, f, pickle.HIGHEST_PROTOCOL)"
] | [
"0.7894731",
"0.7692822",
"0.76911736",
"0.7653883",
"0.7623206",
"0.7538876",
"0.7530988",
"0.75119674",
"0.75119317",
"0.7468773",
"0.7465218",
"0.74468184",
"0.7443801",
"0.7432671",
"0.7335412",
"0.730605",
"0.730019",
"0.723542",
"0.72077006",
"0.7206754",
"0.72036034",
"0.7193031",
"0.71852654",
"0.7182393",
"0.7178578",
"0.7167984",
"0.71668667",
"0.7165206",
"0.71605515",
"0.7156508"
] | 0.82823545 | 0 |
Get rid of the database again after each test | def tearDown(self):
app.db.drop_database('local')
app.db.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tearDown(self):\n get_connection().drop_database('test_monstor_registration')",
"def teardown():\n teardown_db()",
"def tearDown(self):\n os.close(self.db_fd)\n os.unlink(mainPyUnit.app.config['DATABASE'])",
"def tearDown(self):\n os.close(self.db_fd)\n os.unlink(pegasus.app.config['DATABASE'])",
"def tearDown(self):\n self.db.drop_all()\n pass",
"def tearDown(self):\n db.drop_all()",
"def tearDown(self):\n db.drop_all()",
"def tearDown(self):\r\n empty_db()",
"def tearDown(self):\r\n empty_db()",
"def tearDown(self):\r\n empty_db()",
"def tearDown(self):\r\n empty_db()",
"def tearDown(self):\r\n empty_db()",
"def tearDown(self):\r\n empty_db()",
"def tearDown(self):\n os.close(self.db_fd)\n os.unlink(app.app.config['DATABASE'])",
"def tearDown(self):\n os.close(self.db_fd)\n os.unlink(closet.app.config['DATABASE'])",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\r\n testing.tearDown()\r\n empty_db()",
"def tearDown(self):\n with app.app_context():\n db = app.db.get_db()\n cur = db.cursor()\n with app.open_resource('sql/drop_tests.sql', mode='r') as f:\n cur.execute(f.read())\n db.commit()\n cur.close()\n db.close()",
"def tearDown(self):\n #db.session.remove()\n db.drop_all()",
"def tearDown(self) -> None:\n things.db.session.remove()\n things.db.drop_all()",
"def tearDown(self):\n os.remove(self._dbfile)",
"def tearDown(self):\n\t\twith self.app.app_context():\n\t\t\tdb.session.remove()\n\t\t\tdb.drop_all()",
"def tearDown(self):\n\t\twith self.app.app_context():\n\t\t\tdb.session.remove()\n\t\t\tdb.drop_all()"
] | [
"0.8635427",
"0.85302615",
"0.8512873",
"0.8491214",
"0.84900934",
"0.8465409",
"0.8465409",
"0.8465091",
"0.8465091",
"0.8465091",
"0.8465091",
"0.8465091",
"0.8465091",
"0.8444213",
"0.8435058",
"0.8433075",
"0.8433075",
"0.8433075",
"0.8433075",
"0.8433075",
"0.8433075",
"0.8433075",
"0.8433075",
"0.8433075",
"0.83827406",
"0.8374064",
"0.83240384",
"0.8253249",
"0.8249752",
"0.8249752"
] | 0.8665355 | 0 |
Test get itinerary for a user | def test_getItinerary(self):
rv = self.app.get('/itinerarylistshells/alex')
for i in range(5):
assert "itin"+str(i+1) in str(rv.data)
rv = self.app.get('/itinerarylistshells/bbbb')
assert "Invalid username" in str(rv.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_api_user_get(self):\n pass",
"def test_func(self, user, **kwargs):\n return self.get_object().authorized_user(user)",
"def test_read_user(self):\n pass",
"def test_resource_user_resource_get_current_user_get(self):\n pass",
"def test_resource_user_resource_get_user_get(self):\n pass",
"def test_user(self):\n return True",
"def test_user_id_get(self):\n pass",
"def test_user_information_request(self):\n pass",
"def test_getItineraryFromId(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + date['date'])\n invuid = '00000000000000000000000'\n\n rv = self.json_get('/getItineraryFromId/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getItineraryFromId/alex', {'uid': invuid})\n assert 'Itinerary not found' in str(rv.data)\n\n rv = self.json_get('/getItineraryFromId/alex', {'uid': uid})\n assert uid in str(rv.data)",
"def test_text_get_logged_in(self):\n\n resource = Resource(AnonymousTextHandler)\n request = HttpRequest()\n user = User.objects.get(pk=1)\n setattr(request, 'user' , user)\n request.method = 'GET'\n \n response = resource(request, key='text_key_3', emitter_format='json')\n self.assertEquals(200, response.status_code)",
"def test_list_user(self):\n pass",
"def test_get_user(self):\n print('(' + self.test_get_user.__name__+')', self.test_get_user.__doc__)\n # test for patient\n self.assertDictContainsSubset(\n self.connection.get_user(PATIENT_USERNAME), PATIENT)\n # test for doctor\n self.assertDictContainsSubset(\n self.connection.get_user(DOCTOR_USERNAME), DOCTOR)",
"def step_impl(context):\n response = test_app.get(f\"/get_user_account/{user_id}\")\n assert response.json() == []",
"def test_users_get(self):\n pass",
"def test_users_get(self):\n pass",
"def test_get_users(self):\n pass",
"def test_get_user_info(self):\n \n name = app.get_user_info(\"skullphish\", name=True)\n vocab_count = app.get_user_info(\"skullphish\", vocab_count=True)\n userId = app.get_user_info(\"skullphish\", userId=True)\n \n self.assertTrue(name == \"damian rodbari\")\n self.assertEqual(vocab_count,0)\n self.assertTrue(userId == ObjectId(\"5bb8a0c006f1f8105bc3bb23\"))",
"def inner_test(user: models.User):\n pass",
"def getInterestedUsers():",
"def test_get_user_by_emailuser_email_get(self):\n pass",
"def test_user_id_identities_get(self):\n pass",
"def test_retrive_user(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data['email'], self.user.email)\n self.assertEqual(res.data['name'], self.user.name)\n self.assertNotIn('password', res.data)",
"def test_get_all_by_one_user(self):\n response = self.client.get('/api/v1/users/5/parcels')\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"], \"User does not have orders\", msg = \"User orders\")\n self.assertEqual(response.status_code, 200)",
"def sample_user(self):",
"def test_show(self):\n\n with self.client as c:\n response = c.get(f\"/users/{self.testuser.id}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@alice\", str(response.data))",
"def test_user_info(user_keys):\n\n pyme_instance = PyMe(206946886)\n response = pyme_instance.display()\n\n assert isinstance(response, dict)\n assert response['id'] == 206946886, \"The ID of the user should be in the response\"\n assert set(user_keys).issubset(response.keys()), \"All keys should be in the response\"",
"def test_user_get_topteams():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/top/10')\n assert r.status_code == 200\n destroy_ctfd(app)",
"def test_func(self, user):\n return self.get_object().admin == user",
"def step_impl(context):\n response = test_app.get(f\"/get_user_account/{user_id}\").json()\n assert response != []\n assert response[0][\"userid\"] == user_id\n assert response[0][\"balance\"] == 100",
"def user():"
] | [
"0.68575686",
"0.6680141",
"0.66680706",
"0.6619506",
"0.66056466",
"0.6571949",
"0.65636194",
"0.6502335",
"0.64398944",
"0.6393863",
"0.6360309",
"0.6296977",
"0.62935597",
"0.62925845",
"0.62925845",
"0.62671554",
"0.62409383",
"0.6222767",
"0.6201795",
"0.61806047",
"0.61705387",
"0.61552054",
"0.6149753",
"0.61182326",
"0.6114324",
"0.611001",
"0.6102731",
"0.60946214",
"0.6058098",
"0.6049611"
] | 0.7174526 | 0 |
Test retrieval of events for an itinerary | def test_getEventsForItinerary(self):
date = {'date': '2015-08-21T00:00:00.000Z'}
events = []
for i in range(10):
hh = str(i)
events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',
end = '2015-08-21T'+hh+':25:00.000Z',
date = '2015-08-21T00:00:00.000Z'))
rv = self.json_get('/getEventsForItinerary/bbbb', date)
assert 'Invalid username' in str(rv.data)
rv = self.json_get('/getEventsForItinerary/alex', date)
assert 'Itinerary for the day not found' in str(rv.data)
# Create sample itinerary for alex for the event day
self.json_post('/createItinerary/alex', dict(
name = 'New Day',
date = date['date']
))
rv = self.json_get('/getEventsForItinerary/alex', date)
assert '{"events": []}' in str(rv.data)
for e in events:
rv = self.json_post('/createEvent/alex', e)
uid = str('alex_' + e['start'] + e['end'])
assert uid in str(rv.data)
rv = self.json_get('/getEventsForItinerary/alex', date)
for e in events:
uid = str('alex_' + e['start'] + e['end'])
assert uid in str(rv.data)
assert e['start'] in str(rv.data)
assert e['end'] in str(rv.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_events(self):\n events = gracedb.events()\n for event in events:\n self.assertTrue('graceid' in event)\n break",
"def test_api_predictor_events_get(self):\n pass",
"def test_query_events(self):\n query_list = {\n 'q': 'test',\n 'type': 'show'\n }\n results = query_events(query_list)\n events = list(results['events'])\n showcase = list(results['showcase_events'])\n self.assertTrue(self.event_show1 in events)\n self.assertTrue(self.event_show2 in showcase)\n self.assertFalse(self.event_film in events)",
"def test_get_future_events(self):\n events = list(get_future_events())\n self.assertFalse(self.event_show1 in events)\n self.assertTrue(self.event_show2 in events)",
"def test_describe_event(self):\n pass",
"def test_future_event(self):\n pass",
"def test_gameHandleEvents(self):\n # this kinda gonna be reiterating the other tests??\n # the tests of all the individual methods below make this test work\n pass",
"def test_started_but_not_finished_event_appears_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2020-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2020, 2, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2020, 12, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n with freeze_time(\"2020-05-01\"):\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=(\r\n \"Убедитесь, что начавшееся, но не \"\r\n \"закончившееся событие показывается в списке.\"\r\n ),\r\n )",
"def test_getEventsFromId(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + events[0]['start'] + events[0]['end'])\n invuid = '00000000000000000000000'\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventFromId/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventFromId/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n rv = self.json_get('/getEventFromId/alex', {'uid': uid})\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)",
"async def test_estop_callback(\n decoy: Decoy,\n) -> None:\n engine_store = decoy.mock(cls=EngineStore)\n\n subject = get_estop_listener(engine_store=engine_store)\n\n decoy.when(engine_store.current_run_id).then_return(None, \"fake_run_id\")\n\n disengage_event = EstopStateNotification(\n old_state=EstopState.PHYSICALLY_ENGAGED, new_state=EstopState.LOGICALLY_ENGAGED\n )\n\n subject(disengage_event)\n\n engage_event = EstopStateNotification(\n old_state=EstopState.LOGICALLY_ENGAGED, new_state=EstopState.PHYSICALLY_ENGAGED\n )\n\n subject(engage_event)\n\n subject(engage_event)\n\n decoy.verify(engine_store.engine.estop(maintenance_run=False), times=1)",
"def test_event_page(self):\n res = self.client.get('/events')\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'Upcoming Events' in data",
"def test_getItinerary(self):\n rv = self.app.get('/itinerarylistshells/alex')\n for i in range(5):\n assert \"itin\"+str(i+1) in str(rv.data)\n\n rv = self.app.get('/itinerarylistshells/bbbb')\n assert \"Invalid username\" in str(rv.data)",
"def test_past_event(self):\n pass",
"def test_load_events(self):\n command = '{0}'.format(\n os.path.join(self.datadir, 'monol_testA.evt'))\n hen.read_events.main(command.split())\n new_filename = self.first_event_file\n ev = hen.io.load_events(new_filename)\n assert hasattr(ev, 'header')\n assert hasattr(ev, 'gti')",
"def visit_event(self, event):",
"def test_events(self):\n\n response = self.client.get(reverse('events'))\n\n assert response.status_code == 200",
"def test_load_response_descriptor_events_event_event_resource(self):\n pass",
"def test_mentor_can_list_available_events_in_his_city(self):\r\n\r\n city = CityFactory(name=\"Вермонт\")\r\n other_city = ViewAfishaTests.city\r\n user = UserFactory(profile__city=city)\r\n client = self.return_authorized_user_client(user)\r\n EventFactory.create_batch(10, city=city)\r\n EventFactory.create_batch(100, city=other_city)\r\n\r\n response_data = client.get(path=EVENTS_URL).data\r\n results = response_data.get(\"results\")\r\n\r\n self.assertEqual(\r\n len(results),\r\n 10,\r\n msg=(\r\n \"Проверьте что пользователь видит все доступные события \"\r\n \"в городе\"\r\n ),\r\n )",
"def test_get_events(self):\n\n request_params = {\n \"token\": EVENTBRITE_API_KEY,\n \"location.latitude\": \"37.4192008972\",\n \"location.longitude\": \"-122.057403564\",\n \"location.within\": \"20mi\",\n \"sort_by\": \"date\"\n }\n url_encoded_request_params = _update_urlencode_request_params(\"103,109\", 1, request_params)\n events_list, page_count = _get_events(url_encoded_request_params)\n self.assertTrue(type(events_list) is list)\n self.assertTrue(type(page_count) is int)",
"def test_finished_events_doesnt_appear_in_events_list(self):\r\n user = ViewAfishaTests.mentor\r\n client_user = self.return_authorized_user_client(user)\r\n with freeze_time(\"2010-01-01\"):\r\n EventFactory(\r\n city=user.profile.city,\r\n start_at=datetime(2011, 1, 1, tzinfo=pytz.utc),\r\n end_at=datetime(2012, 1, 1, tzinfo=pytz.utc),\r\n )\r\n num_events = Event.objects.count()\r\n self.assertEqual(\r\n num_events,\r\n 1,\r\n msg=\"Убедитесь, что тест смог создать событие в прошлом\",\r\n )\r\n\r\n response_data = client_user.get(EVENTS_URL, format=\"json\").data\r\n num_events = response_data.get(\"count\")\r\n\r\n self.assertEqual(\r\n num_events,\r\n 0,\r\n msg=\"Убедитесь, что события в прошедшие события не показываются\",\r\n )",
"def test_get_Events(self):\n event_a = Event.objects.create(title=\"christmas party\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n event_b = Event.objects.create(title=\"Spring clean\",\n start=datetime.strptime(\"2020-04-03 09:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-04-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=7),\n recurrence_interval=0, description=\"get the church clean\", website_publish=True)\n event_b.invites.add(self.comms_grp)\n event_b.save()\n client = APIClient()\n resp = client.get('/api/events')\n self.assertEqual(resp.status_code, 200)\n events = Event.objects.all()\n self.assertEqual(events[0].title, json.loads(resp.content)[1]['title'])\n self.assertEqual(events[1].title, json.loads(resp.content)[0]['title'])",
"def test_module(client, first_fetch_time, event_type_filter):\n since_time, _ = parse_date_range(first_fetch_time, date_format=DATE_FORMAT, utc=True)\n client.get_events(since_time=since_time, event_type_filter=event_type_filter)\n\n # test was successful\n return 'ok'",
"def run():\n\n while True:\n\n # get event, blah\n event_name, event_data = revent.get_event(block=True, timeout=5)\n\n if event_name is not None:\n print 'received: %s' % event_name\n\n if event_name.endswith('_oembed_details'):\n handle_new_oembed_details(event_data)\n\n elif event_name == 'new_tweet':\n handle_new_tweet(event_data)\n\n # and we're done\n assert revent.verify_msg(event_name, event_data), \\\n \"Could not verify %s\" % event_name",
"def test_events(self):\n\n resp = self.client.get('/events?page=1&user_categories=113%2C105%2C104 ')\n self.assertTrue('next_events_url' in resp.context)\n self.assertTrue('previous_events_url' in resp.context)\n self.assertTrue('events_list' in resp.context)\n self.assertTrue('previous' in resp.context)\n self.assertTrue('next' in resp.context)\n self.assertEqual(resp.status_code, 200)",
"async def test_api_get_event_listeners(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.get(const.URL_API_EVENTS)\n data = await resp.json()\n\n local = hass.bus.async_listeners()\n\n for event in data:\n assert local.pop(event[\"event\"]) == event[\"listener_count\"]\n\n assert len(local) == 0",
"def test_get_next_event():\n result = schedule.get_next_event()\n\n if result:\n assert result['name'], 'Result has no `name` key'\n assert result['date'], 'Result has not `date` key'\n\n assert isinstance(result['name'], str), 'name is not a string'\n assert isinstance(result['date'], arrow.Arrow), 'date is not a date'",
"def test_get_event(self):\n event = Event(self.client, 123, {})\n\n self.assertEqual(event.action, \"ticket_create\")\n self.assertEqual(event.created, datetime(2018, 1, 1, 0, 1, 1))\n self.assertEqual(event.duration, 300.56)\n self.assertIsNotNone(event.entity)\n self.assertEqual(event.id, 123)\n self.assertEqual(event.message, \"None\")\n self.assertIsNone(event.percent_complete)\n self.assertIsNone(event.rate)\n self.assertTrue(event.read)\n self.assertIsNotNone(event.secondary_entity)\n self.assertTrue(event.seen)\n self.assertIsNone(event.status)\n self.assertIsNone(event.time_remaining)\n self.assertEqual(event.username, \"exampleUser\")",
"def test_calendar_query_todo_alarm(self):\n raise SkipTest(\"test unimplemented\")",
"def find_events(handler_input):\n \n slots = handler_input.request_envelope.request.intent.slots\n \n selected_event = slots['event_cat'].resolutions.resolutions_per_authority[0].values[0].value.name\n \n events_list = requests.get(\"http://3.17.148.9:8080/events\")\n length = 0\n\n if events_list.status_code == 200:\n events_list = events_list.content\n details = json.loads(events_list.decode('utf-8'))\n length = len(details)\n\n events = dict()\n response_text = \"\"\n for i in range(length):\n if details[i][\"event_category\"].lower() == selected_event:\n cat = details[i]['event']\n if cat not in events:\n events[cat] = 1\n else:\n events[cat] += 1\n \n for event, count in events.items():\n response_text += str(count) + + event+\", \"\n\n speech_text = \"I found {}\".format(response_text)\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"I found {}\".format(response_text), speech_text)).set_should_end_session(False)\n return handler_input.response_builder.response",
"def test_events_list(self):\n response = self.client.get(url_for(\n 'issues.eventsresourse',\n issue_number=self.TARGET_ISSUE_NUMBER))\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json)"
] | [
"0.6637942",
"0.6494425",
"0.6466185",
"0.61733973",
"0.6157892",
"0.6099789",
"0.6084765",
"0.6012242",
"0.59584683",
"0.59145075",
"0.5910777",
"0.5877571",
"0.587388",
"0.5854526",
"0.58476",
"0.5831675",
"0.58038217",
"0.5803201",
"0.5798981",
"0.5787392",
"0.57758427",
"0.57392323",
"0.5735592",
"0.57324666",
"0.56921417",
"0.5684835",
"0.5675359",
"0.5668529",
"0.5668052",
"0.5645079"
] | 0.7012843 | 0 |
Test retrieval of itinerary data from uid | def test_getItineraryFromId(self):
date = {'date': '2015-08-21T00:00:00.000Z'}
# Create sample itinerary for alex for the event day
self.json_post('/createItinerary/alex', dict(
name = 'New Day',
date = date['date']
))
uid = str('alex_' + date['date'])
invuid = '00000000000000000000000'
rv = self.json_get('/getItineraryFromId/bbbb', {'uid': uid})
assert 'Invalid username' in str(rv.data)
rv = self.json_get('/getItineraryFromId/alex', {'uid': invuid})
assert 'Itinerary not found' in str(rv.data)
rv = self.json_get('/getItineraryFromId/alex', {'uid': uid})
assert uid in str(rv.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_user_id_get(self):\n pass",
"def user(self, uid):",
"def test_user_id_identities_get(self):\n pass",
"def testFetchBodyStructureUID(self):\n return self.test_fetchBodyStructure(1)",
"def lookup(self, user_id):\n raise NotImplementedError",
"def get_Iu(uid):\n try:\n return len(trainset.ur[trainset.to_inner_uid(uid)])\n except ValueError: # user was not part of the trainset\n return 0",
"def test_getItinerary(self):\n rv = self.app.get('/itinerarylistshells/alex')\n for i in range(5):\n assert \"itin\"+str(i+1) in str(rv.data)\n\n rv = self.app.get('/itinerarylistshells/bbbb')\n assert \"Invalid username\" in str(rv.data)",
"def get_Iu(uid):\n try:\n return len(trainSet.ur[trainSet.to_inner_uid(uid)])\n except ValueError: # user was not part of the trainset\n return 0",
"def test_read_user(self):\n pass",
"def test_get_user_info(self):\n \n name = app.get_user_info(\"skullphish\", name=True)\n vocab_count = app.get_user_info(\"skullphish\", vocab_count=True)\n userId = app.get_user_info(\"skullphish\", userId=True)\n \n self.assertTrue(name == \"damian rodbari\")\n self.assertEqual(vocab_count,0)\n self.assertTrue(userId == ObjectId(\"5bb8a0c006f1f8105bc3bb23\"))",
"def test_fetchUID(self):\n d = self.client.fetchUID('1:7')\n self.assertEqual(self.transport.value(), b'0001 FETCH 1:7 (UID)\\r\\n')\n self.client.lineReceived(b'* 2 FETCH (UID 22)')\n self.client.lineReceived(b'* 3 FETCH (UID 23)')\n self.client.lineReceived(b'* 4 FETCH (UID 24)')\n self.client.lineReceived(b'* 5 FETCH (UID 25)')\n self.client.lineReceived(b'0001 OK FETCH completed')\n self.assertEqual(\n self.successResultOf(d), {\n 2: {'UID': '22'},\n 3: {'UID': '23'},\n 4: {'UID': '24'},\n 5: {'UID': '25'}})",
"def test_medicians_id_get(self):\n pass",
"def test_user_info(user_keys):\n\n pyme_instance = PyMe(206946886)\n response = pyme_instance.display()\n\n assert isinstance(response, dict)\n assert response['id'] == 206946886, \"The ID of the user should be in the response\"\n assert set(user_keys).issubset(response.keys()), \"All keys should be in the response\"",
"def test_user_information_request(self):\n pass",
"def test_read_identity(self):\n pass",
"def test_for_email_attribut_by_uid(self):\n name = u\"__TestUser2__\"\n password = u\"ekERErwerwerh\"\n email = \"__TestUser2__@moinhost\"\n self.createUser(name, password, email=email)\n uid = user.getUserId(self.request, name)\n theuser = user.User(self.request, uid)\n assert theuser.email == email",
"def get(self, eventId, uid):\n raise NotImplementedError",
"def test_user_id_put(self):\n pass",
"def test_get_user_id(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n # for patient\n self.assertEqual(\n PATIENT_ID, self.connection.get_user_id(PATIENT_USERNAME))\n # for doctor\n self.assertEqual(\n DOCTOR_ID, self.connection.get_user_id(DOCTOR_USERNAME))",
"def test_read_user_identity_mapping(self):\n pass",
"def test_get_user_by_uuiduser_uuid_get(self):\n pass",
"def test_resource_user_resource_get_user_get(self):\n pass",
"def _evaluate_user_id(self, dispatcher, tracker):\n person = dispatcher.output_channel.get_person_by_id(dispatcher.sender_id)\n user = tracker.get_slot('user')\n if user is None:\n # Todo Replace self assignment\n user = person.aclattr\n\n return user",
"def sample_user(self):",
"def test_render_data_researcher_user(self):\n url = '{}?is_bigcourse=0'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course.id}))\n self.response = self.client_data_researcher.get(url)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'],[[False]])\n\n self.response = self.client_data_researcher.get(url)\n self.assertEqual(self.response.status_code, 200)\n data = json.loads(self.response.content.decode())\n self.assertEqual(len(data['data']), 12)\n self.assertEqual(\n data['data'][-1], ['[email protected]', 'student', '', '', '0/1', '0/1', 'No'])",
"def test_api_user_get(self):\n pass",
"def test_login_records_attributes(self):\n self.test_login()\n record = UserSocialAuth.objects.get(\n user=self.user, provider=self.PROVIDER_BACKEND, uid__startswith=self.PROVIDER_IDP_SLUG\n )\n attributes = record.extra_data\n assert attributes.get('urn:oid:1.3.6.1.4.1.5923.1.1.1.9') == ['[email protected]', '[email protected]']\n assert attributes.get('urn:oid:2.5.4.3') == ['Me Myself And I']\n assert attributes.get('urn:oid:0.9.2342.19200300.100.1.1') == ['myself']\n assert attributes.get('urn:oid:2.5.4.20') == ['555-5555']\n # Phone number",
"def get_primary_for(userid):",
"def get_primary_for(userid):",
"def get_standin_for(userid):"
] | [
"0.64463186",
"0.63672686",
"0.63439983",
"0.59777427",
"0.5923015",
"0.58660334",
"0.584328",
"0.5841248",
"0.5800688",
"0.57749987",
"0.5771717",
"0.57595974",
"0.57239634",
"0.5602",
"0.5544797",
"0.55285424",
"0.5517793",
"0.55099833",
"0.55007076",
"0.54977375",
"0.5481871",
"0.5474667",
"0.5470077",
"0.5458523",
"0.54559684",
"0.54540783",
"0.5449269",
"0.5446691",
"0.5446691",
"0.54351074"
] | 0.6408533 | 1 |
Constructor of bot class. The token is either passed in argument or read in the config file. | def __init__(self, token):
self.token = token # Slack bot token
self.rtm = None # Real Time Messaging connection
self.api = { # Dict of available bot commands
"help": self.help, # For the help command
"rank": self.rank, # For the rank command (will show playing user score)
"ranking": self.ranking, # For the ranking command (will show full ranking)
"ask" : self.question # For the question command (will ask a question)
}
self.currentAskedQuestions = {} # Asked questions during the game (not yet answered)
self.dirname = os.path.join(os.path.dirname(__file__), "ressources") # Path to ressources as bot property
json_data = codecs.open("{}/questions.json".format(self.dirname), "r", "utf-8") # Open question file to get questions (thx captain obvious)
self.questions = json.load(json_data) # Read the json questions file and create a dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, token: str):\n\n self.__token = token\n self.__message_sender = None\n self.bus_controller = None\n self.__updater = None\n self.__dp = None\n self.__users = dict() # dictonary {id: user} (str: User)\n self.__gui = None",
"def __init__(self, TOKEN: str) -> None:\n super(TelegramBot, self).__init__()\n self.TOKEN = TOKEN\n self.URL = f\"https://api.telegram.org/bot{TOKEN}/\"\n logging.debug(\"Telegram Bot ready\")",
"def __init__(self, token, connection):\n self.__token = token\n self.__connection = connection\n self.__message = None\n self.__admin_id = None\n self.__username = None",
"def __init__(self, token):\n\n self.token = token",
"def __init__(self, token):\n self.token = token",
"def __init__(self, token):\n self.token = token",
"def __init__(self, token):\n self.token = token",
"def __init__(self, token, dir_download= './downloads'):\n self.token = token\n self.api_url = \"https://api.telegram.org/bot{}/\".format(token)\n self.file_url = \"https://api.telegram.org/file/bot{}/\".format(token)\n self.dirDownloads = dir_download",
"def __init__(self, bot):\n self.bot = bot",
"def __init__(self, bot):\n self.bot = bot",
"def __init__(self, token=None, token_path=\"tokens.txt\", username=None, password=None,\n grant_type=\"api-password\", client_id=\"brandwatch-api-client\",\n api_url=\"https://api.brandwatch.com/\"):\n self.api_url = api_url\n self.oauthpath = \"oauth/token\"\n\n if token:\n self._update_by_test_auth(username, token)\n self._write_auth(token_path)\n elif username is not None and password is not None:\n self._update_by_auth(username, password, token_path, grant_type, client_id)\n self._write_auth(token_path)\n elif username is not None:\n self._read_auth(username, token_path)\n else:\n raise KeyError(\"Must provide valid token, username and password,\"\n \" or username and path to token file\")",
"def __init__(self, bot=BNBot):\n self.bot = bot",
"def __init__(self, config):\n self._slack_client = self._connect(config[\"slack_bot_token\"])\n self.bot_id = self._get_user_id()\n self.default_channel = config[\"default_channel\"]",
"def __init__(self):\n\n # This environment variable should be set before using the bot\n self.token = os.environ['STATS_BOT_TOKEN']\n\n\n # These will be checked against as substrings within each\n # message, so different variations are not required if their\n # radix is present (e.g. \"all\" covers \"/all\" and \"ball\")\n self.menu_trigger = ['/all', '/stats']\n self.loan_stats_trigger = ['/loans']\n self.il_trigger = ['/IL']\n self.assets_trigger = ['/assets']\n\n\n # Stops runtime if the token has not been set\n if self.token is None:\n raise RuntimeError(\n \"FATAL: No token was found. \" + \\\n \"You might need to specify one or more environment variables.\")\n\n # Configures logging in debug level to check for errors\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)",
"def __init__(self, *kwargs):\n self.session = requests.Session()\n self.config_path = os.path.join(\n os.path.dirname(__file__), 'config.json')\n self.load_config()\n if self.application_token == '':\n self.set_application_token()\n self.token = self.get_token()\n self.get_settings()",
"def __init__(self, bot: DreamBot) -> None:\n\n self.bot = bot",
"def __init__(self, bot: DreamBot) -> None:\n\n self.bot = bot",
"def __init__(self):\n self.token = None\n self.login()",
"def __init__(self, token, api_key):\n self.token = token\n self.api_key = api_key",
"def __init__(self, token=None):\n\n if token is None:\n raise PushoverError(\"No token supplied.\")\n else:\n self.token = token\n self.user_token = None\n self.user_device = None\n self.messages = []",
"def __init__(self, channel=None, token=None):\n token = token or getattr(settings, 'SLACK_TOKEN', None)\n if channel:\n self.channel = channel\n\n if token == None:\n raise ImproperlyConfigured(\"Set SLACK_TOKEN in your setting.\")\n\n self._client = SlackClient(token)",
"def __init__(self, url, token):\n super().__init__(url, token)",
"def __init__(self, cmd, app_id: str, api_version: str, token=None):\n self._cmd = cmd\n self._app_id = app_id\n self._token = token\n self._api_version = api_version",
"def __init__(self, bot: commands.Bot):\n\n super().__init__(bot)\n\n # Init instance vars\n self.cookie_data = self._parse_cookie_data()\n self.cookie_available = False\n self.cookie_prepared_timestamp = None\n self.cookie_drop_delay_hours = None\n self.cookie_drop_delay_minutes = None\n self.cookie_type = None",
"def __init__(self, key, secret, token=None, token_secret=None, subdomain=None):\n\n self.key = key\n self.secret = secret\n self.token = token\n self.token_secret = token_secret\n self.subdomain = subdomain or self.DOMAIN",
"def __init__(self, token, arn) -> None:\n self.token = token\n self.arn = arn",
"def __init__(self, server, username=None, password=None, token=None):\n self.server = server\n self.username = username\n self.password = password\n self._token = token\n\n self.repos = Repos(self)\n self.groups = Groups(self)\n\n if token is None:\n self._get_token()",
"def __init__(self, session: ClientSession, token: str, *, api_base: str = API_BASE):\r\n self._session = session\r\n self._token = token\r\n self._api_base = api_base",
"def __init__(self, host='localhost', password='agent'):\n self._host = host\n self._password = password\n self._auth_url = \"http://%s:5000/v2.0/tokens\" % self._host\n self._token = None\n self._vsm_url = None",
"def __init__(self, address=('', 50000), authkey=b'tradingbot'):\n _ClientBot.__init__(self, address=address, authkey=authkey)"
] | [
"0.7350684",
"0.7225262",
"0.71546656",
"0.71175253",
"0.7082925",
"0.7082925",
"0.7082925",
"0.7029878",
"0.6977135",
"0.6977135",
"0.6932527",
"0.6848244",
"0.6823913",
"0.67627245",
"0.6731667",
"0.670638",
"0.670638",
"0.66926235",
"0.6631106",
"0.66154206",
"0.65494776",
"0.6533936",
"0.65207666",
"0.6488908",
"0.6480868",
"0.6405139",
"0.63954246",
"0.6374239",
"0.6352943",
"0.63237756"
] | 0.7344112 | 1 |
computes empirical bootstrap for regression problem | def regression_bootstrap(self, X: np.ndarray, y: np.ndarray, n=None, B=1000, model=None):
n = X.shape[0] if n is None else n  # default the bootstrap sample size to the number of observations
boot_est = [None] * B
result = {}
if model.run is False:
model.fit(X, y);
thetas = model.theta
index = 0
for _ in range(B):
idx = np.random.randint(low=0, high=n, size=n)
model.fit(X[idx], y[idx]);
boot_est[index] = tuple(model.theta)
index += 1
result['estimates'] = boot_est
result['est_mean'] = np.mean(boot_est, axis=0)
result['est_err'] = np.std(boot_est, ddof=1, axis=0)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def residual_bootstrap(self, X: np.ndarray, y: np.ndarray, n=None, B=1000, model=None):\n # fit the model if it hasn't been run\n if model.run is False:\n model.fit(X, y);\n resid = model.residuals\n pred = model.predictions\n boot_est = [None] * B\n result = {} # to store the mean, std_err\n index = 0 \n for _ in range(B):\n idx = np.random.randint(low=0, high=n, size=n)\n boot_yi = pred + resid[idx]\n model.fit(X, boot_yi)\n boot_est[index] = tuple(model.theta)\n index += 1\n \n #self.boot_est['std_err'] = np.std(statistic, ddof=1, axis=0)\n result['estimates'] = boot_est\n result['est_mean'] = np.mean(boot_est, axis=0)\n result['est_err'] = np.std(boot_est, ddof=1, axis=0)\n return result",
"def eg_bootmu():\n\n a = []\n b = []\n\n for _ in range(100):\n a.append(utils.gaussian(10, 1))\n\n print(\"\", \"mu\", \"sd\", \"cliffs\", \"boot\", \"both\", sep=\"\\t\")\n print(\"\", \"--\", \"--\", \"------\", \"----\", \"----\", sep=\"\\t\")\n\n for mu in range(100, 111):\n b = []\n\n for _ in range(100):\n b.append(utils.gaussian(mu / 10, 1))\n\n cl = utils.cliffsDelta(a, b)\n bs = stats.bootstrap(a, b)\n\n print(\"\", mu / 10, 1, cl, bs, cl and bs, sep=\"\\t\")",
"def bootstrap(X):\n return X[np.random.choice(list(range(X.shape[0])), size=X.shape[0]), :]",
"def main():\n df = pd.read_csv('data/Boston.csv')\n n_obs = len(df)\n np.random.seed(111)\n\n # Part a\n medv_mean = np.mean(df['medv'])\n print('medv mean = {:.3f}'.format(medv_mean))\n\n # Part b\n medv_stan_err = statistics.stdev(df['medv']) / np.sqrt(n_obs)\n print('medv standard error = {:.5f}'.format(medv_stan_err))\n\n # Part c\n n_boot_iters = 10000\n medv_mean_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_mean_array[ii] = np.mean(df.loc[ind, 'medv'])\n\n medv_stan_err_boot = statistics.stdev(medv_mean_array)\n print('medv standard error (bootstrap) = {:.5f}'.format(medv_stan_err_boot))\n\n # Part d\n ci_95 = [medv_mean - 2 * medv_stan_err,\n medv_mean + 2 * medv_stan_err]\n ci_95_boot = [medv_mean - 2 * medv_stan_err_boot,\n medv_mean + 2 * medv_stan_err_boot]\n print('95% CI = [{:.3f}, {:.3f}]'.format(ci_95[0], ci_95[1]))\n print('95% CI (bootstrap) = [{:.3f}, {:.3f}]'.format(ci_95_boot[0], ci_95_boot[1]))\n\n # Part e\n medv_med = np.median(df['medv'])\n print('medv med = {:.3f}'.format(medv_med))\n\n # Part f\n medv_med_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_med_array[ii] = np.median(df.loc[ind, 'medv'])\n\n medv_med_stan_err_boot = statistics.stdev(medv_med_array)\n print('medv median standard error (bootstrap) = {:.5f}'.format(medv_med_stan_err_boot))\n\n # Part g\n medv_10 = np.percentile(df['medv'], 10)\n print('medv 10th percentile = {:.3f}'.format(medv_10))\n\n # Part f\n medv_10_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_10_array[ii] = np.percentile(df.loc[ind, 'medv'], 10)\n\n medv_10_stan_err_boot = statistics.stdev(medv_10_array)\n print('medv 10th percenile standard error (bootstrap) = {:.5f}'.format(medv_10_stan_err_boot))",
"def bootstrap(y0, z0):\n\n x = creation.NUM()\n y = creation.NUM()\n z = creation.NUM()\n yhat = []\n zhat = []\n\n for y1 in y0:\n update.add(x, y1)\n update.add(y, y1)\n\n for z1 in z0:\n update.add(x, z1)\n update.add(z, z1)\n\n for y1 in y0:\n yhat.append(y1 - y[\"mu\"] + x[\"mu\"])\n\n for z1 in z0:\n zhat.append(z1 - z[\"mu\"] + x[\"mu\"])\n\n n = 0\n\n for _ in range(globals.the[\"bootstrap\"]):\n if utils.delta(creation.NUM(utils.samples(yhat)), creation.NUM(utils.samples(zhat))) > utils.delta(y, z):\n n += 1\n\n return n / globals.the[\"bootstrap\"] >= globals.the[\"conf\"]",
"def bootstrap( M ):\n tot_ct_per_ob = M.sum(axis = 0)\n zero_list = []\n for i in range(len(tot_ct_per_ob)):\n if tot_ct_per_ob[i] <= 0:\n zero_list.append(i)\n M = np.delete(M, zero_list, 1)\n new_M = np.zeros(M.shape)\n tot_ct_per_ob = M.sum(axis = 0)\n for i in range(len(M)):\n for j in range(len(M[0])):\n new_M[i][j] = M[i][j]/tot_ct_per_ob[j]\n new_M = np.transpose(new_M)\n bootstrap = []\n for i in range(len(tot_ct_per_ob)):\n rnd_vec = np.random.multinomial(tot_ct_per_ob[i], new_M[i])\n bootstrap.append(rnd_vec)\n \n bootstrap = np.transpose(np.asarray(bootstrap)) \n return bootstrap",
"def standard_bootstrap(dataset):\n randseed=np.random.randint(0,10000)\n np.random.seed(randseed)\n \n n = dataset.shape[0]\n b = np.random.randint(0, high=n-1, size=n)\n return dataset[b]",
"def eg_pre():\n\n print(\"\\teg3\")\n\n d = 1\n\n for _ in range(10):\n t1 = []\n t2 = []\n\n for _ in range(32):\n t1.append(utils.gaussian(10, 1))\n t2.append(utils.gaussian(d * 10, 1))\n\n print(\"\", \"\", d, d < 1.1, stats.bootstrap(\n t1, t2), stats.bootstrap(t1, t1), sep=\"\\t\")\n\n d = round(d + .05, 2)",
"def bootstrap_errors(self, arr, k=100):\n val = np.zeros(k)\n\n for i in range(k):\n rand_selection = np.random.choice(arr, len(arr))\n val[i] = self.varience(rand_selection)\n sigma = np.sqrt(np.average(np.square(val))\n - np.square(np.average(val)))\n return sigma",
"def bootstrap(init_file, nbootstraps):\n check_presence_init(init_file)\n dict_ = read(init_file)\n\n # Process the information specified in the initialization file\n nbins, logit, bandwidth, gridsize, a, b = process_user_input(dict_)\n trim, rbandwidth, reestimate_p = process_default_input(dict_)\n\n # Suppress output\n show_output = False\n\n # Prepare empty array to store output values\n mte_boot = np.zeros([gridsize, nbootstraps])\n\n # Load the baseline data\n data = read_data(dict_[\"ESTIMATION\"][\"file\"])\n\n counter = 0\n while counter < nbootstraps:\n boot_data = resample(data, replace=True, n_samples=len(data), random_state=None)\n\n # Process the inputs for the decision equation\n indicator, D, Z = process_choice_data(dict_, boot_data)\n\n # Estimate propensity score P(z)\n ps = estimate_treatment_propensity(D, Z, logit, show_output)\n\n if isinstance(ps, np.ndarray): # & (np.min(ps) <= 0.3) & (np.max(ps) >= 0.7):\n # Define common support and trim the data, if trim=True\n boot_data, ps = trim_support(\n dict_,\n boot_data,\n logit,\n ps,\n indicator,\n nbins,\n trim,\n reestimate_p,\n show_output,\n )\n\n # Estimate the observed and unobserved component of the MTE\n X, b1_b0, b0, mte_u = mte_components(\n dict_, boot_data, ps, rbandwidth, bandwidth, gridsize, a, b, show_output\n )\n\n # Calculate the MTE component that depends on X\n mte_x = np.dot(X, b1_b0).mean(axis=0)\n\n # Put the MTE together\n mte = mte_x + mte_u\n mte_boot[:, counter] = mte\n\n counter += 1\n\n else:\n continue\n\n return mte_boot",
"def bootstrap_mean(x, B=10000, alpha=0.05, plot=False):\n\n # Deterministic things\n n = len(x) # sample size\n orig = x.mean() # sample mean\n se_mean = x.std()/np.sqrt(n) # standard error of the mean\n qt = stats.t.ppf(q=1 - alpha/2, df=n - 1) # Student quantile\n\n # Generate boostrap distribution of sample mean\n xboot = boot_matrix(x, B=B)\n sampling_distribution = xboot.mean(axis=1)\n\n # Standard error and sample quantiles\n se_mean_boot = sampling_distribution.std()\n quantile_boot = np.percentile(sampling_distribution, q=(100*alpha/2, 100*(1-alpha/2)))\n\n # # RESULTS\n # print(\"Estimated mean:\", orig)\n # print(\"Classic standard error:\", se_mean)\n # print(\"Classic student c.i.:\", orig + np.array([-qt, qt])*se_mean)\n # print(\"\\nBootstrap results:\")\n # print(\"Standard error:\", se_mean_boot)\n # print(\"t-type c.i.:\", orig + np.array([-qt, qt])*se_mean_boot)\n # print(\"Percentile c.i.:\", quantile_boot)\n # print(\"Basic c.i.:\", 2*orig - quantile_boot[::-1])\n\n if plot:\n plt.hist(sampling_distribution, bins=\"fd\")\n # return sampling_distribution\n return np.round(orig, decimals=2), np.round(quantile_boot, decimals=2)",
"def bootstrap(data,func,nboot):\n\n n = len(data)\n resamples = np.array([[random.choice(data) for i in range(n)]\n for j in range(nboot)])\n return np.apply_along_axis(func, 1, resamples)",
"def calc_bootstrap(fcs,obs,ref,func, bootstrap_range, L, B):\n \n from sklearn.utils import resample\n \n idxs = np.arange(len(fcs))\n results = []\n \n random_state = 0\n for smp in range(B):\n block_sample = np.array([]).astype(int)\n while(len(block_sample) < len(fcs)):\n random_state += 1\n rolls = resample(idxs, n_samples=1, random_state=random_state)[0]\n block = np.roll(idxs, rolls)[0:L]\n block_sample = np.append(block_sample, block)\n\n block_sample = block_sample[0:len(idxs)]\n results.append(func(fcs[block_sample],obs[block_sample],ref[block_sample]))\n \n try:\n out = [ np.percentile(results, bootstrap_range[0]), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, bootstrap_range[1])]\n except:\n out = [ np.percentile(results, 2.5), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, 97.5)]\n\n # For indicating the statistical significance \n # of the lower boundary:\n if(out[0]>0): \n out.append('*')\n else:\n out.append('')\n \n return out",
"def _chunk_boot_ols_coefs(dat, formula, weights, seed):\n # Random sample with replacement from all data\n dat = dat.sample(frac=1, replace=True, random_state=seed)\n y, x = dmatrices(formula, dat, 1, return_type=\"dataframe\")\n b = _ols(\n x, y, robust=None, n_lags=1, cluster=None, all_stats=False, weights=weights\n )\n return list(b)",
"def bootstrap(data, iterations=10000):\n\n boot_mean = []\n\n for n in range(0, iterations):\n\n boot = resample(data, replace=True, n_samples=None,\n random_state=None)\n\n boot_mean.append(np.mean(boot))\n\n final_mean = np.mean(boot_mean)\n\n final_std = np.std(boot_mean, dtype=np.float64)\n\n return final_mean, final_std",
"def bootstrap(x, iter=int(1E6), return_samples=False):\n\n \n means = np.empty(iter) \n dfs = []\n for i in tqdm(range(iter), desc='Performing bootstrap sampling'):\n resamp = np.random.choice(x, size=len(x), replace=True)\n means[i] = resamp.mean()\n\n if return_samples:\n _df = pd.DataFrame([])\n _df['value'] = resamp\n _df['iter'] = i + 1\n dfs.append(_df)\n\n # Compute confidence intervals of the means.\n mean_val = means.mean()\n bounds_ci = {'99%': (0.5, 99.5), '95%': (2.5, 97.5), '90%': (5, 95),\n '75%': (12.5, 87.5), '50%': (25, 75), '25%': (37.5, 62.5),\n '10%': (45, 55), '5%': (47.5, 52.5), '1%': (49.5, 50.5)} \n cis = {} \n for k, v in bounds_ci.items():\n bounds = np.percentile(means, v)\n cis[k] = bounds\n\n statistics['original_data'] = x\n statistics['resampled_means'] = means\n statistics['mean_value'] = mean_val\n statistics['confidence_intervals'] = cis\n\n if return_samples:\n _df = pd.concat(dfs, sort=False)\n return [statistics, _df]\n else:\n return statistics",
"def _boot_h(h, x, y):\n n = 100\n ind = np.random.randint(x.shape[0], size=n)\n # allocate output\n out = np.empty(n)\n # Loop through each bootstrap point\n for i in range(n):\n # all-1 points\n xx = np.delete(x, i, axis=0)\n yy = np.delete(y, i, axis=0)\n z = (xx - x[i, :]) / h\n out[i] = _nadaraya_watson(z, yy)\n cv = np.sum((y[ind] - out)**2) / float(n)\n\n return cv",
"def bootstrap_mean_pvalue_2samples(x, y, equal_var=False, B=1000):\n n = len(x)\n orig = stats.ttest_ind(x, y, equal_var=equal_var)\n pooled_mean = np.concatenate((x, y), axis=None).mean()\n\n xboot = boot_matrix(x - x.mean() + pooled_mean,\n B=B) # important centering step to get sampling distribution under the null\n yboot = boot_matrix(y - y.mean() + pooled_mean, B=B)\n sampling_distribution = stats.ttest_ind(xboot, yboot, axis=1, equal_var=equal_var)[0]\n\n if np.isnan(orig[1]):\n p_final = np.nan\n else:\n # Calculate proportion of bootstrap samples with at least as strong evidence against null\n p = np.mean(sampling_distribution >= orig[0])\n # RESULTS\n # print(\"p value for null hypothesis of equal population means:\")\n # print(\"Parametric:\", orig[1])\n # print(\"Bootstrap:\", 2 * min(p, 1 - p))\n p_final = 2 * min(p, 1 - p)\n\n return p_final, orig",
"def bootstrap_ci(data: np.ndarray,\n stat_fcn,\n num_reps: int,\n alpha: float,\n ci_sides: int,\n bias_correction: bool = False,\n studentized: bool = False,\n seed: int = None):\n assert isinstance(data, np.ndarray)\n assert isinstance(num_reps, int) and num_reps > 0\n assert ci_sides == 1 or ci_sides == 2\n\n # Compute the statistic of interest based on the empirical distribution (input data)\n stat_emp = stat_fcn(data)\n\n # Get the bootstrap replications\n if data.ndim == 1:\n # Size of the samples drawn by the bootstrap method have to be equal input sample, since the variance of the\n # statistic to be computed depends on sample size\n size_sample = data.shape[0]\n # Set the seed if provided\n if seed is not None:\n np.random.seed(seed)\n # Draw samples of data with replacement (uniform weights)\n data_bs = np.random.choice(data, (size_sample, num_reps), replace=True)\n else:\n # Only use this function for 1D data sets\n raise NotImplementedError\n\n # Compute the statistic of interest based on the resampled distribution\n # Do it along each row (axis=0) -->> bootstrap replications\n stat_bs = np.apply_along_axis(stat_fcn, 0, data_bs) # dim = 1 x num_reps\n\n # Correct for the bias introduced by bootstrapping\n # Note: other estimates of the bias-correction factor than stat_emt possible, see [4]\n if bias_correction:\n # bias-corrected statistic (see (2) in [2], or (11.10) in [3])\n stat_bs_bc = 2*stat_emp - np.mean(stat_bs) # same as bias = mean_repl - stat_emp; repl_bc = stat_emp - bias\n stat_ret = stat_bs_bc # use the correction also for the bs replications? -->> No (so far)\n # Note: bias-correction can be dangerous in practice. Even though T_bc(D) is less biased than T(D),\n # the bias-corrected estimator may have substantially larger variance. This is due to a possibly higher\n # variability in the estimate of the bias, particularly when computed from small data sets.\n else:\n # Return the estimator based on the original sample a.k.a. empirical distribution\n stat_ret = stat_emp\n\n # Compute the deviation to the value of the statistic based on the empirical distribution (see [7])\n # This is analogous to the deviation of the empirical value around the true population value\n # i.e. delta = stat_emp - stat_pop\n # Note: it makes no difference if one uses the percentile operator before or after this difference\n delta_bs = stat_bs - stat_emp # dim = 1 x num_reps\n\n # Confidence interval with asymptotic refinement (a.k.a. percentile-t method)\n if studentized:\n # Compute the standard deviation of the original sample\n se_emp = np.std(data, ddof=0)/np.sqrt(data.shape[0]) # for dividing by (n-1) set ddof=1\n if se_emp < 1e-9:\n warn('Standard deviation in the empirical data (se_emp) is below 1e-9.', UserWarning)\n\n # Compute the standard error of the replications for the bootstrapped t-statistic\n se_bs = np.std(stat_bs, ddof=0)/np.sqrt(data_bs.shape[0]) # dim = num_reps x 1\n if se_bs < 1e-9: # use any for version 2 above\n warn('Standard deviation in the bootstrapped data (se_bs) is below 1e-9. '\n 'Setting confidence interval bounds to infinity.', UserWarning)\n return stat_ret, [-np.infty, np.infty]\n\n # Compute the t-statistic of the replications\n t_bs = delta_bs/se_bs # is consistent with [3, p. 360]\n\n if ci_sides == 2: # Two-sided confidence interval\n t_bs.sort()\n t_lo, t_up = np.percentile(t_bs, [100*alpha/2., 100 - 100*alpha/2.])\n ci_lo = stat_emp - t_up*se_emp # see [3, (11.6) p. 364]\n ci_up = stat_emp - t_lo*se_emp # see [3, (11.6) p. 
364]\n\n elif ci_sides == 1: # One-sided confidence interval (upper bound)\n t_bs.sort()\n t_lo = np.percentile(t_bs, 100*alpha)\n ci_lo = -np.inf\n ci_up = stat_emp - t_lo*se_emp # see [3, (11.6) p. 364]\n\n else:\n raise pyrado.ValueErr(given=ci_sides, eq_constraint=\"1 or 2\")\n\n # Confidence interval without asymptotic refinement (a.k.a. basic method)\n else:\n if ci_sides == 2: # Two-sided confidence interval\n delta_bs.sort()\n delta_lo, delta_up = np.percentile(delta_bs, [100*alpha/2., 100 - 100*alpha/2.])\n ci_lo = stat_emp - delta_up\n ci_up = stat_emp - delta_lo\n\n elif ci_sides == 1: # One-sided confidence interval (upper bound)\n delta_bs.sort()\n delta_lo = np.percentile(delta_bs, 100*alpha)\n ci_lo = -np.inf\n ci_up = stat_emp - delta_lo\n\n else:\n raise pyrado.ValueErr(given=ci_sides, eq_constraint=\"1 or 2\")\n\n return stat_ret, [ci_lo, ci_up]",
"def bootstrap(data):\r\n size = int(len(data))\r\n train = resample(data, n_samples=size, replace=True)\r\n test = data.drop(train.index) \r\n return train[encoded_features], train[target], test[encoded_features], test[target]",
"def fit_predictCI(self, y, x, ndays, bootstrap, n_jobs = None):\n os.remove(\"report.log\")\n #Make some parameters avaliable\n self.x = x\n self.y = np.array(y)\n self.ndays = len(self.x) + ndays\n \n \n #Create a lol with data for run the model\n lists = self.__bootstratpTS(npArray = self.y, replicate = bootstrap)\n \n #Model will be fitted and predicted so R) using ci is not consisent\n #Make cores avalible to the process\n pool = mp.Pool(processes = n_jobs)\n \n #Run the model\n results = pool.starmap(self.runSir, [(lists[i], self.x, self.ndays) for i in range(0,len(lists))])\n\n self.preds = [results[i][0] for i in range(0,len(lists))] #get predictions\n\n a = [results[i][1][0] for i in range(0,len(lists))] #get a\n b = [results[i][1][1] for i in range(0,len(lists))] #get b\n lim_inf, med, lim_sup = self.__computeCI()\n \n \n return {\"a\": a, \"b\": b, \"lim_inf\": lim_inf, \"med\": med, \"lim_sup\": lim_sup}",
"def Bagging(df_train, df_test, B, threshold):\n\n # number of individual forecasts and number of periods\n K = df_test.shape[1]\n T = df_train.shape[0]\n\n # matrix for saving predictions from bootstrapped models\n pred_mat = np.full((df_test.shape[0], B), np.nan, dtype=float)\n\n # length of the boostrap block\n m = int(T**(1/3))\n p = np.int(T/m)\n\n # pairwise bootstrap\n # for i in range(B):\n #\n # # create the bootstrap sample from randomly drawn blocks\n # boot_indices = np.random.randint(T, size=T)\n # boot_X = df_train.iloc[boot_indices, 1:]\n # boot_y = df_train.iloc[boot_indices, 0]\n #\n # # pre-test\n # # estimate OLS on the block with all individual forecasts\n # boot_X_t = np.transpose(boot_X)\n # XX = np.linalg.inv(np.dot(boot_X_t, boot_X))\n # beta_hat = np.linalg.multi_dot([XX, boot_X_t, boot_y])\n #\n # # residuals\n # epsilon = boot_y - np.dot(boot_X, beta_hat)\n # epsilon_sq = epsilon**2\n #\n # # variances (heteroskedasticity robust)\n # Sigma = np.diag(epsilon_sq)\n # beta_var = np.diag(\n # np.linalg.multi_dot([XX, boot_X_t, Sigma, boot_X, XX])\n # )\n\n # moving block bootstrap (in the first dimension of the array)\n # divide the sample into blocks of length m\n blocks = np.asarray(np.split(df_train.values[:p*m, :], p, axis=0))\n\n # create bootstrap samples by sampling blocks\n boot_ind = np.random.randint(p, size=(B, p))\n boot_sample = np.reshape(blocks[boot_ind, :, :], (B, p*m, K+1))\n boot_X = boot_sample[:, :, 1:]\n boot_y = boot_sample[:, :, 0][:, :, np.newaxis]\n\n # pre-test\n # estimate OLS on the block filled sample with all individual forecasts\n boot_X_t = np.transpose(boot_X, axes=(0, 2, 1))\n beta_hat = np.matmul(\n np.linalg.inv(np.matmul(boot_X_t, boot_X)),\n np.matmul(boot_X_t, boot_y)\n )\n\n # residuals\n epsilon = boot_y - np.matmul(boot_X, beta_hat)\n\n # compute the absolute t-statistics\n # compute S\n S_sum = np.full((B, K, K), 0, dtype=float)\n for e in range(p):\n for f in range(m):\n for g in range(m):\n\n F_f = boot_X[:, e*m + f, :][:, :, np.newaxis]\n F_g = boot_X[:, e*m + g, :][:, :, np.newaxis]\n eps_f = epsilon[:, e*m + f][:, np.newaxis]\n eps_g = epsilon[:, e*m + g][:, np.newaxis]\n S_sum += np.matmul(\n np.multiply(F_f, eps_f),\n np.transpose(np.multiply(F_g, eps_g), axes=(0, 2, 1))\n )\n S = S_sum / (p*m)\n # compute H\n H_sum = np.full((B, K, K), 0, dtype=float)\n for e in range(p):\n for f in range(m):\n\n F_f = boot_X[:, e*m + f, :][:, :, np.newaxis]\n H_sum += np.matmul(F_f, np.transpose(F_f, axes=(0, 2, 1)))\n\n H = H_sum / (p*m)\n H_inv = np.linalg.inv(H)\n\n # variances\n beta_var = np.diagonal(\n 1/np.sqrt(T) * np.matmul(np.matmul(H_inv, S), H_inv),\n axis1=1, axis2=2\n )[:, :, np.newaxis]\n\n # near singularity may cause negative variance issues\n beta_var = np.abs(beta_var)\n # t-statistics\n t_stats_abs = np.abs(np.divide(beta_hat, np.sqrt(beta_var)))\n\n for i in range(B):\n\n sel_ind = np.squeeze(t_stats_abs[i] > threshold)\n # continue if there is atleas one predictor\n if np.sum(sel_ind) > 0:\n\n boot_X_sel_t = boot_X[i, :, sel_ind]\n boot_X_sel = np.transpose(boot_X_sel_t)\n boot_y_sel = boot_y[i]\n\n # estimate OLS on the block with the selected forecasts\n gamma_hat = np.linalg.multi_dot(\n [np.linalg.inv(np.dot(boot_X_sel_t, boot_X_sel)),\n boot_X_sel_t, boot_y_sel])\n\n # forecast out-of-sample\n pred_mat[:, i] = np.dot(\n df_test.iloc[:, sel_ind].values,\n gamma_hat\n ).flatten()\n else:\n # if no variable passes the pre-test, the prediction is 0\n pred_mat[:, i] = 0\n\n # aggregation of forecasts\n pred = 
np.nanmean(pred_mat, axis=1)\n\n df_pred = pd.DataFrame(\n {\"Bagging\": pred},\n index=df_test.index\n )\n\n return df_pred",
"def mw_boot(x, y, num_reps=1000):\r\n tol = MACHEP * 100\r\n combined = array(list(x) + list(y))\r\n observed_stat, obs_p = mw_test(x, y)\r\n total_obs = len(combined)\r\n num_x = len(x)\r\n num_greater = 0\r\n for sampled_x, sampled_y in _get_bootstrap_sample(x, y, num_reps):\r\n sample_stat, sample_p = mw_test(sampled_x, sampled_y)\r\n if sample_stat >= (observed_stat - tol):\r\n num_greater += 1\r\n return observed_stat, num_greater / num_reps",
"def bootstrap(data, alpha=0.05, n_bootstrap = 2000, func=None, **func_args):\n\t\n\tassert data.ndim == 3, 'Data is not 3-dimensional. Function only works for 3-D data.' \n\t\n\t# Trials form the second dimension\n\tn_trials = data.shape[1]\n\t\n\t# generate randomised bootstrap resamples as random indices\n\tbootstrap_index = np.random.randint(0, n_trials, \n\t\t\t\t\t\t\t\t\t\t(n_trials, n_bootstrap) )\n\t\n\t# For each bin in the histogram, randomly samples from the results\n\t# of each trial and repeats, effectively, n_bootstrap times \n\ttrials_bootstrap = data[:, bootstrap_index, :]\n\t\n\t# dimension one is the trials, zero is the conditions; this averaging \n\t# goes across the trials creating a PSTH for each condition, and,\n\t# importantly, for each bootstrap resample\n\tavg_bootstrap = trials_bootstrap.mean(axis=1)\n\t\n\tif func:\n\t\tavg_bootstrap = func(avg_bootstrap, **func_args)\n\t\t\n\t# find percentile values for each bin along the bootstrap resamples,\n\t# which are on axis 1 \n\tCI_pos = np.percentile(avg_bootstrap, 100*(1 - (alpha/2.)), \n\t\t\t\t\t\t\t\taxis=1)\n\tCI_neg = np.percentile(avg_bootstrap, 100*(alpha/2.), \n\t\t\t\t\t\t\t\taxis=1)\n\n\n\treturn CI_pos, CI_neg",
"def bootstrap_acceleration(d):\n return np.sum(d**3) / np.sum(d**2)**(3.0/2.0) / 6.0",
"def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))",
"def bootstrap_gp_fit(x: np.ndarray, y: np.ndarray, N: int = 10) -> Dict[str, Union[float, np.ndarray]]:\n def _fit_gaussian(x_, y_):\n kernel = C(1.0, (1e-3, 1e4)) * RBF(1.0, (1e-3, 1e4))\n gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10, normalize_y=True)\n gp.fit(x, y)\n y_pred, sigma = gp.predict(x, return_std=True)\n\n nrmse = np.sqrt(np.mean((y_pred - np.array(y)) ** 2)) / np.mean(y)\n\n constant = gp.kernel_.k1.constant_value\n length_scale = gp.kernel_.k2.length_scale\n return y_pred.reshape((1, -1)), sigma.reshape((1, -1)), nrmse, constant, length_scale\n\n results = joblib.Parallel(n_jobs=multiprocessing.cpu_count())(\n joblib.delayed(_fit_gaussian)(x, y) for _ in range(N)\n )\n y_pred_list, sigma_list, nrmse_list, constant_list, length_scale_list = list(zip(*results))\n y_pred_arr, sigma_arr = np.array(y_pred_list), np.array(sigma_list)\n\n bootstrap_dict = {'mn_length_scale': np.mean(length_scale_list), 'mn_constant': np.mean(constant_list),\n 'mn_nrmse': np.mean(nrmse_list), 'std_length_scale': np.std(length_scale_list), 'std_constant': np.std(constant_list),\n 'std_nrmse': np.std(nrmse_list), 'mn_y_pred': np.mean(y_pred_arr, axis=0)[0], 'mn_sigma': np.mean(sigma_arr, axis=0)[0], 'y': y.ravel(), 'x': x.ravel()}\n return bootstrap_dict",
"def bootstrap(init_file, nbootstraps, show_output=False):\n check_presence_init(init_file)\n dict_ = read(init_file)\n\n nbins = dict_[\"ESTIMATION\"][\"nbins\"]\n trim = dict_[\"ESTIMATION\"][\"trim_support\"]\n rbandwidth = dict_[\"ESTIMATION\"][\"rbandwidth\"]\n bandwidth = dict_[\"ESTIMATION\"][\"bandwidth\"]\n gridsize = dict_[\"ESTIMATION\"][\"gridsize\"]\n a = dict_[\"ESTIMATION\"][\"ps_range\"][0]\n b = dict_[\"ESTIMATION\"][\"ps_range\"][1]\n\n logit = dict_[\"ESTIMATION\"][\"logit\"]\n\n # Distribute initialization information.\n data = read_data(dict_[\"ESTIMATION\"][\"file\"])\n\n # Prepare empty arrays to store output values\n mte_boot = np.zeros([gridsize, nbootstraps])\n\n counter = 0\n while counter < nbootstraps:\n boot = resample(data, replace=True, n_samples=len(data), random_state=None)\n\n # Process data for the semiparametric estimation.\n indicator = dict_[\"ESTIMATION\"][\"indicator\"]\n D = boot[indicator].values\n Z = boot[dict_[\"CHOICE\"][\"order\"]]\n\n # The Local Instrumental Variables (LIV) approach\n\n # 1. Estimate propensity score P(z)\n ps = estimate_treatment_propensity(D, Z, logit, show_output)\n\n if isinstance(ps, np.ndarray): # & (np.min(ps) <= 0.3) & (np.max(ps) >= 0.7):\n\n # 2a. Find common support\n treated, untreated, common_support = define_common_support(\n ps, indicator, boot, nbins, show_output\n )\n\n # 2b. Trim the data\n if trim is True:\n boot, ps = trim_data(ps, common_support, boot)\n\n # 3. Double Residual Regression\n # Sort data by ps\n boot = boot.sort_values(by=\"ps\", ascending=True)\n ps = np.sort(ps)\n\n X = boot[dict_[\"TREATED\"][\"order\"]]\n Xp = construct_Xp(X, ps)\n Y = boot[[dict_[\"ESTIMATION\"][\"dependent\"]]]\n\n b0, b1_b0 = double_residual_reg(ps, X, Xp, Y, rbandwidth, show_output)\n\n # Turn the X, Xp, and Y DataFrames into np.ndarrays\n X_arr = np.array(X)\n Xp_arr = np.array(Xp)\n Y_arr = np.array(Y).ravel()\n\n # 4. Compute the unobserved part of Y\n Y_tilde = Y_arr - np.dot(X_arr, b0) - np.dot(Xp_arr, b1_b0)\n\n # 5. Estimate mte_u, the unobserved component of the MTE,\n # through a locally quadratic regression\n quantiles, mte_u = locpoly(ps, Y_tilde, 1, 2, bandwidth, gridsize, a, b)\n\n # 6. construct MTE\n # Calculate the MTE component that depends on X\n mte_x = np.dot(X, b1_b0).mean(axis=0)\n\n # Put the MTE together\n mte = mte_x + mte_u\n\n mte_boot[:, counter] = mte\n\n counter += 1\n\n else:\n continue\n\n return mte_boot",
"def empirical_bootstrap(self, pop_data: np.ndarray, n = None, B = 1000, func=None):\n # store the estimates for each bootstrapped sample\n n = pop_data.shape[0] if n is None else n\n boot_est = [None] * B\n index = 0\n for _ in range(B):\n idx = np.random.randint(low=0, high=n, size=n)\n est = func(pop_data[idx], axis=0)\n boot_est[index] = est\n index += 1\n \n result = {}\n result['estimates'] = boot_est\n result['est_mean'] = np.mean(boot_est)\n result['est_err'] = np.std(boot_est, ddof=1)\n \n return result",
"def bootstrap_ci(x, n=300, ci=0.95):\n\n low_per = 100 * (1 - ci) / 2\n high_per = 100 * ci + low_per\n x = removena_numpy(x)\n if not len(x):\n return (np.nan, np.nan)\n bootstrap_samples = choice(a=x, size=(\n len(x), n), replace = True).mean(axis = 0)\n return np.percentile(bootstrap_samples, [low_per, high_per])"
] | [
"0.6828615",
"0.66068625",
"0.6545324",
"0.65088797",
"0.64571667",
"0.64449966",
"0.6441457",
"0.64222157",
"0.6327106",
"0.6316082",
"0.6301678",
"0.6224388",
"0.62125295",
"0.61592007",
"0.60806715",
"0.60796577",
"0.60620844",
"0.6056833",
"0.6034502",
"0.6022748",
"0.6014953",
"0.6011409",
"0.5982173",
"0.5982081",
"0.596928",
"0.59686786",
"0.595784",
"0.59068364",
"0.588601",
"0.5874016"
] | 0.7066247 | 0 |
Samples from the model by using a sentence as input. | def sample_sent(self, seed: str, num_words: int, temperature=1.0):
new_seed = word_tokenize(seed)
return self.sample(new_seed, num_words, temperature) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample(args):\n with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:\n saved_args = cPickle.load(f)\n with open(os.path.join(args.save_dir, 'words_vocab.pkl'), 'rb') as f:\n words, vocab = cPickle.load(f)\n tf.reset_default_graph()\n model = Model(saved_args, True)\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n saver = tf.train.Saver(tf.global_variables())\n ckpt = tf.train.get_checkpoint_state(args.save_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n for _ in range(args.count):\n output = model.sample(sess, words, vocab, args.n, args.prime,\n args.sample, args.pick,\n args.width, args.quiet)\n score, matches = eval_str(output)\n print(\"===== Before GTranslate Smoothing. Grammar Score = %i\" %score)\n print(output)\n gtranslate_output = translate(output)\n new_score, new_matches = eval_str(gtranslate_output)\n print(\"===== After GTranslate Smoothing. Grammar Score = %i\" %new_score)\n print(translate(gtranslate_output))\n if args.show_grammar:\n for err in matches:\n print(\"---\")\n print(err)",
"def sample(self, state, model_args, model_kwargs):\n raise NotImplementedError",
"def sample(self, like_params):\n\t\traise NotImplementedError",
"def nao_speech(possible_sentences):\n\n print(random.choice(possible_sentences))",
"def generate_sentence(sentence, vocab, model, sample_n=10):\n reverse_vocab = {idx: word for word, idx in vocab.items()}\n\n # Preprocess the sentence\n sentence = sentence.strip()\n sentence = sentence.lower()\n sentence = sentence.replace('\\n', '')\n sentence = sentence.replace('!', '')\n sentence = sentence.replace('?', '')\n sentence = sentence.replace('.', '')\n sentence = sentence.split()\n\n padded_sentence, _ = pad_corpus([sentence], [])\n sentence_ids = convert_to_id(vocab, padded_sentence)\n\n start_token = vocab[\"*START*\"]\n stop_token = vocab[\"*STOP*\"]\n decoder_input = [start_token] * 10\n\n sentence_logits = model.call(sentence_ids, [decoder_input])\n sentence_logits = tf.squeeze(sentence_logits)\n output_sentece = []\n\n for word_logits in sentence_logits:\n np_word_logits = word_logits.numpy()\n\n # Make it so highly likely and unhelpful words are not chosen\n np_word_logits[vocab[\"_s\"]] = 0\n np_word_logits[stop_token] = 0\n np_word_logits[vocab[\"the\"]] = 0\n np_word_logits[vocab[\"a\"]] = 0\n np_word_logits[vocab[\"of\"]] = 0\n\n top_n = np.argsort(np_word_logits)[-sample_n:]\n n_logits = np.exp(np_word_logits[top_n])/np.exp(np_word_logits[top_n]).sum()\n\n # Sample from the top n words to introduce variability\n out_index = np.random.choice(top_n,p=n_logits)\n output_sentece.append(reverse_vocab[out_index])\n\n print(\" \".join(output_sentece))",
"def sample_sentence_syl(hmm, obs_map, rhyme_dict, start_word, n_words=100):\n obs_map_r = obs_map_reverser(obs_map)\n\n num_start_word = obs_map[re.sub(r'[^-\\'\\w]', '', start_word).lower().strip('\\'')]\n num_rhyme_dict = {}\n\n # Convert the rhyme_dict to be composed of numbers instead of words.\n for _, (key, value) in enumerate(rhyme_dict.items()):\n num_value = []\n for val in value:\n # Clean up the word so we can see where it is in obs_map\n n_val = re.sub(r'[^-\\'\\w]', '', val).lower().strip('\\'')\n num_value.append(obs_map[n_val]) \n\n n_key = re.sub(r'[^-\\'\\w]', '', key).lower().strip('\\'')\n num_rhyme_dict[obs_map[n_key]] = num_value\n\n # Sample and convert sentence.\n # emission, states = hmm.generate_emission(n_words, num_rhyme_dict)\n emission, states = hmm.generate_emission(n_words, num_start_word)\n sentence = [obs_map_r[i] for i in emission]\n\n # Flip the order of the sentence before returning.\n # sentence.reverse() \n\n return sentence",
"def sample(self, M):\n # Helper function to get mdls\n def recur_mdl(model, lst):\n if isinstance(model, UnigramLM): # Base case\n return\n \n recur_mdl(model.prev_mdl, lst)\n lst.append(model)\n return lst\n \n tokens = ['\\x02'] # START token\n\n # Use a helper function to generate sample tokens of length `length`\n mdls = recur_mdl(self, []) # List of models\n\n if M <= self.N: # Before model ngrams\n mdls = mdls[:M]\n else: # If reach model ngrams\n for _ in range(M - self.N + 1): # Append additional used models\n mdls.append(mdls[-1])\n\n tups = tuple('\\x02'.split()) # First word depend on '\\x02'\n for mdl in mdls: # Loop through used models\n probs = mdl.mdl[mdl.mdl['n1gram'] == tups] # Get ngrams and probability dataframe\n if len(probs.ngram) == 0: # No word to choose\n ran = '\\x03' # Append '\\x03'\n break\n else:\n random = np.random.choice(probs.ngram, p=probs.prob) # Choose token based on probs\n ran = random[-1]\n \n if mdl.N < self.N: # If still smaller than N\n tups = random\n else: # ngram models\n tups = random[1:]\n\n tokens.append(ran) # Append\n \n for _ in range(M - len(tokens)): # Fill the gap of missing due to '\\x03'\n tokens.append('\\x03')\n \n # Transform the tokens to strings\n return ' '.join(tokens)",
"def run_sample(self):\n # there will be validation failures for sample data\n self.validate_req(ignore_failure=True)\n runner_fn = self.model_runner.execute_model_for_sample_data\n return self.do_handle_request(runner_fn)",
"def sample(self, seg_logit, seg_label):",
"def sample_sentence_syl_only(hmm, obs_map, n_words=100):\n obs_map_r = obs_map_reverser(obs_map)\n\n # Sample and convert sentence.\n emission, states = hmm.generate_emission(n_words)\n sentence = [obs_map_r[i] for i in emission]\n\n return sentence",
"def test_model():\n test_text = \"what is the price of jug?\"\n model = spacy.load(\"../model/custom_ner_model\")\n doc = model(test_text)\n for ent in doc.ents:\n print(ent.text, ent.start_char, ent.end_char, ent.label_)",
"def sample(self, s):\n rng = np.random.default_rng()\n return rng.choice(np.arange(self.n_actions), p=self.eval(s))",
"def sample_sentence(self, prefix=[], max_length=20):\r\n i = 0\r\n sent = prefix\r\n word = self.sample_next(sent, False)\r\n while i <= max_length and word != \"END_OF_SENTENCE\":\r\n sent.append(word)\r\n word = self.sample_next(sent)\r\n i += 1\r\n return sent",
"def test_text_classifier_get_training_samples(self):\n pass",
"def generateByWord(model, voc, maxlen=20, diversity=0.5, numwords=42):\n\n text, sym_indices, indices_sym = voc\n syms = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n \n #generated += sentence\n generated += ' '.join(sentence)\n print('----- Generating with seed: \"' + ' '.join(sentence) + '\"')\n sys.stdout.write(generated)\n\n for i in range(numwords):\n x = np.zeros((1, maxlen, len(syms)))\n for t, sym in enumerate(sentence):\n x[0, t, sym_indices[sym]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_sym = indices_sym[next_index]\n generated += ' '+next_sym\n sentence.append(next_sym)\n tmpsentence = sentence[1:]\n sentence = tmpsentence\n sys.stdout.write(next_sym+' ')\n sys.stdout.flush()\n print()",
"def sample(self):",
"def sample_survey(self, **kwargs):",
"def sample(self):\n raise NotImplementedError",
"def sample(self):\n raise NotImplementedError",
"def utter(fileNames):\n model = __initModel(fileNames)\n\n print(\"Please enter your sentence...\")\n\n try:\n while True:\n utterance = input()\n print(numpy.random.choice(list(model.keys()), 1, p=list(model.values()))[0])\n\n except KeyboardInterrupt:\n pass",
"def generate(model, voc, maxlen=20, diversity=0.5, numchars=100):\n\n text, char_indices, indices_char = voc\n chars = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n #print(\"Insert text to start from [min 20 chars]:\")\n #sentence = str(raw_input())\n #sentence = sentence[:maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(numchars):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()",
"def sample(self):\r\n raise NotImplementedError",
"def sample(self, n_samples, batch_size, word_0=-1, gen_type='multinom'):\n # Compute the number of batches\n if n_samples != batch_size:\n n_batches = n_samples // batch_size + 1\n else:\n n_batches = 1\n\n samples = torch.zeros(n_batches * batch_size, self.max_len).long()\n\n # Produce samples by batches\n for batch in range(n_batches):\n hidden = self.init_hidden(batch_size)\n if word_0 < 0:\n # Initialize every sequence with a random word from the vocabulary\n input = torch.randint(low=0, high=self.voc_dim, size=(batch_size,))\n else:\n # Initialize every sequence with 'word_0' as starting token\n input = torch.LongTensor([word_0] * batch_size)\n if self.gpu:\n input = input.cuda()\n\n # Iterate the generator until we reach the maximum length allowed for the sequence\n for i in range(self.max_len):\n # Forward pass where we keep track of the hidden states of the network\n output, hidden = self.forward(input, hidden, require_hidden=True)\n\n if gen_type == 'multinom':\n # Generate the next token in the sequence randomly using the output as a multinomial distribution\n next_token = torch.multinomial(torch.exp(output), 1)\n elif gen_type == 'argmax':\n # Choose the most probable token in the sequence deterministically\n next_token = torch.argmax(torch.exp(output), 1)\n\n # Append generated ith tokens to batch #'batch'\n samples[batch * batch_size:(batch + 1) * batch_size, i] = next_token.view(-1)\n\n # Add generated tokens to the input\n input = next_token.view(-1)\n\n # We need this because the number of samples might not be divisible by the size of batches\n samples = samples[:n_samples]\n\n return samples",
"def sample(self, observation):\n raise NotImplementedError",
"def predict_sentences(self, sents):\n tkw=self.tkw\n sents_attr=[]\n sent_samples={\n \"word_inputs\":[],\n \"predicate_inputs\":[],\n \"postags_inputs\":[]\n }\n print('prepare data')\n for sid,sent in enumerate(sents):\n if sid % (int(np.ceil(len(sents)/100))) == 0:\n print(sid / len(sents))\n sent_str = \" \".join(sent)\n preds = [(word.i, str(word))\n for word\n in tkw.parser(sent_str)\n if word.tag_.startswith(\"V\")]\n num_of_samples = int(np.ceil(float(len(sent)) / self.sent_maxlen) * self.sent_maxlen)\n pred_list=[]\n for ind, pred in preds:\n cur_sample=self.encode_inputs([self.create_sample(sent, ind)])\n for name in [\"word_inputs\", \"predicate_inputs\", \"postags_inputs\"]:\n sent_samples[name].append(cur_sample[name])\n pred_list.append((ind, pred))\n sents_attr.append((num_of_samples,pred_list,len(sent)))\n for key in sent_samples:\n sent_samples[key]=np.concatenate(sent_samples[key],axis=0)\n print('predict data')\n X = sent_samples\n Y=self.model.predict(X)\n # print(Y[0])\n # print(Y[2])\n res=[]\n p=0\n for attr in sents_attr:\n num_of_samples,pred_list,sent_len=attr\n sample_len=num_of_samples//self.sent_maxlen\n ret=[]\n for pid,(ind, pred) in enumerate(pred_list):\n ret.append(((ind, pred),\n [(self.consolidate_label(label), float(prob))\n for (label, prob) in\n self.transform_output_probs(Y[p+pid*sample_len:p+(pid+1)*sample_len], \n get_prob = True).reshape(num_of_samples,\n 2)[:sent_len]]))\n res.append(ret)\n p+=len(pred_list)*sample_len\n return res",
"def test_text_classifier_add_training_samples(self):\n pass",
"def sample(\n seq: str,\n saved_model: Path,\n nsample: int = 1000,\n rm_aa: str = \"C,K\",\n save_fname: Optional[Path] = None,\n num_heads: int = 2,\n model_dim: int = 128,\n key_dim: int = 128,\n value_dim: int = 128,\n) -> Optional[List[str]]:\n model_params[\"n_head\"] = num_heads\n model_params[\"d_model\"] = model_dim\n model_params[\"d_k\"] = key_dim\n model_params[\"d_v\"] = value_dim\n\n model = model = BERT(**model_params)\n model.load_state_dict(torch.load(saved_model))\n model.double()\n model.to(device)\n\n sampled_seqs = model.sample(seq, n_samples=nsample, rm_aa=rm_aa)\n\n if save_fname is not None:\n write_fasta(save_fname, sampled_seqs)\n else:\n return sampled_seqs",
"def generate_samples_eval(model, context, max_gen_length, eos_token_id):\n #assert False, \"Implementation untested\"\n args = get_args()\n args.eos_id = eos_token_id\n raw_text_len = len(context)\n resp_sentences = generate(model, [context], max_gen_length)\n if resp_sentences:\n return resp_sentences[0][raw_text_len:]",
"def sample_sonnet(hmm, obs_map, n_words):\n sonnetLines = []\n sonnet = ''\n\n for numLines in range(14):\n line = sample_sentence(hmm, obs_map, n_words)\n sonnetLines.append(''.join(line).capitalize() + '\\n')\n\n for line in sonnetLines:\n sonnet += line\n\n return sonnet",
"def sample(self, batchsize, *args, **kwargs):\n raise NotImplementedError"
] | [
"0.6462898",
"0.6332141",
"0.6311455",
"0.6258953",
"0.6209095",
"0.6182178",
"0.61456835",
"0.6083665",
"0.6044231",
"0.601698",
"0.59860104",
"0.5928433",
"0.59251314",
"0.59154105",
"0.5907113",
"0.5901741",
"0.58996326",
"0.58762306",
"0.58762306",
"0.5863097",
"0.5856869",
"0.58439827",
"0.58413506",
"0.58350384",
"0.58234495",
"0.582185",
"0.5817849",
"0.5811744",
"0.57847494",
"0.57421464"
] | 0.6458874 | 1 |
Loads a character sampler from the specified paths. | def from_paths(cls, weights_path, vocab_path, sequence_length):
chars = util.load_vocab(vocab_path)
model = build_character_level_model(len(chars), sequence_length)
model.load_weights(weights_path)
return cls(model, chars, sequence_length) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(path):\n pass",
"def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")",
"def load_assets(self, paths):\n try:\n self.background = load(paths['background'])\n self.bullet = load(paths['bullet'])\n self.bullet_red = load(paths['bullet_red'])\n self.icon = load(paths['icon'])\n\n self.Ship = load(paths['ship'])\n self.Ship_CR = load(paths['ship_cr'])\n self.Ship_CL = load(paths['ship_cl'])\n self.Ship_CC = load(paths['ship_cc'])\n\n self.InvaderA1 = load(paths['invadera1'])\n self.InvaderA2 = load(paths['invadera2'])\n self.InvaderB1 = load(paths['invaderb1'])\n self.InvaderB2 = load(paths['invaderb2'])\n self.InvaderC1 = load(paths['invaderc1'])\n self.InvaderC2 = load(paths['invaderc2'])\n\n except Exception as e:\n print(\" \"+str(e))\n return 0\n else:\n return 1",
"def __init__(self, path, sampler='multinest'):\n assert os.path.exists(path)\n self.path = path\n self.log = dddm.utils.get_logger(self.__class__.__name__)\n self.sampler = sampler\n self.setup()",
"def _read(self, path: str):\n num_samples, length = [int(x) for x in path.split(\":\")]\n random.seed(self.seed)\n for _ in range(num_samples):\n tokens, tags = self._sample(length)\n yield self.text_to_instance(tokens, tags)",
"def load_paths(self, paths):\n paths = list(str(p) for p in paths)\n\n # This is where more cleverness will go if/when needed.\n\n return SimpleFitsCollection(\n paths,\n hdu_index=self.hdu_index,\n blankval=self.blankval,\n )",
"def load(self, path):\n pass",
"def load(self, path):\n pass",
"def load(self, path: str):\n pass",
"def get_dataloader(path, chars_include, transform):\n return WSDataLoader(path, chars_include=chars_include, batch_size=64, transform=transform, return_path=True)",
"def load_sample(filename):\n return open(os.path.join(SAMPLES, filename)).read()",
"def load(path, reset=False):\n pass",
"def __init__(self, *paths, **kwargs):\n trajectories = load_trajectories(*paths, **kwargs)\n super().__init__(trajectories, **kwargs)",
"def LoadBatch(filename):",
"def from_string(cls, path):\n pass",
"def sample(data_path, **kwargs):\n\tprocessor = SampleProcessor(kwargs)\n\ttry:\n\t\tprocessor.traverse(data_path)\n\t\tprocessor.output()\n\tfinally:\n\t\tprocessor.close()",
"def load_firstnames(gender):\n return load_resource(\"resources/%s.txt\" % gender)",
"def load(cls, labpath: str) -> None:\n raise NotImplementedError",
"def from_file(cls, basename, *args, **keys):\n log.verbose(\"Loading mapping\", repr(basename), verbosity=55)\n path = keys.get(\"path\", None)\n if path:\n filename = os.path.join(path, os.path.basename(basename))\n basename = filename\n else:\n filename = config.locate_mapping(basename)\n text = utils.get_uri_content(filename)\n return cls.from_string(text, basename, *args, **keys)",
"def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")",
"def load_camus(self, patients_path, height, width):\n # Add classes\n self.add_class(\"camus\", 1, \"chamber\")\n \n i = 0\n if isinstance(patients_path, str):\n patients_dir = glob(patients_path)\n for patient_path in tqdm(patients_dir, ncols=80):\n filenames = glob(patient_path + \"*.jpg\")\n for image_filename in filenames:\n self.add_image(\"camus\", image_id=i, path=image_filename,\n width=width, height=height)\n i += 1\n elif isinstance(patients_path, list):\n filenames = [p for p in patients_path if p.endswith(\".jpg\")]\n for image_filename in filenames:\n self.add_image(\"camus\", image_id=i, path=image_filename,\n width=width, height=height)\n i += 1",
"def read(*paths):\n with open(os.path.join(*paths), 'r', 'utf-8') as f:\n return f.read()",
"def load_features(feature_path):\n if not os.path.exists(os.path.join(feature_path, f\"0_features.npy\")): \n raise ValueError(f\"The provided location {feature_path} does not contain any representation files\")\n\n ds_list, chunk_id = [], 0\n while os.path.exists(os.path.join(feature_path, f\"{chunk_id}_features.npy\")): \n features = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_features.npy\"))).float()\n labels = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_labels.npy\"))).long()\n ds_list.append(ch.utils.data.TensorDataset(features, labels))\n chunk_id += 1\n\n print(f\"==> loaded {chunk_id} files of representations...\")\n return ch.utils.data.ConcatDataset(ds_list)",
"def load_from_path(self, paths, label_key='labels'):\n data = []\n labels = []\n for path in paths:\n with tf.io.gfile.GFile(path, 'rb') as f:\n d = {\n k.decode('utf8'): v\n for k, v in cPickle.load(f, encoding='bytes').items()\n }\n data.append(d['data'])\n labels.append(d[label_key])\n data = np.concatenate(data, axis=0)\n data = data.reshape((data.shape[0], 3, 32, 32))\n labels = np.concatenate(labels, axis=0)\n labels = np.reshape(labels, (len(labels), 1))\n\n if tf.keras.backend.image_data_format() == 'channels_last':\n data = data.transpose(0, 2, 3, 1)\n\n return data, labels",
"def load(path, num_cpu=16):\n return ActWrapper.load(path, num_cpu=num_cpu)",
"def read(*paths):\n with open(os.path.join(*paths), 'r') as f:\n return f.read()",
"def read(*paths):\n with open(os.path.join(*paths), 'r') as f:\n return f.read()",
"def read(*paths):\n with open(os.path.join(*paths), 'r') as f:\n return f.read()",
"def read(*paths):\n with open(os.path.join(*paths), 'r') as f:\n return f.read()",
"def read(*paths):\n with open(os.path.join(*paths), 'r') as f:\n return f.read()"
] | [
"0.601337",
"0.5859353",
"0.583447",
"0.5746866",
"0.569873",
"0.56790996",
"0.56531835",
"0.56531835",
"0.56473684",
"0.55836266",
"0.54546577",
"0.5444637",
"0.5434526",
"0.5354626",
"0.5247609",
"0.52465093",
"0.52368",
"0.5188384",
"0.5157901",
"0.515153",
"0.5129671",
"0.5120703",
"0.5101193",
"0.5094919",
"0.50938076",
"0.50683796",
"0.50683796",
"0.50683796",
"0.50683796",
"0.50683796"
] | 0.5873757 | 1 |
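The row above pairs its query with a from_paths classmethod that loads a character vocabulary, rebuilds a character-level model, and restores trained weights. A minimal sketch of that sequence in plain Keras follows; the vocabulary format, the layer sizes, and the file names are illustrative assumptions, not details taken from the dataset row.

import json
from tensorflow import keras

# Stand-in for util.load_vocab: assume the vocabulary is a JSON list of characters.
def load_vocab(path):
    with open(path) as f:
        return json.load(f)

# Stand-in for build_character_level_model: a small char-level LSTM classifier.
def build_character_level_model(vocab_size, sequence_length):
    return keras.Sequential([
        keras.Input(shape=(sequence_length, vocab_size)),
        keras.layers.LSTM(128),
        keras.layers.Dense(vocab_size, activation="softmax"),
    ])

chars = load_vocab("vocab.json")
model = build_character_level_model(len(chars), sequence_length=40)
model.load_weights("model.weights.h5")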
\ Parse a device file using the given file format. If file format is not given, file extension will be used. | def from_devfile(devfile, file_format=None, raiseErr=True, **kwargs):
from mmdev import parsers
if file_format is None:
file_format = os.path.splitext(devfile)[1][1:]
try:
parsercls = parsers.PARSERS[file_format]
except KeyError:
raise KeyError("File extension '%s' not recognized" % file_format)
return parsercls(devfile, raiseErr=raiseErr, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_file(self,filename):\n\n if (config.mode_format == \"simple\"): return self.read_file_simple(filename)\n if (config.mode_format == \"agsm\"): return self.read_file_agsm(filename)\n sys.exit(\"ERROR: unrecognised format \\\"\"+config.mode_format+\"\\\".\\n\" \\\n +\" Please choose another value for mode_format in AIMS_configure.py\")",
"def parse_extension(filepath):\n extension = os.path.splitext(filepath)[1][1:]\n\n extensions_dict = {\"netcdf\": ['nc'],\n \"mitiff\": ['mitiff'],\n \"geotiff\": ['gtiff', 'tiff', 'tif']}\n\n driver = None\n\n for key in extensions_dict:\n if extension in extensions_dict[key]:\n driver = key \n\n if driver is not None:\n return driver\n else:\n raise Exception(\"Unknown file extension, cannot guess file format\")",
"def _parse_ext(raw_fname, verbose=False):\n fname, ext = os.path.splitext(raw_fname)\n # BTi data is the only file format that does not have a file extension\n if ext == '' or 'c,rf' in fname:\n if verbose is True:\n print('Found no extension for raw file, assuming \"BTi\" format and '\n 'appending extension .pdf')\n ext = '.pdf'\n # If ending on .gz, check whether it is an .nii.gz file\n elif ext == '.gz' and raw_fname.endswith('.nii.gz'):\n ext = '.nii.gz'\n fname = fname[:-4] # cut off the .nii\n return fname, ext",
"def parse_data_format_from_path(path):\n data_format = None\n if os.path.isfile(path):\n data_format = os.path.splitext(path)[1].lower()\n else:\n # TODO: Add support for more data formats\n pass\n\n if data_format is None:\n return generate_response(warning='The path %s is not understandable.' % path)\n # elif data_format not in supported_formats:\n # return generate_response(warning='The data format %s is currently not supported.' % data_format)\n return generate_response(result=data_format)",
"def parse(self, filename):\n try:\n if 't' in self.FILE_OPEN_MODE:\n kw = {'encoding': self.FILE_ENCODING, 'errors': 'ignore'}\n else:\n kw = {}\n with open(filename, self.FILE_OPEN_MODE, **kw) as infile:\n self._parse(infile)\n except IOError:\n raise FileFormatError()",
"def parse_filename(filename, filename_format=\"ALL\"):\n\n # parse filename\n basename = os.path.basename(filename)\n\n # disable parsing if filename_format is None\n if filename_format is None:\n return {\"filename\": filename}\n\n # try all filename formats for special value ALL\n if filename_format == \"ALL\":\n for parser in filename_format_parser.values():\n try:\n info = parser(basename)\n except ValueError:\n info = {}\n continue\n else:\n break\n elif filename_format in filename_format_parser:\n parser = filename_format_parser[filename_format]\n info = parser(basename)\n else:\n raise KeyError(\"unknown filename_format={}\".format(filename_format))\n\n\n # define nuclide tuple\n info[\"filename\"] = filename\n if (\"Z\" in info) and (\"N\" in info):\n info[\"nuclide\"] = (info[\"Z\"],info[\"N\"])\n\n return info",
"def load_with_some_format(filename, formats):\n # Try to load file with formats that match its extension in format order\n data = None\n extension = os.path.splitext(filename)[1][1:]\n\n for format in formats:\n if extension in format.filenameExtensions:\n try:\n data = format.load(filename)\n break\n except InvalidFileFormatException:\n pass\n\n # If load by extension failed, try to load files in any format independently of their extension\n if not data:\n for format in formats:\n try:\n data = format.load(filename)\n break\n except InvalidFileFormatException:\n pass\n return data",
"def _parse_file(cls, filepath):\n hdus = sunpy.io.read_file(filepath)\n return cls._parse_hdus(hdus)",
"def load(cls,filename,format=None,**kwargs):\n\n\t\tif format is None:\n\t\t\t\n\t\t\textension = filename.split(\".\")[-1]\n\t\t\tif extension in [\"fit\",\"fits\"]:\n\t\t\t\tformat=\"fits\"\n\t\t\telif extension in [\"npy\",\"npz\"]:\n\t\t\t\tformat=\"npz\"\n\t\t\telse:\n\t\t\t\traise IOError(\"File format not recognized from extension '{0}', please specify it manually\".format(extension))\n\n\t\tif format==\"fits\":\n\t\t\treturn loadFITS(cls,filename)\n\t\telif format==\"npz\":\n\t\t\treturn loadNPZ(cls,filename)\n\t\telse:\n\t\t\tangle,data = format(filename,**kwargs)\n\t\t\treturn cls(data,angle)",
"def get_file_format(file):\n flag = None\n with open(file) as f:\n for line in f.readlines():\n MAT, MF, MT = read_control(line)[:3]\n if MF == 1 and MT == 451:\n i = 0\n C, i = read_cont([line], i)\n flag = C.N1\n break\n if flag is None:\n ftype = None\n elif flag == -11 or flag == -12:\n ftype = \"errorr\"\n elif flag == -1:\n ftype = \"gendf\"\n else:\n if C.L1 == 2:\n ftype = \"pendf\"\n else:\n ftype = \"endf6\"\n return ftype",
"def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n ret = self.read_file_object(file_obj, file_format=file_format)\n file_obj.close()\n return ret",
"def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n return self.read_file_object(file_obj, file_format=file_format)",
"def parse_file(self, f_path=\"NULL\"):\n if f_path == \"NULL\":\n raise Exception(\"ERROR: please specify tandam MS/MS file path\")\n return self._parse_ms_file(f_path)",
"def parse(self, infile):\r\n raise NotImplementedError()",
"def autodetect_format(file_data):\n\n # The first header line.\n for line in file_data:\n if line != []:\n break\n\n # Sparky format.\n if line[0] == 'Assignment':\n return 'sparky'\n\n # NMRView format.\n if line == ['label', 'dataset', 'sw', 'sf']:\n return 'nmrview'\n\n # NMRPipe SeriesTab.\n if line[0] == 'REMARK' and line[1] == 'SeriesTab':\n return 'seriestab'\n\n # XEasy format.\n if line == ['No.', 'Color', 'w1', 'w2', 'ass.', 'in', 'w1', 'ass.', 'in', 'w2', 'Volume', 'Vol.', 'Err.', 'Method', 'Comment']:\n return 'xeasy'\n\n # Assume a generic format.\n return 'generic'",
"def get_device(file_name):\n with open(file_name, 'r') as f:\n device = load(f)\n return pprint(device)",
"def file_format(x):\n return FILE_EXT_FORMAT_MAP.get(genomic_file_ext(x))",
"def parse_file(self, file_or_filename, parse_all=False):\n try:\n file_contents = file_or_filename.read()\n except AttributeError:\n with open(file_or_filename, \"r\") as f:\n file_contents = f.read()\n return self.parse_string(file_contents, parse_all)",
"def detect_file_format(path):\n with open(path, \"rb\") as f:\n first_bytes = f.read(16)\n if first_bytes.startswith(b\"CRAM\"):\n return \"CRAM\"\n if first_bytes.startswith(b\"##fileformat=VCF\"):\n return \"VCF\"\n\n gzip_header = b\"\\037\\213\"\n if first_bytes.startswith(gzip_header):\n with gzip.GzipFile(path, \"rb\") as f:\n first_bytes = f.read(16)\n if first_bytes.startswith(b\"BAM\\1\"):\n return \"BAM\"\n elif first_bytes.startswith(b\"##fileformat=VCF\"):\n return \"VCF\"\n\n return None",
"def parse_from_file (path):\n with open(path) as f:\n return NFFG.parse(f.read())",
"def get_file_format(file_path):\n with open(file_path, encoding=get_file_encoding(file_path)) as f:\n for line in f:\n line = f.readline()\n if line.startswith(\"* FORMAT\"):\n return line.split(\"=\", 1)[1].strip()\n elif line.startswith(\"MetroCount\"):\n return \"MC\"\n\n return None",
"def _file_format_adapter(self):\n raise NotImplementedError",
"def _get_file_object(infilename):\n\n _, extension = os.path.splitext(infilename)\n if extension.lower() == '.spe':\n return parsers.SpeFile(infilename)\n elif extension.lower() == '.spc':\n return parsers.SpcFile(infilename)\n elif extension.lower() == '.cnf':\n return parsers.CnfFile(infilename)\n else:\n raise NotImplementedError(\n 'File type {} can not be read'.format(extension))",
"def parse_file(self, domain_filename,\n gzip=False,\n fixed=True, pdf=False, output_path=None, **kargs):\n valid_file_formats = set(['fasta_style', 'dfam'])\n assert format in valid_file_formats, '{0} not in {1} valid_file_formats'.format(format, valid_file_formats)\n\n if output_path:\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n self.output_path = output_path\n\n self.fixed = fixed\n self.pdf = pdf\n\n self.data = []\n\n if format == 'fasta_style':\n self.data = self.__load_fasta_style(filename, gzip)\n elif format == 'dfam':\n self.data = self.__load_dfam_style(filename, gzip)",
"def parse(self, filename: str, input_format='csv', **kwargs):\n if 'delimiter' not in kwargs:\n kwargs['delimiter'] = self._extention_types[input_format]\n if filename.endswith('.tar'):\n with tarfile.open(filename) as tar:\n for member in tar.getmembers():\n f = tar.extractfile(member)\n df = pd.read_csv(f, comment='#', **kwargs) # type: pd.DataFrame\n if member.name == 'nodes.csv':\n self.load_nodes(df)\n elif member.name == 'edges.csv':\n self.load_edges(df)\n else:\n raise Exception('Tar file contains unrecognized member {}'.format(member.name))\n else:\n df = pd.read_csv(filename, comment='#', **kwargs) # type: pd.DataFrame\n self.load(df)",
"def _parse(self, infile):\n raise NotImplementedError()",
"def protocolParseFile( f ):\n\n\tpath = list( os.path.split( f.name )[:1] )\n\tparsed = ProtocolDescription.parseFile( f )[0]\n\n\timports = parsed[\"imports\"][:]\n\tparsed[\"imports\"] = []\n\n\tfor i in imports:\n\t\tp = os.path.join( path[0], i )\n\t\tparsed[\"imports\"].append( protocolParseFile( open( p, \"r\" ) ) )\n\n\treturn parsed",
"def get_read_parser(format):\n format = format.lower()\n if format == 'bed':\n return BedReadParser\n elif format == 'bedpe':\n return BedPeReadParser\n elif format == 'sam':\n return SamReadParser\n elif format == 'bam':\n return BamReadParser\n else:\n raise ValueError(f\"unknown read file format: {format!r}\")",
"def parse_file(filename):\n \n if filename.endswith('.p'):\n return parse_pickle(filename)\n elif filename.endswith('.xml'):\n return parse_xml(filename)\n else:\n print 'Unknown filetype for %s - it should have a .p or .xml extension. Attempting to open with XML first, pickle second.'\n \n try:\n return parse_xml(filename)\n except:\n return parse_pickle(filename)",
"def read(filename, file_format=None, **kwargs):\n if not isinstance(filename, str):\n raise TypeError()\n if not (file_format is None or file_format in {\"tough\", \"json\"}):\n raise ValueError()\n\n fmt = (\n file_format\n if file_format\n else filetype_from_filename(filename, _extension_to_filetype)\n )\n fmt = fmt if fmt else \"tough\"\n\n return _reader_map[fmt](filename, **kwargs)"
] | [
"0.6118345",
"0.5993526",
"0.59147155",
"0.5881222",
"0.5867202",
"0.57360864",
"0.5717077",
"0.5633291",
"0.55890423",
"0.5573933",
"0.5534352",
"0.55200124",
"0.5511212",
"0.5482957",
"0.5473776",
"0.54368377",
"0.54324925",
"0.54305726",
"0.5429239",
"0.54276496",
"0.5421039",
"0.5411888",
"0.5389359",
"0.5386586",
"0.53663504",
"0.53551745",
"0.53482634",
"0.5347457",
"0.53418005",
"0.5337368"
] | 0.6906373 | 0 |
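The from_devfile document above dispatches on an explicit file_format argument or, failing that, on the file extension, and looks the parser up in a registry. A self-contained sketch of that dispatch pattern is shown here; the registry contents are placeholders rather than the real mmdev.parsers.PARSERS mapping.

import os

# Placeholder registry standing in for mmdev.parsers.PARSERS.
PARSERS = {
    "svd": lambda path: "parsed %s as SVD" % path,
    "json": lambda path: "parsed %s as JSON" % path,
}

def parse_devfile(devfile, file_format=None):
    if file_format is None:
        # Same fallback as the row's document: take the extension without the dot.
        file_format = os.path.splitext(devfile)[1][1:]
    try:
        parser = PARSERS[file_format]
    except KeyError:
        raise KeyError("File extension '%s' not recognized" % file_format)
    return parser(devfile)

print(parse_devfile("stm32.svd"))      # dispatches on the .svd extension
print(parse_devfile("board", "json"))  # explicit format overrides the extension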
Get this account's current receive address | def receive_address(self):
url = self.base_url + 'account/receive_address'
self.session.headers.update(self.sign(url))
        resp = self.session.get(url)
return pd.Series(resp.json()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_current_address(self):\n pass",
"def get_remit_to_address(self): \n return self.remit_to_address",
"def get_address(self):\n if self.address:\n return self.address",
"def get_address(self):\n \n return self._addr",
"def get_address(self):\n \n return self._addr",
"def getaccountaddress(self, account):\n return self.proxy.getaccountaddress(account)",
"def get_address(self, ):\n return self.get_parameter('address')",
"def _get_address(self):\n return self.__address",
"def getnewaddress(self, account=None):\n if account is None:\n return self.proxy.getnewaddress()\n else:\n return self.proxy.getnewaddress(account)",
"def address(self):\n return self.data.get('address')",
"def get_address(self):\n return logic.address(self.get_program())",
"def getNewAddress(self):\n a = self.selectedAccount.getNextPaymentAddress()\n if self.blockchain:\n self.blockchain.subscribeAddresses(a)\n self.save()\n return a",
"def getAddress(self) -> int:\n ...",
"def address(self):\n return self._address",
"def address(self):\n return self._address",
"def address(self):\n return self._address",
"def address(self):\n return self._address",
"def address(self):\n return self._address",
"def get_new_address(account):\n try:\n new_address = subprocess.check_output([\"litecoin-cli\", \"getnewaddress\", account])\n except:\n sys.exit(1)\n\n return new_address.decode().strip()",
"def getAddress(self):\r\n return self._endpoint.getAddress()",
"def remote_getWebsocketAddress(self):\r\n return self._extAddress",
"def address(self) -> str:\n return pulumi.get(self, \"address\")",
"def address(self) -> str:\n return pulumi.get(self, \"address\")",
"def address(self) -> str:\n return pulumi.get(self, \"address\")",
"def get_address(self):\n return self.address.line[0]+\", \"+self.address.city+\", \"+self.address.state+\", \"+self.address.country",
"def get_receive_mail(self):\n return self.__mail",
"def get_address_id(self):\n return self.complete_address[-1]",
"def get_address(self):\n \n if self._addr == None:\n return self._socket.getpeername()\n return self._addr",
"def address(self):\n\n return self._address",
"def address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"address\")"
] | [
"0.69223",
"0.67890394",
"0.6574072",
"0.6554729",
"0.6554729",
"0.6517154",
"0.6479102",
"0.6416398",
"0.6332001",
"0.6276571",
"0.62622255",
"0.623307",
"0.6200893",
"0.6195295",
"0.6195295",
"0.6195295",
"0.6195295",
"0.6195295",
"0.61622924",
"0.6149964",
"0.61148465",
"0.610853",
"0.610853",
"0.610853",
"0.6100321",
"0.60953504",
"0.6092468",
"0.60840493",
"0.6078913",
"0.6078844"
] | 0.751058 | 0 |
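The receive_address document above relies on a self.sign(url) helper to produce authentication headers before issuing the GET. Below is a sketch of the HMAC-based signing that such legacy exchange clients typically use; the header names and the exact string being signed are assumptions, not something stated in the row.

import hashlib
import hmac
import time

def sign(url, api_key, api_secret, body=""):
    # Assumed scheme: HMAC-SHA256 over nonce + url + body, returned as request headers.
    nonce = str(int(time.time() * 1e6))
    message = nonce + url + body
    signature = hmac.new(api_secret.encode(), message.encode(), hashlib.sha256).hexdigest()
    return {
        "ACCESS_KEY": api_key,
        "ACCESS_NONCE": nonce,
        "ACCESS_SIGNATURE": signature,
    }

headers = sign("https://api.example.test/account/receive_address", "my-key", "my-secret")
print(headers["ACCESS_SIGNATURE"])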
UNTESTED Request BitCoin from an email address to be delivered to this account | def request(self, from_email, amount, notes='', currency='BTC'):
url = self.base_url + 'transactions/request_money'
if currency == 'BTC':
request_data = {
"transaction": {
"from": from_email,
"amount": amount,
"notes": notes
}
}
else:
request_data = {
"transaction": {
"from": from_email,
"amount_string": str(amount),
"amount_currency_iso": currency,
"notes": notes
}
}
body = json.dumps(request_data)
self.session.headers.update(self.sign(url, body=body))
resp = self.session.post(url=url, data=body)
return resp.json() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_0110_activationkey_resend_post_1(self):\n response = self.fetch(\n '/activation_resend', method=\"POST\", follow_redirects=False,\n body=urlencode({'email':'[email protected]'})\n )\n self.assertEqual(response.code, 200)\n self.assertEqual(\n response.body.count(u'we could not match your email'), 1\n )",
"def send_email(self, new_address):\n s = smtplib.SMTP('smtp.gmail.com:587')\n s.starttls()\n s.login(from_address, password)\n email = MIMEText(\"Received a request for ION-X information from:\\n{}\"\n .format(new_address))\n email['To'] = to_address\n email['From'] = from_address\n email['Subject'] = \"Website Request Received\"\n s.sendmail(from_address, to_address, email.as_string())\n s.quit()",
"def request_verification_bypass(request, env, email):\n if request.method == 'POST':\n oauth_client = OAUTHCLIENT(env)\n token = oauth_client.get_token()\n content = {'message': email + \" has been requested for By-pass to \" + env}\n\n if 'access_token' in token:\n if env == 'qa32':\n host = 'http://qajb101.p2pcredit.local/users/email/'\n elif env == 'stg':\n host = 'http://stage-api-proxy-A.vip.c1.stg/users/email/'\n elif env == 'qa20':\n host = 'http://np97.c1.dev/users/email/'\n\n # create header with access token\n headers = {'Authorization': token['token_type'] + ' ' + token['access_token']}\n\n # request email verification by-pass with access-token\n response = requests.get(\n host + email,\n headers=headers\n )\n\n response_json = response.json()\n\n # build response message\n if response_json['email_exists']:\n if response_json['activation_key'] == \"\":\n content['result'] = \"VERIFIED\"\n content['message'] = email + \" is auto-verified on \" + env\n else:\n content['result'] = \"NOT VERIFIED\"\n content['message'] = email + \" is not verified yet on \" + env + \\\n \". Please verify your email by clicking 'Verify Email' link.\"\n else:\n content['result'] = \"USER NOT FOUND\"\n content['message'] = email + \" is not found on \" + env\n\n response_status = status.HTTP_200_OK\n content['response'] = response_json\n else:\n content['result'] = str(token)\n response_status = status.HTTP_500_INTERNAL_SERVER_ERROR\n content['response'] = 'No token generated'\n\n return Response(content, status=response_status)",
"def __send_verification(self, email):\r\n user = User.getUser(email.lower())\r\n if user is None or user.verified:\r\n self.set_error(constants.STATUS_BAD_REQUEST, message=None, url=\"/\")\r\n return\r\n user.verificationCode = b64encode(CryptoUtil.get_verify_code(), \"*$\")\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'code': user.verificationCode,\r\n 'url': constants.VERIFICATION_URL\r\n }\r\n template = self.jinja2_env.get_template('verificationemail.jinja')\r\n message = mail.EmailMessage()\r\n message.sender = constants.SENDER_ADDRESS\r\n message.to = user.email\r\n message.subject = 'Please verify your address'\r\n message.body = template.render(template_values)\r\n message.send()\r\n user.put()",
"def test_send_email(self):\n\t\trecipient = \"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)",
"def send(self, email):\n client = self.clients[email.addressee]\n client.receive(email)",
"def cmd_account_send_verification(client, args):\n verification_email = client.send_verification_email(args.username)\n generate_output({'verification_email': verification_email})",
"def PostVerifyEmail(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_resend_activation_email(self):\n\n data = {\n 'email': self.user.email,\n }\n\n response = self.client.post(\n reverse('user-resend-activation-email'),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n response.content\n )\n\n self.assertEqual(\n response.content,\n b'',\n )\n\n self.assertEqual(len(mail.outbox), 1)",
"def send_sender_activation_email(self, email):\n logger.info(\"Function call: send_sender_activation_email for '{}'\".format(email, ))\n return self.__handle_error('Empty sender email') if not email else self.__handle_result(self.__send_request('senders/{}/code'.format(email, )))",
"def mail_responder(event, _):\n logger.info('%s: Request received:%s', __name__,\n str(event['Records'][0]['eventSource']))\n\n try:\n (source_email, recipient) = parse_ses_notification(\n event['Records'][0]['ses'])\n except Exception:\n logger.error('Error parsing received Email')\n return False\n\n LANG = CONFIG['LANG']\n\n logger.debug('Source Email {} recipient {}'.format(\n source_email, recipient))\n\n if recipient == CONFIG['TEST_EMAIL']:\n feedback.send_email(\n CONFIG['REPLY_EMAIL'],\n source_email,\n TEMPLATES['EMAIL_SUBJECT'],\n 'a',\n 'a',\n '',\n None,\n CONFIG['FEEDBACK_EMAIL'])\n return True\n\n elif recipient == CONFIG['TEST_EMAIL_NEW']:\n email_key(source_email, 'https://example.com')\n return True\n\n elif recipient == CONFIG['REPLY_EMAIL']:\n logger.info('Response to no-reply ignored')\n return True\n\n elif recipient == CONFIG['DELETE_USER_EMAIL']:\n try:\n deleted = api.delete_user(user_id=source_email)\n except Exception:\n email(source_email, 'try_again.j2')\n return False\n if deleted:\n email(source_email, 'unsubscribed.j2')\n return False\n\n elif recipient == CONFIG['GET_EMAIL']:\n try:\n user_exist = api.get_user(source_email)\n except Exception:\n logger.error('API error when checking {}'.format(source_email))\n email(source_email, 'try_again.j2')\n return False\n\n if not user_exist:\n try:\n api.create_user(source_email, 'EM')\n except Exception:\n logger.error('API error when Creating {}'.format(source_email))\n email(source_email, 'try_again.j2')\n return False\n\n try:\n new_key = api.get_new_key(user_id=source_email)\n except Exception:\n logger.error(\n 'API error when getting key fo {}'.format(source_email))\n email(source_email, 'try_again.j2')\n return False\n\n if not new_key:\n email(source_email, 'no_key.j2')\n return False\n\n awsurl = ((CONFIG['OUTLINE_AWS_URL']).format(\n urllib.parse.quote(new_key)))\n\n email_key(source_email, awsurl)\n\n return True",
"def test_email_good(get_email, capsys):\n e = get_email\n e.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out",
"def send_message_to_account():\n json_values = request.json\n values = {\"email\"}\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\n if all(key in json_values for key in values):\n if validator_email.is_valid(json_values):\n account = Account()\n account.email = json_values[\"email\"]\n result = account.change_code(create_code())\n if result == ResponsesREST.SUCCESSFUL.value:\n response = Response(json.dumps({\"email\": account.email}),\n status=result, mimetype=\"application/json\")\n else:\n response = Response(json.dumps(json_error(result)),\n status=result, mimetype=\"application/json\")\n return response",
"def send_confirm_email(request,uid):\n user=models.UserProfile.objects.get(id=uid)\n current_site=get_current_site(request)\n email_subject='Activate Your Account'\n message=render_to_string('activate_account.html',{\n 'user':user,\n 'domain':current_site.domain,\n 'uid':urlsafe_base64_encode(force_bytes(uid)),\n 'token':account_activation_token.make_token(user),\n })\n to_email= user.email\n email= EmailMessage(email_subject,message,to=[to_email])\n email.send()\n return JsonResponse(\n {\n \"status\":\"The confirmation email has been sent.\",\n }\n )",
"def email(self, identifier, data):\n self.client.request_with_method(Methods.EMAIL % (self.name, identifier,),\n data=data)",
"def verify_email(request):\n user = User.objects.get(username=request.user)\n if request.method == 'POST':\n otp = request.data.get('otp')\n if not otp:\n return Response({'message':\"We cannot find your otp\"}, status=status.HTTP_400_BAD_REQUEST)\n\n #Get token\n qs = ResetRequests.objects.filter(user=user, token=otp, use_case = 'account confirmation')\n if not qs.exists():\n return Response({'message':'Wrong Token.'}, status=status.HTTP_400_BAD_REQUEST)\n\n #Grab the last token\n token_request = qs.last()\n timer = token_request.created_at\n\n #Check token expiry\n if timezone.now() > timer + timezone.timedelta(minutes=10):\n return Response({'message':'Token Expired. Request another please.'}, status=status.HTTP_400_BAD_REQUEST)\n\n #Check whether token has been used.\n if token_request.consumed:\n return Response({\"message\":\"Pin has been used already\"}, status=status.HTTP_400_BAD_REQUEST)\n\n if int(otp) == int(token_request.token):\n #Set user as verified\n user.email_verified = True\n user.save()\n #Set token as consumed\n token_request.consumed = True\n token_request.save()\n\n #Send Confirmation Mail\n email_subject = \"SpendWise - Account Verified.\"\n email_msg = \"Your account has been verified. Welcome to the SpendWise Ecosystem\"\n try:\n sendEmail(user, email_subject, \"Account Verified\", information=email_msg)\n return Response({'message':'User account successfully verified.'}, status=status.HTTP_200_OK)\n except:\n return Response({'message':'We could not send a confirmation email'}, status=status.HTTP_200_OK)\n\n\n if request.method == 'GET':\n to = User.objects.get(username=request.user).email\n pin = random.randint(0, 1000000)\n #presumes this link is only reachable cos the user already has an email.\n to = user.email\n try:\n subject = \"Account Confirmation.\"\n message = f\"Your Account Confirmation code is {pin}\\n\\nExpires in 10 minutes.\"\n sendEmail(user, subject, \"Account Confirmation\", information=message, otp=pin)\n\n #Write to user's record\n ResetRequests.objects.create(\n user = user,\n token = pin,\n use_case = 'account confirmation'\n )\n #Add password reset request date here\n return Response({'message':'Token sent to registered email.',\n 'email' : to},\n status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'message':'We could not send an email', 'error':e},\n status=status.HTTP_400_BAD_REQUEST)\n\n #Do the actual verification\n #Verified is alrady possibly True via sms. What happens now?",
"def test_check_email(self):\n url = reverse('check_email')\n data = {\"emails\": [\"[email protected]\"]}\n response_data = {\"results\": [{\"email\": \"[email protected]\", \"blocked\": True}], \"success\": True}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, response_data)",
"def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)",
"def send_email(request):\n if \"email\" in request.DATA:\n email_addr = request.DATA[\"email\"]\n try:\n user = User.objects.get(email=email_addr)\n except User.DoesNotExist:\n return JsonResponse(\n \"Bad request - No registered user with that email\",\n status=400,\n safe=False,\n )\n\n urlsafe_chars = string.ascii_letters + string.digits + \"-_\"\n code_str = \"\".join(random.choice(urlsafe_chars) for _ in range(100))\n\n # 30 minutes from now\n expiry_time = timezone.now() + datetime.timedelta(minutes=30)\n\n # overwrite old code\n if PasswordResetCode.objects.filter(user_id=user.id).exists():\n reset_code = PasswordResetCode.objects.get(user_id=user.id)\n reset_code.delete()\n\n PasswordResetCode.objects.create(\n user_id=user.id, code=code_str, expiry=expiry_time\n )\n\n message = build_email(\n email_addr, user.id, code_str, user.first_name, user.username\n )\n send_reset_email(message)\n\n return JsonResponse(\"OK - email sent\", status=200, safe=False)\n\n return JsonResponse(\n \"Bad request - Must provide email\", status=400, safe=False\n )",
"def get_invitation_email(address, key):\n\n EMAIL = '[email protected]'\n SUBJECT = 'Your Foojal Invitation'\n URL = 'http://app.foojal.com/invites/'\n EMAIL_CONTENT = \"\"\"\nYou have been invited to Foojal.com!\n\nTo accept this invitation, click the following link,\nor copy and paste the URL into your browser's address\nbar:\n\n%s\"\"\"\n\n message = EmailMessage()\n message.sender = EMAIL\n message.to = address\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % URL + key\n return message",
"def _confirm_email(user, email):\n mail_subject = 'Подтверждение почты'\n message = render_to_string('accounts/account_verification_email.html', {\n 'user': user,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': default_token_generator.make_token(user),\n 'email': email,\n })\n to_email = email\n send_email = EmailMessage(mail_subject, message, to=[to_email])\n send_email.send()",
"def request_email_address_confirmation(\n user: User, email_address: str, site_id: SiteID\n) -> None:\n normalized_email_address = _normalize_email_address(email_address)\n\n email_address_verification_service.send_email_address_confirmation_email(\n normalized_email_address, user.screen_name, user.id, site_id\n )",
"def PostResendVerifyEmail(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_check_email_endpoint(self, **kwargs):\n email_id = kwargs.get('email_id', Workflows.generate_new_email(suffix=self.global_config[\"email_id_suffix\"]))\n client_id = kwargs.get('client_id', self.global_config[\"client_id\"])\n client_secret = kwargs.get('client_secret', self.global_config[\"client_secret\"])\n relative_url = kwargs.get('url', self.test_args[\"relative_url\"]).format(email_id, client_id, client_secret)\n\n restapi = Rest(base_uri=self.global_config[\"base_url\"])\n response = restapi.get(relative_url=relative_url, **kwargs)\n if kwargs.get(\"return_response_obj\", False):\n return response\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"data\"][\"available\"] == self.test_args[\"expected_result\"], \"Test Failed\"\n return None",
"def test_update_email_task_send_email_to_current_user(self):\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/UpdateEmail\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": user.id,\n \"authenticated\": True,\n }\n\n data = {\"new_email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 2)\n\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[0].subject, \"update_user_email_additional\")\n\n self.assertEqual(mail.outbox[1].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[1].subject, \"update_user_email_token\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.name, \"[email protected]\")\n\n self.assertEqual(len(mail.outbox), 3)",
"def test_update_email_task_send_email_current_name_not_email(self):\n\n user = fake_clients.FakeUser(\n name=\"nkdfslnkls\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/UpdateEmail\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"nkdfslnkls\",\n \"user_id\": user.id,\n \"authenticated\": True,\n \"email\": \"[email protected]\",\n }\n\n data = {\"new_email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"task created\"]})\n\n self.assertEqual(len(mail.outbox), 2)\n self.assertEqual(mail.outbox[0].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[0].subject, \"update_user_email_additional\")\n\n self.assertEqual(mail.outbox[1].to, [\"[email protected]\"])\n self.assertEqual(mail.outbox[1].subject, \"update_user_email_token\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertEqual(len(mail.outbox), 3)",
"def test_send_email(self):\n self.register()\n response = self.client.post(self.password_reset_url,\n self.email,\n format=\"json\")\n self.assertEqual(response. status_code, status.HTTP_200_OK)\n self.assertEqual(json.loads(response.content), {'message':\n 'Successfully sent.Check your email'})",
"def test_process_bn_email(app, session):\n # setup filing + business for email\n identifier = 'BC1234567'\n filing = prep_incorp_filing(session, identifier, '1', 'bn')\n business = Business.find_by_identifier(identifier)\n # sanity check\n assert filing.id\n assert business.id\n token = '1'\n # run worker\n with patch.object(AccountService, 'get_bearer_token', return_value=token):\n with patch.object(worker, 'send_email', return_value='success') as mock_send_email:\n worker.process_email(\n {'email': {'filingId': None, 'type': 'businessNumber', 'option': 'bn', 'identifier': 'BC1234567'}},\n app\n )\n # check email values\n assert '[email protected]' in mock_send_email.call_args[0][0]['recipients']\n assert '[email protected]' in mock_send_email.call_args[0][0]['recipients']\n assert mock_send_email.call_args[0][0]['content']['subject'] == \\\n f'{business.legal_name} - Business Number Information'\n assert mock_send_email.call_args[0][0]['content']['body']\n assert mock_send_email.call_args[0][0]['content']['attachments'] == []",
"def test_query_fee_recipients(\n test_client, pydex_client\n):\n res = test_client.get(\n pydex_client.fee_recipients_url\n )\n assert res.status_code == 200",
"def send_sbemail(email_addy, city):\n print(\"Single email send attempt: %s, %s\" % (email_addy, city))\n given_pair, weather = subject_phrase_picker(city)\n mailtext = '%s %s in %s!' % ( given_pair['phrasing'], weather, city)\n print(\"Going to try a mailgun post to: %s with subject %s\" % (email_addy, given_pair['subject']))\n response = requests.post(\n MG_API_URL,\n auth=(\"api\", MG_API_KEY),\n data={'from': 'Nick Cage <nickcage@%s>' % (MG_DOMAIN),\n 'to': [email_addy],\n 'subject': given_pair['subject'],\n 'text': mailtext\n })\n print(\"status: %s | %s and response: %s\" % (response.status_code, response.reason, response.text))\n print(\"Attempted send %s to: %s!\" % (mailtext, email_addy))\n return \"Sent %s to: %s!\" % (mailtext, email_addy)"
] | [
"0.59033644",
"0.5902301",
"0.5873402",
"0.58182836",
"0.5810677",
"0.57829",
"0.576119",
"0.5751676",
"0.57376075",
"0.5684819",
"0.5678226",
"0.56766224",
"0.56520915",
"0.56503695",
"0.5622658",
"0.56179893",
"0.55932885",
"0.5574992",
"0.5557751",
"0.55373156",
"0.5535638",
"0.55311424",
"0.55304044",
"0.55272704",
"0.551044",
"0.55103356",
"0.5507628",
"0.550224",
"0.5497024",
"0.5495744"
] | 0.6018859 | 0 |
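The request() document above builds a different transaction payload depending on whether the amount is denominated in BTC or in a fiat currency, then POSTs the JSON body. The payload construction on its own can be sketched as below; the field names mirror the row, while the sample values are placeholders.

import json

def build_request_payload(from_email, amount, notes="", currency="BTC"):
    if currency == "BTC":
        txn = {"from": from_email, "amount": amount, "notes": notes}
    else:
        txn = {
            "from": from_email,
            "amount_string": str(amount),
            "amount_currency_iso": currency,
            "notes": notes,
        }
    return json.dumps({"transaction": txn})

print(build_request_payload("friend@example.com", 0.25))
print(build_request_payload("friend@example.com", 20, currency="USD"))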
Retrieve a transaction's details | def get_transaction(self, transaction_id):
url = self.base_url + 'transactions/' + str(transaction_id)
self.session.headers.update(self.sign(url))
resp = self.session.get(url)
return resp.json() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_transaction_details_request(self):\n self.trans_details.get_transaction_details(\n trans_id = 123456,\n )",
"def get_transaction_detail(payload):\n response = requests.post(url, data=payload)\n return response.json()",
"def get_transition_details(self, account_id, transaction_id):\n endpoint = 'accounts/{0}/transactions{1}'.format(account_id,\n transaction_id)\n\n return self._api.request(endpoint)",
"def gettransaction(self, txid):\n return TransactionInfo(**self.proxy.gettransaction(txid))",
"def transaction(self, transaction_id: str):\n return get_from_list(self.transactions, \"id\", transaction_id)",
"def raw_get_transaction(cls, txid):\n r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)\n r.raise_for_status() # pragma: no cover\n return r.json()",
"def getrawtransaction(self, txid, verbose=True):\n if verbose:\n return TransactionInfo(**self.proxy.getrawtransaction(txid, 1))\n return self.proxy.getrawtransaction(txid, 0)",
"def get_transaction(self, excludes_list):\n response = client.get(self.url, \"transactions\", {\"exclude_hash\": excludes_list})\n if response.status == 200:\n print(\"Transaction successfully received\")\n return Transaction.parse(response.data)\n elif response.status == 404:\n # print(\"no request to be received\")\n return None\n else:\n print(\"Unknown error while requesting transaction\")\n return None",
"def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)",
"def getTransaction(hash, config):\n platon = get_eth_obj(config)\n try:\n # 交易信息\n transaction = dict(platon.getTransaction(hash))\n HexBytes_to_str(transaction)\n # 交易回执信息\n transaction_receipt = dict(platon.getTransactionReceipt(hash))\n HexBytes_to_str(transaction_receipt)\n except Exception as e:\n cust_print('Failed to query transaction information,error message:{}.'.format(e))\n sys.exit(1)\n cust_print('query transaction information successful!', fg='g')\n info = \"transaction:\\n\"\n info += \"{}\\n\".format(json.dumps(dict(transaction), indent=2))\n info += \"\\n\\ntransaction receipt:\\n\"\n info += \"{}\".format(json.dumps(dict(transaction_receipt), indent=2))\n cust_print('{}'.format(info), fg='g')",
"def get(self, id):\n return get_transaction_list_data(id)",
"def get_transaction(id):\n\n transaction = get_db().execute('SELECT * FROM transactions WHERE id = ?', (id, )).fetchone()\n\n if transaction is None:\n abort(404, gettext(\"transaction id %(id)d does not exist.\", id=id))\n\n return transaction",
"def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)",
"def get_tx(txid):\n return requests.get(BASE+f'/api/tx/{txid}').json()",
"def get_transaction(self, i):\n\t\treturn self.transactions[i]",
"def get_transaction(self, i):\n\t\treturn self.transactions[i]",
"def get_transaction(self, i):\n return self.transactions[i]",
"def get_transaction(self, transaction_id: str) -> Transaction:\n return self.session.get_transaction(\n transaction_id=transaction_id,\n account_or_account_id=self.account_id,\n )",
"def get_transactions_grid_row_details(self):\n self.grid_row_details_dictionary.clear()\n self.grid_row_details_dictionary.update({\"Transaction Code\": \"\", \"Credit/Debit\": \"\", \"Transaction Details\": \"\", \"Currency\": \"\", \"Amount\": \"\"})\n return self.get_grid_row_details(self.transactions_grid_div_id, self.grid_row_details_dictionary, self.transactions_data_grid_name)",
"def test_get_transaction_list_request(self):\n self.trans_details.get_transaction_list(\n batch_id = 123456,\n )",
"def ReadTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"async def process_get_txn(self, int txn) -> str:\n\n return json.dumps(json.loads(await ledger.build_get_txn_request(self.did, txn))['result']['data'])",
"def get_tx_info(tx):\n\n input_addresses = []\n output_addresses = []\n payments = []\n\n try:\n response = json.loads(make_request('http://tbtc.blockr.io/api/v1/tx/info/' + tx))\n except Exception as e:\n status = json.loads(e.message).get('status')\n if status in ['error', 'fail']:\n return {'from': None, 'to': None, 'amount': None, 'confirmations': 0}\n\n vins = response.get('data').get('vins')\n vouts = response.get('data').get('vouts')\n confirmations = response.get('data').get('confirmations')\n\n for i in range(len(vins)):\n if vins[i].get('address') not in input_addresses:\n input_addresses.append(vins[i].get('address'))\n for i in range(len(vouts)):\n output_addresses.append(vouts[i].get('address'))\n payments.append(vouts[i].get('amount'))\n\n return {'from': input_addresses, 'to': output_addresses, 'amount': payments, 'confirmations': confirmations}",
"def get_transaction(session, transaction_id, expand=False):\n transaction = session.query(Transaction).get(transaction_id)\n transaction_schema = TransactionSchema()\n transaction_json = transaction_schema.dump(transaction).data\n # We may want to expand the user\n if expand:\n user_id = transaction_json[\"user\"].split(\"/\")[-1]\n user_json = get_user(session, user_id)\n transaction_json[\"user\"] = user_json\n return transaction_json",
"def test_wallets_get_transaction_list(self):\n pass",
"def get_account_details(self):\n pass",
"def get_details(self):",
"async def test_txn_get(self):\n self.stream.preset_response(transaction=Mocks.make_txns('1')[0])\n\n response = await self.get_assert_200('/transactions/1')\n self.stream.assert_valid_request_sent(transaction_id='1')\n\n self.assertNotIn('head', response)\n self.assert_has_valid_link(response, '/transactions/1')\n self.assertIn('data', response)\n self.assert_txns_well_formed(response['data'], '1')",
"def check(transaction):\n if not isinstance(transaction, Transaction):\n transaction = Transaction.objects.get(id=transaction)\n\n r = requests.post(\"https://www.blockonomics.co/api/searchhistory\",\n data=json.dumps({\"addr\": transaction.to_address}))\n try:\n history_data = json.loads(r.content.decode('utf-8'))['history'][0]\n except:\n return\n\n set_tx_details(history_data, transaction)",
"def get_order(self, order_id):\n request = OrdersGetRequest(order_id)\n #3. Call PayPal to get the transaction\n response = self.client.execute(request)\n return response\n #4. Save the transaction in your database. Implement logic to save transaction to your database for future reference."
] | [
"0.80232537",
"0.7354677",
"0.73334664",
"0.70271695",
"0.69203156",
"0.6875585",
"0.67973626",
"0.6767174",
"0.67307967",
"0.6686202",
"0.6666744",
"0.66518664",
"0.6626436",
"0.65828323",
"0.65755147",
"0.65755147",
"0.65399855",
"0.6448602",
"0.6392721",
"0.63261706",
"0.63250726",
"0.6264655",
"0.61993015",
"0.6166074",
"0.61252964",
"0.60767394",
"0.6068742",
"0.6050071",
"0.60486615",
"0.6025889"
] | 0.7456548 | 1 |
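The get_transaction document above appends the transaction id to a transactions endpoint and returns the decoded JSON. Generalised to a plain requests call with basic error handling, a sketch might look like this; the base URL and headers are placeholders.

import requests

def get_transaction(base_url, transaction_id, headers=None):
    url = "%s/transactions/%s" % (base_url.rstrip("/"), transaction_id)
    resp = requests.get(url, headers=headers or {}, timeout=10)
    resp.raise_for_status()  # surface HTTP errors instead of decoding an error page
    return resp.json()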
Sends a friend request to the user | async def send_friend_request(self):
logging.debug("Sending friend request to " + self.username)
if self.is_friend:
raise ObjectErrors.AlreadyFriends(
"You are already friends with " + self.display_name)
await self.client.request.post(
"/user/%s/friendRequest" % self.id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_request(request, id):\n user = get_object_or_404(User, id=id)\n f_request, created = FriendRequest.objects.get_or_create(\n from_user=request.user,\n to_user=user\n )\n if created:\n messages.success(\n request,\n f'Your friend request to {user} has been sent.'\n )\n\n return redirect('/profiles/%s/' % user.profile.slug)\n messages.info(\n request,\n f'You have already sent a friend request to {user}'\n )\n return redirect('/profiles/%s/' % user.profile.slug)",
"def user_send_friend_request(self):\n try:\n assert request.is_json\n except AssertionError:\n self.logger.debug(messages.REQUEST_IS_NOT_JSON)\n return messages.ERROR_JSON % messages.REQUEST_IS_NOT_JSON, 400\n content = request.get_json()\n if not FRIEND_REQUEST_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug(messages.MISSING_FIELDS_ERROR % (FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())))\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % (\n FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())), 400\n email_token = auth.current_user()[0]\n try:\n self.friend_database.create_friend_request(email_token, content[\"other_user_email\"])\n except UnexistentTargetUserError:\n self.logger.debug(messages.USER_NOT_FOUND_MESSAGE % content[\"other_user_email\"])\n return messages.ERROR_JSON % (messages.USER_NOT_FOUND_MESSAGE % content[\"other_user_email\"]), 404\n except UsersAlreadyFriendsError:\n self.logger.debug(messages.USERS_ALREADY_FRIEND_ERROR)\n return messages.ERROR_JSON % messages.USERS_ALREADY_FRIEND_ERROR, 400\n except UnexistentRequestorUserError:\n self.logger.debug(messages.INTERNAL_ERROR_CONTACT_ADMINISTRATION)\n return messages.ERROR_JSON % messages.INTERNAL_ERROR_CONTACT_ADMINISTRATION, 500\n self.notification_database.notify(content[\"other_user_email\"],\n \"New friendship request\", \"From %s\" % email_token,\n {\"kind\": \"friendship_request\",\n \"from\": email_token})\n return messages.SUCCESS_JSON, 200",
"async def send_friend_request(self, TargetId: int):\n data = {\n 'targetUserId': TargetId\n }\n e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/request-friendship',\n method='post',\n data=data)\n return e",
"def friend_request():\n if 'username' not in session:\n return redirect('/login?type=0')\n user1 = session['username']\n user2 = request.form['username']\n now_time = Time.time()\n if not re.search(ID_REG, user2) and user2 != 'admin':\n return jsonify(res=-4)\n # check friend\n with sqlite3.connect('data.db') as conn:\n cur = conn.cursor()\n cur.execute('SELECT * FROM friend WHERE user1 = ? AND user2 = ?', [user1, user2])\n if cur.fetchall() or user1 == user2:\n return jsonify(res=-1)\n cur.execute('SELECT * FROM friend_request WHERE user1 = ? AND user2 = ?', [user1, user2])\n if cur.fetchall():\n return jsonify(res=-2)\n cur.execute('SELECT * FROM friend_request WHERE user1 = ? AND user2 = ?', [user2, user1])\n if cur.fetchall():\n return jsonify(res=-3)\n cur.execute('SELECT * FROM user_login WHERE username = ?', [user2])\n if not cur.fetchall():\n return jsonify(res=-4)\n cur.execute('INSERT INTO friend_request VALUES (?, ?, ?)', [user1, user2, now_time])\n conn.commit()\n return jsonify(res=0)",
"def friend(tcp, udp, userId, data):\n\n # from server get address of potential friend\n tcp.sendMessage('SEARCH ' + data[0])\n address = tcp.receiveMessage().split()[-2:]\n address = (address[0], int(address[1]))\n\n # send friend request\n if address:\n udp.sendto('FRIEND ' + userId, address)\n print 'Sent friend request to ' + data[0]\n else: print 'Could not send friend request to ' + data[0]",
"def accept_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n if f_request.to_user == request.user:\n f_request.to_user.profile.friends.add(f_request.from_user)\n f_request.from_user.profile.friends.add(f_request.to_user)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request was successfully accepted'\n )\n return redirect('profiles:my_friends')",
"def test_request_friend(self):\n self.test_login_user()\n self.test_create_user('b')\n url = reverse('MGA:send_friend_request')\n data = {'id': 2}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.create_pending_friend_request(user_id, target_id)",
"def post(self):\n\t\tdb = getattr(g, 'db', None)\n\t\tobj = request.get_json()\n\n\t\tif ('username' not in obj) or ('session' not in obj):\n\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\telif not authenticate(obj['username'],obj['session']):\n\t\t\treturn {'status':'AUTH_FAIL'}\n\t\telif ('action' not in obj):\n\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\telse:\n\t\t\taction = obj['action']\n\t\t\tif action == 'ADD' and 'friend' in obj:\n\t\t\t\tqry = \"INSERT INTO friends VALUES ((SELECT id FROM profiles WHERE username = %s),\\\n\t\t\t\t\t(SELECT id FROM profiles WHERE username = %s));\"\n\t\t\t\twith db as cur:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tlines = cur.execute(qry, (obj['username'],obj['friend']))\n\n\t\t\t\t\t\tif lines > 0:\n\t\t\t\t\t\t\treturn {'status':'FRIEND_ADDED'}\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn {'status':'QUERY_FAILED'}\n\t\t\t\t\texcept sql.IntegrityError:\n\t\t\t\t\t\treturn {'status':'DUPLICATE_USER'}\n\t\t\t\t\texcept sql.OperationalError:\n\t\t\t\t\t\treturn {'status':'NO_SUCH_USER'}\n\n\t\t\telif action == 'GET':\n\t\t\t\t\"\"\" Retrieve all friends belonging to user. \"\"\"\n\t\t\t\tfriends = [] #accepted, both ends\n\t\t\t\tpending = [] #pending answer from friend\n\n\t\t\t\t# retrieve canonical friends\n\t\t\t\tuserqry = \"SELECT id FROM profiles WHERE username = %s\"\n\t\t\t\tfriendsqry = \"SELECT friend FROM friends WHERE target = ANY(\"+userqry+\")\"\n\t\t\t\tqry = \"SELECT username FROM profiles WHERE id = ANY(\"+friendsqry+\");\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'],))\n\t\t\t\t\tfor friend in cur.fetchall():\n\t\t\t\t\t\tfriends += friend\n\n\t\t\t\t# retrieve pending requests\n\t\t\t\tuserqry = \"SELECT id FROM profiles WHERE username = %s\"\n\t\t\t\tfriendsqry = \"SELECT target FROM friends WHERE friend = ANY(\"+userqry+\")\"\n\t\t\t\tqry = \"SELECT username FROM profiles WHERE id = ANY(\"+friendsqry+\");\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'],))\n\t\t\t\t\tprint \"friends:\"+str(friends)\n\t\t\t\t\tfor req in cur.fetchall():\n\t\t\t\t\t\tif not req[0] in friends:\n\t\t\t\t\t\t\tpending += req\n\n\t\t\t\tif not (len(friends)<=0 and len(pending)<=0):\n\t\t\t\t\treturn {'status':'QUERY_OK', 'friends':friends, 'pending':pending}\n\t\t\t\telse:\n\t\t\t\t\treturn {'status':'NO_FRIENDS'}\n\n\t\t\telif action == 'DELETE' and 'friend' in obj:\n\t\t\t\tqry = \"DELETE FROM friends WHERE target = (SELECT id FROM profiles WHERE username = %s)\\\n\t\t\t\t\tand friend = (SELECT id FROM profiles WHERE username = %s);\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'], obj['friend']))\n\t\t\t\t\tif lines>0:\n\t\t\t\t\t\treturn {'status':'FRIEND_DELETED'}\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn {'status':'QUERY_FAILED'}\n\n\t\t\telse:\n\t\t\t\treturn {'status':'INVALID_ACTION'}",
"def user_accept_friend_request(self):\n try:\n assert request.is_json\n except AssertionError:\n self.logger.debug(messages.REQUEST_IS_NOT_JSON)\n return messages.ERROR_JSON % messages.REQUEST_IS_NOT_JSON, 400\n content = request.get_json()\n if not FRIEND_REQUEST_MANDATORY_FIELDS.issubset(content.keys()):\n self.logger.debug(messages.MISSING_FIELDS_ERROR % (FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())))\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % (\n FRIEND_REQUEST_MANDATORY_FIELDS - set(content.keys())), 400\n email_token = auth.current_user()[0]\n try:\n self.friend_database.accept_friend_request(content[\"other_user_email\"], email_token)\n except UnexistentFriendRequest:\n self.logger.debug(messages.UNEXISTENT_FRIEND_REQUEST % (content[\"other_user_email\"], email_token))\n return messages.ERROR_JSON % (messages.UNEXISTENT_FRIEND_REQUEST %\n (content[\"other_user_email\"], email_token)), 404\n return messages.SUCCESS_JSON, 200",
"def get(self):\n user = users.get_current_user()\n if not user:\n self.response.out.write(json.dumps(error_obj('User not logged in.')))\n return\n friend = self.request.get('email')\n if not friend:\n self.response.out.write(json.dumps(error_obj('Must provide email of friend to add.')))\n return\n account = user_info.get_user_account()\n if not friend in account.friend_list:\n self.response.out.write(json.dumps(error_obj('This email is not in your friends list.')))\n return\n friend_account = user_info.get_by_email(friend)\n self.response.out.write(json.dumps(account_info(friend_account)))",
"def create_friend_request():\n if request.method == \"GET\":\n friend_requests = [f.to_dict() for f in g.user.get_friend_requests()]\n return jsonify({'success': True, 'friend_requests': friend_requests})\n\n if request.method == \"POST\":\n # Get recieving user id from request\n json = request.get_json()\n if json is None:\n raise CustomError(400, message=\"No JSON included or Content-Type\"\n \"is not application/json\")\n\n if 'recieving_user_id' not in json:\n raise CustomError(400, message=\"Must include recieving_user_id\")\n\n recieving_user_id = json['recieving_user_id']\n\n # Get the user object\n recieving_user = User.query.get(recieving_user_id)\n if recieving_user is None:\n raise CustomError(\n 404,\n message='User with id: {} was not found.'.format(\n recieving_user_id)\n )\n\n # Check friendship does not already exist\n friendship_exists = Friendship.query.filter(\n (Friendship.actioning_user_id == g.user.id) |\n (Friendship.recieving_user_id == g.user.id),\n (Friendship.actioning_user_id == recieving_user_id) |\n (Friendship.recieving_user_id == recieving_user_id)\n ).first()\n\n if friendship_exists:\n raise CustomError(\n 409,\n message=\"There is either a pending friend request between the\"\n \"two users or the two users are already friends.\"\n )\n\n # Insert friend request\n friend_request = Friendship(g.user, recieving_user)\n db.session.add(friend_request)\n db.session.commit()\n\n return jsonify({'success': True}), 201",
"def add_friend():\n\n\n user_id = session['user_id']\n add_friend = request.form.get(\"add-friend\")\n friend_id = request.form.get(\"friend_id\")\n friendship = Friendship.add_friend(user_id, friend_id)\n\n print \"This is the friend id\", friend_id\n\n return 'friend added'",
"def dispatch(self, request, *args, **kwargs):\n user_to = User.objects.get(pk=kwargs['pk'])\n user_from = self.request.user\n ###\n if user_to not in wanna_be_friends(user_from):\n friendship = FriendshipInvitation.objects.create(\n from_user=user_from, to_user=user_to, status=\"0\")\n\n notif = Notification.objects.create(sender=user_from,\n receiver=user_to,\n notif_type='friend_request')\n # Aca se ha enviado la solicitud\n else:\n return HttpResponseRedirect(\"/fr_req_fail/\")\n return HttpResponseRedirect(\"/\")",
"def get_friend_request_with_id(id):\n # Get friend request\n friendship = Friendship.query.get(id)\n if friendship is None:\n raise CustomError(\n 404,\n message=\"Friendship with id: {} not found.\".format(id)\n )\n can_view = friendship.actioning_user_id == g.user.id or \\\n friendship.recieving_user_id == g.user.id\n # Check user is has permission to view that request\n if not can_view:\n raise CustomError(\n 401,\n message=\"You are not authorised to view this resource.\"\n )\n\n if request.method == \"GET\":\n return jsonify({'success': True, 'friendship': friendship.to_dict()})\n\n if request.method == \"PATCH\":\n if friendship.recieving_user_id != g.user.id:\n raise CustomError(\n 401,\n message=\"You are not authorised to update this object.\"\n )\n\n json = request.get_json()\n if json is None:\n raise CustomError(400, message=\"No JSON included or Content-Type\"\n \"is not application/json\")\n if 'confirmed' in json:\n friendship.confirmed = json['confirmed']\n\n db.session.commit()\n return jsonify({'success': True, 'friendship': friendship.to_dict()})\n\n if request.method == \"DELETE\":\n db.session.delete(friendship)\n db.session.commit()\n return jsonify({'success': True})",
"def test_accept_friend(self):\n self.test_request_friend()\n url = reverse('MGA:accept_friend_request')\n data = {'id': 2}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def follow_friend():\n print \"followuser\"\n username = request.args.get('username')\n print \"JSON Data\", username\n # username= req_data[username]\n whom_id = get_user_id(username)\n print \"whom_id:\", whom_id\n if whom_id is None:\n abort(404)\n follow_query(whom_id)\n flash('You are now following \"%s\"' % username)\n name = {'name of following user': username}\n R_SERVER.delete(user_timeline_key)\n return jsonify(Username=name, Status_code=status.HTTP_200_OK)",
"async def accept_request(self, TargetId: int):\n data = {\n 'targetUserId': TargetId\n }\n e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/accept-friend-request',\n method='post',\n data=data)\n return e",
"def create(self, request):\n friend_obj = Friend.objects.add_friend(\n request.user, # The sender\n get_object_or_404(User, pk=request.data['user_id']), # The recipient\n message=request.data.get('message', '')\n )\n\n return Response(\n FriendshipRequestSerializer(friend_obj).data,\n status.HTTP_201_CREATED\n )",
"def friendship_accept(request, friendship_request_id):\n #if request.method == 'POST':\n #id1 = get_object_or_404(request.user.friendship_requests_sent,id=friendship_request_id)\n f_request = FriendshipRequest.objects.get(from_user=friendship_request_id, to_user = request.user)\n from_user = request.user\n f_request.accept()\n return render (request , 'reload_page.html')\n #return render(request,'friendship/template_ags/friend_requests.html', {'from_user':from_user})",
"def add_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Send friend request\n if not mock_db.add_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when adding friend!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})",
"def friends():\n friends = [u.to_dict() for u in g.user.get_friends()]\n return jsonify({'success': True, 'friends': friends})",
"def confirm_request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n\n if self.database.delete_pending_friend_request(user_id, target_id):\n return self.database.create_friend(user_id, target_id)\n return False",
"def connect(request, pk=None):\n # check if user sent request to them self\n if int(request.user.id) == int(pk):\n return Response({'status': '400', 'code': 'E_SAME_USER',\n 'detail': code['E_SAME_USER']}, status=400)\n\n # Check both Users are valid\n from_user = get_or_none(User, pk=request.user.id)\n to_user = get_or_none(User, pk=pk)\n # Return Error Message User is not valid\n if from_user is None or to_user is None:\n return Response({'status': '400', 'code': 'E_USER_NOT_FOUND',\n 'detail': code['E_USER_NOT_FOUND']}, status=400)\n\n # check user have sent request before or not\n current_request = get_or_none(FriendRequest, from_user=from_user, to_user=to_user)\n # search current request in reverse way\n if current_request is None:\n current_request = get_or_none(FriendRequest, from_user=to_user, to_user=from_user)\n # Return Error Message request have sent before\n if current_request is not None:\n return Response({'status': '400', 'code': 'E_ALREADY_SEND_REQUEST',\n 'detail': code['E_ALREADY_SEND_REQUEST']}, status=400)\n # Check both users are connect or not\n current_connection = get_or_none(FriendConnect, user=from_user, friend=to_user)\n # Return Error Message both user are friend before\n if current_connection is not None:\n return Response({'status': '400', 'code': 'E_ALREADY_CONNECT',\n 'detail': code['E_ALREADY_CONNECT']}, status=400)\n # Save new request\n new_request = FriendRequest(from_user=from_user, to_user=to_user)\n new_request.save()\n # Check request is save success\n is_created = get_or_none(FriendRequest, from_user=from_user, to_user=to_user)\n # Return Error Message Request is not save\n if is_created is None:\n return Response({'status': '500', 'code': 'E_NOT_SAVE',\n 'detail': code['E_NOT_SAVE']}, status=500)\n # Return Message sent request success\n return Response({'status': '200', 'code': 'OK_SEND_FRIEND_REQUEST',\n 'detail': code['OK_SEND_FRIEND_REQUEST']}, status=201)",
"def select_friend_request(self, event):\n print('selected')\n target = self.friend_request_list.get(self.friend_request_list.curselection())\n print(target)\n if messagebox.askyesno('Add friend', 'Accept ' + target + '?'):\n self.client.acceptFriendRequest(target)\n else:\n self.client.rejectFriendRequest(target)\n self.update()",
"def user_list_friend_requests(self):\n email_token = auth.current_user()[0]\n friend_emails = self.friend_database.get_friend_requests(email_token)\n friends = [self.auth_server.profile_query(email) for email in friend_emails]\n return json.dumps(friends), 200",
"def accept(request, pk=None):\n # check request is valid or not\n friend_request = get_or_none(FriendRequest, pk=pk)\n if friend_request is None:\n return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # Create friend for login user -> request user\n new_friend1 = FriendConnectSerializer(\n data={'user': friend_request.from_user.id, 'friend': friend_request.to_user.id})\n if not new_friend1.is_valid():\n return Response({'status': '400', 'code': 'E_INVALID_PARAMETER_VALUES',\n 'detail': new_friend1.errors}, status=400)\n # Create friend for request user -> login user\n new_friend2 = FriendConnectSerializer(\n data={'friend': friend_request.from_user.id, 'user': friend_request.to_user.id})\n if not new_friend2.is_valid():\n return Response({'status': '400', 'code': 'E_INVALID_PARAMETER_VALUES',\n 'detail': new_friend2.errors}, status=400)\n # Save record 1\n new_friend1.save()\n # Check save or fail\n is_save1 = get_or_none(FriendConnect, user=friend_request.from_user, friend=friend_request.to_user)\n if is_save1 is not None:\n return Response({'status': '500', 'code': 'E_NOT_SAVE',\n 'detail': code['E_NOT_SAVE']}, status=500)\n # Save record 2\n new_friend2.save()\n # Check save or fail\n is_save2 = get_or_none(FriendConnect, user=friend_request.to_user, friend=friend_request.from_user)\n # if fail delete record 1\n if is_save2 is not None:\n is_save1.delete()\n return Response({'status': '500', 'code': 'E_NOT_SAVE',\n 'detail': code['E_NOT_SAVE']}, status=500)\n # if every things ok delete request\n friend_request.delete()\n return Response({'status': '200', 'code': 'OK_SEND_FRIEND_REQUEST',\n 'detail': code['OK_ACCEPT_FRIEND_REQUEST']}, status=201)",
"def unfriend(request, pk=None):\n # Check user id and friend id\n if int(request.user.id) == int(pk):\n return Response({'status': '400', 'code': 'E_SAME_USER',\n 'detail': code['E_SAME_USER']}, status=400)\n # Check 2 user is valid\n current_user = get_or_none(User, pk=request.user.id)\n friend = get_or_none(User, pk=pk)\n # if 1 or 2 user is not valid\n if current_user is None or friend is None:\n return Response({'status': '400', 'code': 'E_USER_NOT_FOUND',\n 'detail': code['E_USER_NOT_FOUND']}, status=400)\n # get connect of request user -> friend\n # from_user=friend.to_user, to_user=request.user\n current_connection = get_or_none(Friend, from_user=current_user, to_user=friend)\n if current_connection is None:\n return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # get connect of friend to request user\n # reverse_connection = get_or_none(FriendConnect, user=friend, friend=current_user)\n #if reverse_connection is None:\n # return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n # 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # Delete\n current_connection.delete()\n #reverse_connection.delete()\n # if every thing ok\n return Response({'status': '200', 'code': 'OK_UNFRIEND',\n 'detail': code['OK_UNFRIEND']}, status=200)",
"def get(self, request, *args, **kwargs):\n context = self.get_context_data(params=kwargs)\n if context['friends']:\n self._user_obj.seen_initial_prompt = True\n self._user_obj.save()\n return self.render_to_response(context)",
"def add_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_add = get_id_from_username(request.form['add_user'])\n if not friend_to_add or friend_to_add==user_id:\n return redirect(url_for('message.converse'))\n add_friend_db(user_id, friend_to_add)\n return redirect(url_for('message.converse'))"
] | [
"0.7373825",
"0.713863",
"0.7067845",
"0.7044541",
"0.6986165",
"0.68482757",
"0.6795371",
"0.6744355",
"0.66788155",
"0.6590272",
"0.65513295",
"0.6515606",
"0.65038973",
"0.6477057",
"0.6457316",
"0.64259183",
"0.6416182",
"0.64161116",
"0.6396046",
"0.63797796",
"0.6361164",
"0.63319063",
"0.62974584",
"0.6237375",
"0.6232026",
"0.6224056",
"0.62083167",
"0.6177089",
"0.61762446",
"0.61351883"
] | 0.8217153 | 0 |
Fetches users permissions, returns list of permission objects Keyword Arguments | async def fetch_permissions(self, condensed=False):
logging.debug("Getting permissions (%scondensed)" % (
"" if condensed else "not "))
if condensed:
perms = await self.client.request.get(
"/auth/permissions", params={"condensed": True})
return perms["data"]
else:
perms = await self.client.request.get("/auth/permissions")
return [BasePermission.build_permission(
self.client, perm, self.loop) for perm in perms["data"]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_permissions(self, obj=None):",
"def octopus_permissions_get(self, msg, args):\r\n return self.permissions.get_permissions()",
"def get_permissions(self, principal_id):",
"def get_permissions(self):\n permissions = [IsAdminUser]\n return [permission() for permission in permissions]",
"def get_permissions(self):\n if self.action in ['list', 'retrieve']:\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]",
"def list_permissions(self):\n # type: () -> List[Permission]\n headers = Headers({\"accept\": \"application/json\"})\n return self.connection.api_call(\n \"GET\", [\"resources\", self.id, \"permissions\"], model=Permission, headers=headers,\n )",
"def get_permissions(self):\n if self.action in ['create', 'retrieve', 'react', 'reactions']:\n permissions = [IsAuthenticated, IsFriendPostOwner]\n elif self.action in ['update', 'partial_update']:\n permissions = [IsAuthenticated, IsCommentOwner]\n elif self.action in ['destroy']:\n permissions = [IsAuthenticated, IsCommentOrPostOwner]\n else:\n permissions = [IsAuthenticated]\n return[p() for p in permissions]",
"def get_permissions(self):\n if self.action == 'list':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action in ['retrieve', 'list']:\n self.permission_classes = [permissions.ViewUserPermission,]\n elif self.action in ['update', 'partial_update']:\n self.permission_classes = [permissions.UpdateUserPermission]\n elif self.action in ['destroy']:\n self.permission_classes = [permissions.UpdateUserPermission]\n\n return [permission() for permission in self.permission_classes]",
"def get_permissions(self):\n if self.action == 'list':\n permission_classes = [AdminPermission.__or__(ReviewerPermission)]\n elif self.action == 'retrieve':\n permission_classes = [\n AdminPermission.__or__(\n ReviewerPermission.__or__(UserPermission)\n )\n ]\n elif self.action in ['update', 'partial_update']:\n permission_classes = [AdminPermission.__or__(UserPermission)]\n else:\n permission_classes = [AdminPermission]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action == 'list':\n permission_classes = [AdminPermission.__or__(ReviewerPermission)]\n elif self.action == 'retrieve':\n permission_classes = [\n AdminPermission.__or__(\n ReviewerPermission.__or__(UserPermission)\n )\n ]\n elif self.action in ['update', 'partial_update']:\n permission_classes = [AdminPermission.__or__(UserPermission)]\n else:\n permission_classes = [AdminPermission]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n elif self.action in ['retrieve']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [AllowAny]\n return [p() for p in permissions]",
"def _get_permissions(self, user_obj, obj, from_name):\n if not user_obj.is_active or user_obj.is_anonymous or obj is not None:\n return set()\n\n perm_cache_name = '_%s_perm_cache' % from_name\n if not hasattr(user_obj, perm_cache_name):\n if user_obj.is_superuser:\n perms = Permission.objects.all()\n else:\n perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj)\n perms = perms.values_list('content_type__app_label', 'codename').order_by()\n setattr(user_obj, perm_cache_name, {\"%s.%s\" % (ct, name) for ct, name in perms})\n return getattr(user_obj, perm_cache_name)",
"def get_permissions(self):\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action =='retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]",
"def permission_list(**kwargs):\n print(AppPermissionSchema(many=True).dumps(\n get_protected_routes(ignored_methods=[\"HEAD\", \"OPTIONS\"]), indent=4))",
"def getPermissionsForUser(self, scope, extra_params, perm_filter):\n\n if perm_filter is None or not any(perm_filter.__dict__.values()):\n # If no filtering is needed, this function behaves identically\n # to getPermissions().\n return self.getPermissions(scope)\n\n with DBSession(self.__config_db) as session:\n # The database connection must always be passed to the permission\n # handler.\n params = ThriftAuthHandler.__unpack_extra_params(extra_params,\n session)\n\n perms = []\n for perm in permissions.get_permissions(scope):\n should_return = True\n handler = make_handler(perm, params)\n\n if should_return and perm_filter.given:\n should_return = handler.has_permission(self.__auth_session)\n\n if should_return and perm_filter.canManage:\n # If the user has any of the permissions that are\n # authorised to manage the currently iterated permission,\n # the filter passes.\n should_return = require_manager(\n perm, params, self.__auth_session)\n\n if should_return:\n perms.append(perm)\n\n return [permissions.api_enum_for_permission(p)\n for p in perms]",
"def get_permissions(self):\n permissions = [IsAuthenticated]\n return [permission() for permission in permissions]",
"def get_permissions(self):\n if self.action in ['list', 'create']:\n permission_classes = [IsStaffOrReadOnly]\n else:\n permission_classes = [IsAuthorOrReadOnly, IsStaffOrReadOnly]\n return [permission() for permission in permission_classes]",
"def permissions(self) -> 'outputs.PermissionsResponse':\n return pulumi.get(self, \"permissions\")",
"def my_perms(self, ids, **kwargs):\r\n auth = self.app.auth\r\n # checking all objects\r\n p = self.db.auth_permission\r\n if type(ids) in (list, tuple, set):\r\n _ids = type(ids)((0,)) + ids\r\n else:\r\n _ids = [0, ids]\r\n grouped = self.db(p.record_id.belongs(_ids) & p.group_id.belongs(auth.user_groups.keys()) & (\r\n p.table_name == self.table._tablename)).select(p.name, p.record_id).group_by_value('record_id')\r\n take_names = itemgetter('name')\r\n base_permissions = set(imap(take_names, grouped.get(0, set())))\r\n ret = dict(PERMISSIONS={self.name: [\r\n dict((id, set(imap(take_names, grouped.get(id, []))).union(base_permissions)) for id in map(int, ids))]})\r\n current.response.text = ret\r\n return ret",
"async def get_permissions(self, requester: Requester, model: Model):\n raise NotImplementedError",
"def get_permissions(self):\n if self.action in ['signup', 'login', 'verify']:\n permissions = [AllowAny]\n elif self.action in ['retrieve', 'update', 'partial_update', 'destroy', 'u', 'p']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n return [p() for p in permissions]",
"def get_all_permissions(self, user, obj=None):\n if user.is_anonymous or not user.is_active or obj is not None:\n return set()\n\n if not hasattr(user, '_perm_cache'):\n\n permissions = Permission.objects.filter(\n # User permissions\n Q(user=user)\n # Group permissions\n | Q(group__user=user)\n # UserClass permissions\n | Q(user_classes__users=user)\n ).distinct().values_list(\n 'content_type__app_label',\n 'codename',\n )\n\n user._perm_cache = set(\n '{app_label}.{codename}'.format(\n app_label=app_label,\n codename=codename,\n )\n for app_label, codename\n in permissions\n )\n\n return user._perm_cache",
"def get_permissions(self):\n \n if self.action in ['signup', 'login', 'verify']:\n permissions =[AllowAny]\n # cualquiera que vaya a acceder a estas peticiones lo podra hacer\n # si la accion es de tipo retrieve se debe validar el permiso de acceso\n elif self.action in ['retrieve', 'update', 'partial_update']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n # si no hay ninguna opcion debe tener una sesion autenticada \n return [p() for p in permissions]",
"def permissions(self):\n return self.get_permissions()",
"def all_perms(self, id, **kwargs):\r\n p = self.db.auth_permission\r\n if self.all_permissions:\r\n ret = self.sql(\r\n (p.record_id == id) & (p.table_name == self.table._tablename) & p.name.belongs(self.all_permissions),\r\n p.name, p.group_id,\r\n orderby=p.group_id)\r\n else:\r\n ret = []\r\n current.response.text = ret\r\n return ret",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n return [permission() for permission in permissions]",
"def _get_permissions(self, user_obj, obj, from_name):\n if not user_obj.is_active or user_obj.is_anonymous or obj is not None:\n return set()\n\n perm_cache_name = '_%s_perm_cache' % from_name\n if not hasattr(user_obj, perm_cache_name):\n if user_obj.is_acting_superuser:\n perms = Permission.objects.all()\n else:\n perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj)\n setattr(user_obj, perm_cache_name, set(perms))\n return getattr(user_obj, perm_cache_name)",
"def get_all_permissions(self, obj=None):\n return self.get_group_permissions(obj)",
"def get_permissions(cls, user):\n try:\n user_perm = UserPermission.objects.get(user_id=user.id)\n return user_perm.permission_list.split(',')\n except UserPermission.DoesNotExist:\n return []"
] | [
"0.76583916",
"0.714898",
"0.7069271",
"0.70243394",
"0.69735223",
"0.6933946",
"0.688342",
"0.6883105",
"0.6858095",
"0.6835594",
"0.6835594",
"0.6832657",
"0.68115896",
"0.6804625",
"0.6756365",
"0.6735406",
"0.6730444",
"0.6701549",
"0.66967696",
"0.66963583",
"0.66962457",
"0.66683906",
"0.6666602",
"0.66348153",
"0.6623869",
"0.66138154",
"0.66122824",
"0.6595992",
"0.6569222",
"0.65618294"
] | 0.74537504 | 1 |
Fetches user favorites, returning ``favorite_type`` or a mix of all the favorite types Keyword Arguments | async def fetch_favorites(self, favorite_type=None, n=100, offset=0):
if n > 100:
n = 100
params = {
"n": n,
"offset": offset
}
if favorite_type is not None:
params["type"] = favorite_type
favorites = await self.client.request.get("/favorites", params=params)
logging.debug("Fetching favorites")
return [BaseFavorite.build_favorite(
self.client, favorite, self.loop) for favorite in favorites["data"]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def fetch_all_favorites(self, favorite_type=None):\n\n favorites = await vrcpy.util.auto_page_coro(\n self.fetch_favorites, favorite_type=favorite_type)\n\n world = []\n friend = []\n avatar = []\n\n for favorite in favorites:\n if favorite.type == FavoriteType.WORLD:\n world.append(favorite)\n elif favorite.type == FavoriteType.FRIEND:\n friend.append(favorite)\n elif favorite.type == FavoriteType.AVATAR:\n avatar.append(favorite)\n\n if world != []:\n self.client.favorites[FavoriteType.WORLD] = world\n if friend != []:\n self.client.favorites[FavoriteType.FRIEND] = friend\n if avatar != []:\n self.client.favorites[FavoriteType.AVATAR] = avatar\n\n return favorites",
"def favorites(self):\n path = self._get_path('favorites')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return self._clean_return(response)",
"def get_oneoffixx_favorites():\n api_client = OneoffixxAPIClient()\n favorites = api_client.get_oneoffixx_favorites()\n if favorites.get('templates'):\n return favorites\n return None",
"def auto_fav(q, count=5, result_type=\"recent\"):\n\n result = search_tweets(q, count, result_type)\n\n for tweet in result[\"statuses\"]:\n try:\n # don't favorite your own tweets\n if tweet[\"user\"][\"screen_name\"] == TWITTER_HANDLE:\n continue\n\n result = t.favorites.create(_id=tweet[\"id\"])\n print(\"favorited: %s\" % (result[\"text\"].encode(\"utf-8\")))\n\n # when you have already favorited a tweet, this error is thrown\n except TwitterHTTPError as e:\n print(\"error: %s\" % (str(e)))",
"def get_queryset(self):\n\n user = get_authentication(self.request)\n queryset = Favorites.objects.filter(user=user, is_used=True)\n\n return queryset",
"def get_favorites(self):\n url = \"https://api.imgur.com/3/account/{0}/favorites\".format(self.name)\n resp = self._imgur._send_request(url, needs_auth=True)\n return [_get_album_or_image(thing, self) for thing in resp]",
"def get_favorite(self, obj):\n article_fav_users = obj.favorite.all()\n return self.fetch_usernames(article_fav_users)",
"def favorite(self):\n url = \"https://api.imgur.com/3/album/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method=\"POST\")",
"def _getFavorites(self):\n url = self._genFavoritesUrlByUser(self._username)\n doc = html.document_fromstring(requests.get(url).text)\n out = dict()\n pages = get_pages(doc)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = str(f.attrib['href']).split('/')[-2]\n # topic_id =\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n for p in range(2, pages):\n url = 'http://habrahabr.ru/users/{0}/favorites/page{1}/'.format(self._username, p)\n # if show_progress:\n # print('parsing page{0}... url={1}'.format(p, url))\n doc = html.document_fromstring(requests.get(url).text)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = f.attrib['href'][-7:-1]\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n return out",
"def get_favorite(self):\n raise NotImplementedError()",
"def cmd_account_favorites(client, args):\n account_favorites = client.get_account_favorites(args.username)\n data = [item.__dict__ for item in account_favorites]\n generate_output({'account_favorites': data}, args.output_file)",
"def favorites(self):\n if not self._user_favorites_loaded:\n self._user_favorites = self._getFavorites()\n self._user_favorites_loaded = True\n return deepcopy(self._user_favorites)",
"def get_favorites_questions(user_id, api_site_parameter, page = 1, body = False, comments = False, pagesize = 100, sort = 'added'):\n path = \"users/%d/favorites\" % user_id\n \n query_filter = ')(Ybxw_gbz'\n \n if body:\n query_filter = '9F)u(CSWCtKt'\n if comments:\n query_filter = ')(YbxuzQQ.'\n if body and comments:\n query_filter = ')(YbxuzQTp'\n \n results = __fetch_results(path, api_site_parameter, page = page, filter = query_filter, pagesize = pagesize, sort = sort)\n return results",
"def get_favorites(self, user_id=None):\n if not user_id:\n user_id = self.user_id\n\n favorite_decks = self.data_source.get_favorites(user_id)\n\n return favorite_decks",
"def favorite(self):\n url = \"https://api.imgur.com/3/image/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method='POST')",
"def play_favourite_core(speaker, favourite, favourite_number=None):\n\n fs = speaker.music_library.get_sonos_favorites(complete_result=True)\n\n if favourite_number:\n err_msg = \"Favourite number must be integer between 1 and {}\".format(len(fs))\n try:\n favourite_number = int(favourite_number)\n except ValueError:\n return False, err_msg\n if not 0 < favourite_number <= len(fs):\n return False, err_msg\n\n # List must be sorted by title to match the output of 'list_favourites'\n fs.sort(key=lambda x: x.title)\n the_fav = fs[favourite_number - 1]\n logging.info(\n \"Favourite number {} is '{}'\".format(favourite_number, the_fav.title)\n )\n\n else:\n the_fav = None\n # Strict match\n for f in fs:\n if favourite == f.title:\n logging.info(\"Strict match '{}' found\".format(f.title))\n the_fav = f\n break\n\n # Fuzzy match\n if not the_fav:\n favourite = favourite.lower()\n for f in fs:\n if favourite in f.title.lower():\n logging.info(\"Fuzzy match '{}' found\".format(f.title))\n the_fav = f\n break\n\n if the_fav:\n # play_uri works for some favourites\n # TODO: this is broken and we should test for the\n # type of favourite\n try:\n uri = the_fav.get_uri()\n metadata = the_fav.resource_meta_data\n logging.info(\n \"Trying 'play_uri()': URI={}, Metadata={}\".format(uri, metadata)\n )\n speaker.play_uri(uri=uri, meta=metadata)\n return True, \"\"\n except Exception as e:\n e1 = e\n\n # Other favourites will be added to the queue, then played\n try:\n # Add to the end of the current queue and play\n logging.info(\"Trying 'add_to_queue()'\")\n index = speaker.add_to_queue(the_fav, as_next=True)\n speaker.play_from_queue(index, start=True)\n return True, \"\"\n except Exception as e2:\n msg = \"1: {} | 2: {}\".format(str(e1), str(e2))\n return False, msg\n msg = \"Favourite '{}' not found\".format(favourite)\n return False, msg",
"def favorite():\n # handle pre-flight for browsers CORS access\n if request.method == \"OPTIONS\":\n return generate_response()\n # part1: verify the token\n checked_and_verified, response = check_verify_token(request,login_session)\n if checked_and_verified == False: return response\n # handle the edge case where user is authorized to perform create user but not other method\n if not is_loggedin(login_session):\n response = generate_message(MESSAGE_USER_NOT_LOGGED_IN,401)\n return response\n # handles the get request\n if request.method == \"GET\":\n favorites = read_criteria(Favorite,{\"user_id\":login_session[\"user_id\"]},session,\"m\") or []\n favorites_room_json = [room_json(favorite.room, session,app.config[\"OFFLINE_TESTING\"], login_session) for favorite in favorites]\n return generate_response(elem={\"favorites\":favorites_room_json})\n # part2: check json, handle POST request\n checked_json, response, requested_json = check_json_form(request,MESSAGE_BAD_JSON,MESSAGE_GET_FAV_NO_JSON)\n if checked_json != True: return response\n # verify room id type, with strict mode\n requested_json[\"user_id\"] = login_session[\"user_id\"]\n correct_format,valid_update_pairs, response = process_request_json(Favorite,requested_json, True, access_mode=\"read\",nondb_type_map={\"action\":str})\n if correct_format == False: \n return response\n room = get_row_if_exists(Room, session, ** {\"id\": requested_json[\"room_id\"]})\n user = get_row_if_exists(User, session, ** {\"id\": login_session[\"user_id\"]})\n # if the room id in the request doesn't fit any entry in db, return error message\n if room is None:\n response = generate_message(MESSAGE_FAV_ROOM_NOT_EXIST,404)\n return response\n if requested_json[\"action\"] == \"add\":\n # the add favorite already handle duplicates add\n # it treats multiple adds as one add and every duplicate add afterwards is counted as success\n add_favorite(room,user, session)\n response = generate_message(MESSAGE_POST_FAV_ADD_SUCCESS,201)\n return response\n elif requested_json[\"action\"] == \"delete\":\n # the delete favorite already handle duplicates delete\n # it treats multiple delete as one delete and every duplicate delete afterwards is counted as success\n remove_entry(Favorite,requested_json[\"room_id\"], session)\n response = generate_message(MESSAGE_POST_FAV_DEL_SUCCESS,200)\n return response\n else: # method not supported\n response = generate_message(MESSAGE_POST_FAV_METHOD_NOT_SUPPORTED,405)\n return response",
"def favorites(request):\n cur_user = request.user # Gets the current logged-in user\n fav_products = Favorite.objects.all() # Gets all \"Favorite\" model objects\n\n # Gets the favorites of the current user\n fav_prod_filtered = fav_products.filter(users_id=cur_user).order_by('-id')\n\n # Adds pagination for up to 6 products per page\n paginator = Paginator(fav_prod_filtered, 6)\n page = request.GET.get('page')\n\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n context = {\n 'favorites': products,\n 'paginate': True,\n }\n return render(request, 'favorites/favorites.html', context)",
"def fetch_favourites(self):\n while True:\n self.cur.execute(\"SELECT DISTINCT product_id FROM Product_substitute\")\n response = self.cur.fetchall()\n\n for i, element in enumerate (response):\n print (\"Tapez {} pour voir les substituts de:\".format(i+1))\n self.display_product_from_id(element[0])\n \n choice_id = response[self.secure_input(1, len(response))-1]\n \n self.cur.execute(\"SELECT substitute_id FROM Product_substitute WHERE product_id = %s\", (choice_id[0], ))\n response = self.cur.fetchall()\n\n print(\"Voici les substituts trouves pour:\")\n self.display_product_from_id(choice_id[0])\n for element in response:\n self.display_product_from_id(element[0])\n\n print(\"Faire une autre recherche dans vos favoris? Oui = 1 non =0\")\n again = self.secure_input(0, 1)\n if again == 1:\n continue\n else:\n break",
"def fav_place(uri, data={}):\n sv = veggiesailor.StorageFav()\n return sv.switch(uri, 1, data)",
"def get_favorites(request):\n companies = request.user.profile.companies.all()\n context = {'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)",
"def get_favourites(self, username):\n self.cur.execute(\"SELECT video_ID FROM favourites WHERE username = \\\"{}\\\"\".format(username))\n favourites = []\n for ID in self.cur.fetchall():\n favourites.append(ID[0])\n return favourites",
"def favourites_read(self, data, sesh):\n\n\t\t# Fetch the favourites for the thrower\n\t\tlFavourites = Favourites.get(sesh['thrower']['_id'], raw=['ids'])\n\n\t\t# If there's none\n\t\tif not lFavourites:\n\t\t\treturn Services.Effect([])\n\n\t\t# Look up all the throwers using the IDs\n\t\tlThrowers = Thrower.get(lFavourites['ids'], raw=['_id', 'alias'])\n\n\t\t# Return what's found\n\t\treturn Services.Effect(lThrowers)",
"def profile():\n from flickrAPI import FlickrAPI\n #flickr = FlickrAPI(key=session['resource_owner_key'], secret=session['resource_owner_secret'])\n flickr = FlickrAPI(key=request.cookies.get('oauth_token'), secret=request.cookies.get('oauth_token_secret'))\n faves = flickr.favorites_getList(user_id=\"44124394781@N01\", page=1, per_page=5, extras='owner_name')\n return str(faves)",
"def get_favorites(self) -> Dict:\n return self.query(\"SELECT * FROM substituted_product\")",
"def getUserFavorites(request, userid):\n try:\n User.objects.get(id=userid)\n favList = list(Favorite.objects.filter(user=userid).values())\n favInfoDict = {}\n num = 0\n\n for fav in favList:\n try:\n favItem = Item.objects.get(id=fav.get(\"item_id\")) \n favInfoDict[num] = model_to_dict(favItem)\n num = num + 1\n \n except Item.DoesNotExist:\n favInfoDict[\"favorite\"] = \"doesnotexist\"\n\n return JsonResponse(favInfoDict)\n\n except User.DoesNotExist:\n fail = {\n \"user\":\"doesnotexist\"\n }\n return JsonResponse(fail)",
"def show_fav_recipes():\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n \n data = search_recipes(request) \n favorite_list = [l.id for l in g.user.recipes]\n favorites = [f['id'] for f in data['results'] if f['id'] in favorite_list]\n \n\n return render_template(\"favs/show.html\", favorites=favorites)",
"def get_favorite(self):\n\n\t\treturn self.__favorite",
"def list_favor(self):\n if \"all\" in self.switches:\n favors = Reputation.objects.exclude(favor=0).order_by(\"-date_gossip_set\")\n self.msg(\"Characters with favor: %s\" % \", \".join(str(ob) for ob in favors))\n return\n org = self.get_organization(check_perm=False)\n favors = org.reputations.filter(Q(favor__gt=0) | Q(favor__lt=0)).order_by(\n \"-favor\"\n )\n msg = \"{wThose Favored/Disfavored by %s{n\\n\" % org\n msg += \"\\n\\n\".join(\n \"{c%s{w (%s):{n %s\" % (ob.player, ob.favor, ob.npc_gossip) for ob in favors\n )\n self.msg(msg)",
"def search_in_fav(request):\n query = request.GET.get('user_search')\n\n if query:\n # Returns the query in lower case and without accents\n query = unidecode(query).lower()\n result = True\n\n cur_user = request.user\n # Returns all favorites\n favorites = Favorite.objects.all()\n\n # Returns current user filtered favorites\n fav_filtered = favorites.filter(\n users_id=cur_user\n ).filter(products__name__icontains=query).order_by('id')\n\n if not fav_filtered.exists():\n result = False\n fav_filtered = favorites.filter(\n users_id=cur_user).order_by('id')\n\n # Init pagination with 6 products\n paginator = Paginator(fav_filtered, 6)\n page = request.GET.get('page')\n\n try:\n fav_filtered = paginator.page(page)\n except PageNotAnInteger:\n fav_filtered = paginator.page(1)\n except EmptyPage:\n fav_filtered = paginator.page(paginator.num_pages)\n\n if result:\n title = \"Résultats de la recherche : {}\".format(query)\n else:\n title = \"Aucun résultat pour la recherche : {}\".format(query)\n\n context = {\n 'is_result': result,\n 'fav_filtered': fav_filtered,\n 'title': title,\n 'paginate': True,\n }\n\n return render(request, 'favorites/search_in_fav.html', context)"
] | [
"0.7206394",
"0.6586617",
"0.6417396",
"0.62762886",
"0.62620825",
"0.62332726",
"0.62299156",
"0.60298043",
"0.6029627",
"0.6012111",
"0.6006813",
"0.5984588",
"0.59840655",
"0.5982405",
"0.5941768",
"0.5735374",
"0.5715554",
"0.57118183",
"0.57083035",
"0.5686642",
"0.5667025",
"0.5650479",
"0.5632115",
"0.5562854",
"0.5509652",
"0.55041784",
"0.5502908",
"0.54982936",
"0.5493377",
"0.54715717"
] | 0.6721451 | 1 |
Fetches all favorites by autopaging, returning ``favorite_type`` or a mix of all the favorite types. Using this also updates favorite cache Keyword Arguments | async def fetch_all_favorites(self, favorite_type=None):
favorites = await vrcpy.util.auto_page_coro(
self.fetch_favorites, favorite_type=favorite_type)
world = []
friend = []
avatar = []
for favorite in favorites:
if favorite.type == FavoriteType.WORLD:
world.append(favorite)
elif favorite.type == FavoriteType.FRIEND:
friend.append(favorite)
elif favorite.type == FavoriteType.AVATAR:
avatar.append(favorite)
if world != []:
self.client.favorites[FavoriteType.WORLD] = world
if friend != []:
self.client.favorites[FavoriteType.FRIEND] = friend
if avatar != []:
self.client.favorites[FavoriteType.AVATAR] = avatar
return favorites | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def fetch_favorites(self, favorite_type=None, n=100, offset=0):\n\n if n > 100:\n n = 100\n\n params = {\n \"n\": n,\n \"offset\": offset\n }\n\n if favorite_type is not None:\n params[\"type\"] = favorite_type\n\n favorites = await self.client.request.get(\"/favorites\", params=params)\n logging.debug(\"Fetching favorites\")\n\n return [BaseFavorite.build_favorite(\n self.client, favorite, self.loop) for favorite in favorites[\"data\"]]",
"def favorites(self):\n path = self._get_path('favorites')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return self._clean_return(response)",
"def get_favorites(self):\n url = \"https://api.imgur.com/3/account/{0}/favorites\".format(self.name)\n resp = self._imgur._send_request(url, needs_auth=True)\n return [_get_album_or_image(thing, self) for thing in resp]",
"def get_queryset(self):\n\n user = get_authentication(self.request)\n queryset = Favorites.objects.filter(user=user, is_used=True)\n\n return queryset",
"def favorites(self):\n if not self._user_favorites_loaded:\n self._user_favorites = self._getFavorites()\n self._user_favorites_loaded = True\n return deepcopy(self._user_favorites)",
"def get_oneoffixx_favorites():\n api_client = OneoffixxAPIClient()\n favorites = api_client.get_oneoffixx_favorites()\n if favorites.get('templates'):\n return favorites\n return None",
"def auto_fav(q, count=5, result_type=\"recent\"):\n\n result = search_tweets(q, count, result_type)\n\n for tweet in result[\"statuses\"]:\n try:\n # don't favorite your own tweets\n if tweet[\"user\"][\"screen_name\"] == TWITTER_HANDLE:\n continue\n\n result = t.favorites.create(_id=tweet[\"id\"])\n print(\"favorited: %s\" % (result[\"text\"].encode(\"utf-8\")))\n\n # when you have already favorited a tweet, this error is thrown\n except TwitterHTTPError as e:\n print(\"error: %s\" % (str(e)))",
"def _getFavorites(self):\n url = self._genFavoritesUrlByUser(self._username)\n doc = html.document_fromstring(requests.get(url).text)\n out = dict()\n pages = get_pages(doc)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = str(f.attrib['href']).split('/')[-2]\n # topic_id =\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n for p in range(2, pages):\n url = 'http://habrahabr.ru/users/{0}/favorites/page{1}/'.format(self._username, p)\n # if show_progress:\n # print('parsing page{0}... url={1}'.format(p, url))\n doc = html.document_fromstring(requests.get(url).text)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = f.attrib['href'][-7:-1]\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n return out",
"def get_queryset(self):\n queryset = Favorites.objects.get(owner=self.request.user)\n return queryset.anuncios.published(). select_related('owner').\\\n prefetch_related('image_anuncio').select_subclasses()",
"def cmd_account_favorites(client, args):\n account_favorites = client.get_account_favorites(args.username)\n data = [item.__dict__ for item in account_favorites]\n generate_output({'account_favorites': data}, args.output_file)",
"def favorite(self):\n url = \"https://api.imgur.com/3/album/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method=\"POST\")",
"def cmd_account_gallery_favorites(client, args):\n gallery_favorites = client.get_gallery_favorites(args.username)\n data = [item.__dict__ for item in gallery_favorites]\n generate_output({'gallery_favorites': data}, args.output_file)",
"def get_all_favorite(self):\n products = self.db.query(f\"\"\"\n SELECT original.`name` as \"product_as_original\", substitute.`name`\n as \"product_as_substitut\", substitute.`url` as \"url\",\n substitute.`nutrition_grade`,\n GROUP_CONCAT(DISTINCT store.`name` SEPARATOR ', ')\n as stores FROM favorite as fav\n JOIN product as original ON original.id = fav.original_id\n JOIN product as substitute ON substitute.id = fav.substitut_id\n JOIN product_store ON product_store.product_id = substitute.id\n JOIN store ON store.id = product_store.store_id\n GROUP BY original.name, substitute.name, substitute.url,\n substitute.nutrition_grade\n \"\"\").all(as_dict=True)\n return [self.model(**product) for product in products]",
"def favorite(self):\n url = \"https://api.imgur.com/3/image/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method='POST')",
"def get_favorites(self) -> Dict:\n return self.query(\"SELECT * FROM substituted_product\")",
"def fetch_favourites(self):\n while True:\n self.cur.execute(\"SELECT DISTINCT product_id FROM Product_substitute\")\n response = self.cur.fetchall()\n\n for i, element in enumerate (response):\n print (\"Tapez {} pour voir les substituts de:\".format(i+1))\n self.display_product_from_id(element[0])\n \n choice_id = response[self.secure_input(1, len(response))-1]\n \n self.cur.execute(\"SELECT substitute_id FROM Product_substitute WHERE product_id = %s\", (choice_id[0], ))\n response = self.cur.fetchall()\n\n print(\"Voici les substituts trouves pour:\")\n self.display_product_from_id(choice_id[0])\n for element in response:\n self.display_product_from_id(element[0])\n\n print(\"Faire une autre recherche dans vos favoris? Oui = 1 non =0\")\n again = self.secure_input(0, 1)\n if again == 1:\n continue\n else:\n break",
"def get_gallery_favorites(self):\n url = (\"https://api.imgur.com/3/account/{0}/gallery_favorites\".format(\n self.name))\n resp = self._imgur._send_request(url)\n return [Image(img, self._imgur) for img in resp]",
"def get_favourites_page(self, page, page_size):\n roms = Roms(self._connection)\n roms._debug = True\n return roms.select(['rom', 'description']).page_size(page_size).page_offset(page).order_by('rom desc').get_all()",
"def get_favorite(self):\n raise NotImplementedError()",
"def get_favorites(self, user_id=None):\n if not user_id:\n user_id = self.user_id\n\n favorite_decks = self.data_source.get_favorites(user_id)\n\n return favorite_decks",
"def favorites(request):\n cur_user = request.user # Gets the current logged-in user\n fav_products = Favorite.objects.all() # Gets all \"Favorite\" model objects\n\n # Gets the favorites of the current user\n fav_prod_filtered = fav_products.filter(users_id=cur_user).order_by('-id')\n\n # Adds pagination for up to 6 products per page\n paginator = Paginator(fav_prod_filtered, 6)\n page = request.GET.get('page')\n\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n context = {\n 'favorites': products,\n 'paginate': True,\n }\n return render(request, 'favorites/favorites.html', context)",
"def get_favorite(self, obj):\n article_fav_users = obj.favorite.all()\n return self.fetch_usernames(article_fav_users)",
"def get_favorites(request):\n companies = request.user.profile.companies.all()\n context = {'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)",
"def get_favorites_questions(user_id, api_site_parameter, page = 1, body = False, comments = False, pagesize = 100, sort = 'added'):\n path = \"users/%d/favorites\" % user_id\n \n query_filter = ')(Ybxw_gbz'\n \n if body:\n query_filter = '9F)u(CSWCtKt'\n if comments:\n query_filter = ')(YbxuzQQ.'\n if body and comments:\n query_filter = ')(YbxuzQTp'\n \n results = __fetch_results(path, api_site_parameter, page = page, filter = query_filter, pagesize = pagesize, sort = sort)\n return results",
"def list_favor(self):\n if \"all\" in self.switches:\n favors = Reputation.objects.exclude(favor=0).order_by(\"-date_gossip_set\")\n self.msg(\"Characters with favor: %s\" % \", \".join(str(ob) for ob in favors))\n return\n org = self.get_organization(check_perm=False)\n favors = org.reputations.filter(Q(favor__gt=0) | Q(favor__lt=0)).order_by(\n \"-favor\"\n )\n msg = \"{wThose Favored/Disfavored by %s{n\\n\" % org\n msg += \"\\n\\n\".join(\n \"{c%s{w (%s):{n %s\" % (ob.player, ob.favor, ob.npc_gossip) for ob in favors\n )\n self.msg(msg)",
"def fav_place(uri, data={}):\n sv = veggiesailor.StorageFav()\n return sv.switch(uri, 1, data)",
"def affiche_favoris():\r\n # Liste des favoris utilisés pour la fonction \"select_favorite\"\r\n favorite_dict = {}\r\n # pour les produits dans Count\r\n cursor.execute('USE openfoodfacts;')\r\n cursor.execute(\"\"\"SELECT F1.name as Product, F2.name as Substitute \\\r\n FROM Backup \\\r\n INNER JOIN Food F1 ON Backup.produit_id = F1.id \r\n INNER JOIN Food F2 ON Backup.substitut_id = F2.id\"\"\")\r\n favorite = cursor.fetchall()\r\n index = 1\r\n for i in favorite:\r\n favorite_tuple = (i[0], i[1])\r\n print(\"\\n {}. {}, Peut être remplacé par {}.\".format(index, \\\r\n favorite_tuple[0], favorite_tuple[1]))\r\n favorite_dict[index] = favorite_tuple\r\n index += 1\r\n\r\n if not favorite_dict:\r\n print (\"La liste des favoris est vide.\")\r\n else:\r\n print('Choisissez un chiffre pour plus de détail.')\r\n select_favorite(favorite_dict)",
"def favourites_read(self, data, sesh):\n\n\t\t# Fetch the favourites for the thrower\n\t\tlFavourites = Favourites.get(sesh['thrower']['_id'], raw=['ids'])\n\n\t\t# If there's none\n\t\tif not lFavourites:\n\t\t\treturn Services.Effect([])\n\n\t\t# Look up all the throwers using the IDs\n\t\tlThrowers = Thrower.get(lFavourites['ids'], raw=['_id', 'alias'])\n\n\t\t# Return what's found\n\t\treturn Services.Effect(lThrowers)",
"def favorite():\n # handle pre-flight for browsers CORS access\n if request.method == \"OPTIONS\":\n return generate_response()\n # part1: verify the token\n checked_and_verified, response = check_verify_token(request,login_session)\n if checked_and_verified == False: return response\n # handle the edge case where user is authorized to perform create user but not other method\n if not is_loggedin(login_session):\n response = generate_message(MESSAGE_USER_NOT_LOGGED_IN,401)\n return response\n # handles the get request\n if request.method == \"GET\":\n favorites = read_criteria(Favorite,{\"user_id\":login_session[\"user_id\"]},session,\"m\") or []\n favorites_room_json = [room_json(favorite.room, session,app.config[\"OFFLINE_TESTING\"], login_session) for favorite in favorites]\n return generate_response(elem={\"favorites\":favorites_room_json})\n # part2: check json, handle POST request\n checked_json, response, requested_json = check_json_form(request,MESSAGE_BAD_JSON,MESSAGE_GET_FAV_NO_JSON)\n if checked_json != True: return response\n # verify room id type, with strict mode\n requested_json[\"user_id\"] = login_session[\"user_id\"]\n correct_format,valid_update_pairs, response = process_request_json(Favorite,requested_json, True, access_mode=\"read\",nondb_type_map={\"action\":str})\n if correct_format == False: \n return response\n room = get_row_if_exists(Room, session, ** {\"id\": requested_json[\"room_id\"]})\n user = get_row_if_exists(User, session, ** {\"id\": login_session[\"user_id\"]})\n # if the room id in the request doesn't fit any entry in db, return error message\n if room is None:\n response = generate_message(MESSAGE_FAV_ROOM_NOT_EXIST,404)\n return response\n if requested_json[\"action\"] == \"add\":\n # the add favorite already handle duplicates add\n # it treats multiple adds as one add and every duplicate add afterwards is counted as success\n add_favorite(room,user, session)\n response = generate_message(MESSAGE_POST_FAV_ADD_SUCCESS,201)\n return response\n elif requested_json[\"action\"] == \"delete\":\n # the delete favorite already handle duplicates delete\n # it treats multiple delete as one delete and every duplicate delete afterwards is counted as success\n remove_entry(Favorite,requested_json[\"room_id\"], session)\n response = generate_message(MESSAGE_POST_FAV_DEL_SUCCESS,200)\n return response\n else: # method not supported\n response = generate_message(MESSAGE_POST_FAV_METHOD_NOT_SUPPORTED,405)\n return response",
"def select_favorite_foods(self):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n self.cursor.execute(\"USE Purbeurre\")\n self.cursor.execute(\"\"\"SELECT Favorite.id, Food.name_food\n FROM Food \n JOIN Favorite ON Food.id = Favorite.id_substitute_chooses \n WHERE Food.id = Favorite.id_substitute_chooses\n ORDER BY Favorite.id\"\"\")\n id_name_substitute = self.cursor.fetchall()\n self.cursor.execute(\"\"\"SELECT Food.name_food\n FROM Food\n JOIN Favorite ON Food.id = Favorite.id_food\n WHERE Food.id = Favorite.id_food\n ORDER BY Favorite.id\"\"\")\n name_substituted_food = self.cursor.fetchall()\n substituted_food_substitute = self.new_orm.transform_favorite_foods_to_object\\\n (id_name_substitute, name_substituted_food)\n id_substitute = substituted_food_substitute[0]\n name_substitute = substituted_food_substitute[1]\n name_substituted_food = substituted_food_substitute[2]\n return id_substitute, name_substituted_food, name_substitute"
] | [
"0.6839941",
"0.64985955",
"0.6240866",
"0.62036407",
"0.6013773",
"0.60058326",
"0.59991366",
"0.5770625",
"0.57559884",
"0.57079",
"0.5651222",
"0.557293",
"0.5548027",
"0.55305433",
"0.5496854",
"0.5464642",
"0.54065603",
"0.5399308",
"0.5373338",
"0.53702813",
"0.5295063",
"0.525557",
"0.5178641",
"0.5139629",
"0.5117751",
"0.5083415",
"0.5018743",
"0.5014129",
"0.5003066",
"0.4987304"
] | 0.72638565 | 0 |
Fetches moderations against the user | async def fetch_moderated(self):
logging.debug("Fetching moderated")
data = await self.client.request.get("/auth/user/playermoderated")
return [PlayerModeration.build_moderation(
self.client, mod, self.loop) for mod in data["data"]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_moderating(self, user):\n if not user: return False\n query = db.Query(GameModerator)\n query.filter('game =', self)\n query.filter('user =', user)\n return query.get()",
"def current_user_moderating(self):\n return self.user_moderating(users.GetCurrentUser())",
"def get_user_games_moderating(user):\n if not user: return []\n moderating = db.Query(GameModerator).filter('user =', user)\n return [m.game for m in moderating]",
"def get_current_user_games_moderating():\n return Game.get_user_games_moderating(users.GetCurrentUser())",
"def test_can_retrieve_banned_users_of_community_if_mod(self):\n user = make_user()\n headers = make_authentication_headers_for_user(user)\n\n other_user = make_user()\n community = make_community(creator=other_user)\n community_name = community.name\n\n user.join_community_with_name(community_name)\n other_user.add_moderator_with_username_to_community_with_name(username=user.username,\n community_name=community.name)\n\n amount_of_banned_users = 5\n banned_users_ids = []\n\n for i in range(0, amount_of_banned_users):\n community_member = make_user()\n other_user.ban_user_with_username_from_community_with_name(username=community_member.username,\n community_name=community_name)\n banned_users_ids.append(community_member.pk)\n\n url = self._get_url(community_name=community.name)\n response = self.client.get(url, **headers)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response_banned_users = json.loads(response.content)\n\n self.assertEqual(len(response_banned_users), len(banned_users_ids))\n\n for response_banned_user in response_banned_users:\n response_member_id = response_banned_user.get('id')\n self.assertIn(response_member_id, banned_users_ids)",
"def test_list_my_memberships_moderator(self):\n url = '/api/v1/communities/0/list_my_memberships/'\n\n response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n data = response.data\n self.assertEqual(2, data['count'])\n self.assertEqual(3, data['results'][0]['community']['id'])\n self.assertEqual(4, data['results'][1]['community']['id'])\n self.assertEqual('0', data['results'][0]['status'])\n self.assertEqual('1', data['results'][1]['status'])\n self.assertEqual('1', data['results'][0]['role'])\n self.assertEqual('1', data['results'][1]['role'])",
"def get_moderator_queryset(self):\n if not settings.CMS_MODERATOR or not self.tree_id:\n return PageModerator.objects.get_empty_query_set()\n \n q = Q(page__tree_id=self.tree_id, page__level__lt=self.level, moderate_descendants=True) | \\\n Q(page__tree_id=self.tree_id, page__level=self.level - 1, moderate_children=True) | \\\n Q(page__pk=self.pk, moderate_page=True)\n \n return PageModerator.objects.distinct().filter(q).order_by('page__level')",
"def moderation_view():\n\n # Ensure that the current user is an admin.\n assert users.get_current_user() and users.is_current_user_admin()\n\n config = ModerationConfig.get()\n\n # Approve something, if we were asked to.\n if request.form.get(\"approve\"):\n key = ndb.Key(urlsafe=request.form.get(\"approve\"))\n if key.kind() not in [\"UnapprovedListing\", \"UnapprovedInquiry\"]:\n raise ValueError\n\n entity = key.get()\n entity.approve(\"Approved by {!r} on {!r}\".format(\n users.get_current_user().email(),\n str(datetime.datetime.now())\n ))\n entity.put()\n return \"\"\n\n elif request.form.get(\"deny\"):\n key = ndb.Key(urlsafe=request.form.get(\"deny\"))\n if key.kind() not in [\"UnapprovedListing\", \"UnapprovedInquiry\"]:\n raise ValueError\n\n key.delete()\n return \"\"\n\n if request.form.get(\"automod\"):\n if config.enabled:\n config.blacklist = [x.strip() for x in\n request.form.get(\"blacklist\", \"\").split(\"\\n\")\n if x.strip()]\n config.min_delay = int(request.form.get(\"min_delay\", \"0\"))\n config.enabled = (request.form.get(\"automod\") == \"true\")\n config.put()\n\n inquiries = model.UnapprovedInquiry().query().fetch(100)\n listings = model.UnapprovedListing().query().fetch(100)\n\n inquiries.sort(key=email_order)\n listings.sort(key=email_order)\n\n return render_template(\"moderation/view.html\",\n inquiries=inquiries,\n listings=listings,\n config=config)",
"def admins(message):\n hf.query_users(message, hf.get_users(), \"admin\")",
"def is_moderator(self):\n return self.user_type == 'M'",
"def is_mod():\n\n async def predicate(ctx: commands.context):\n if any(role.name in MODERATOR_ROLES for role in ctx.message.author.roles):\n return True\n else:\n await ctx.send(\n f\"Sorry {ctx.message.author.mention}. You are not a Polyphony moderator.\",\n delete_after=10,\n )\n return False\n\n return commands.check(predicate)",
"def update_moderator_ids():\n moderator_emails_config = Registry.get_config_property(\n 'moderator_emails')\n if not moderator_emails_config:\n return []\n\n moderator_ids = []\n for email in moderator_emails_config.value:\n user_id = user_services.get_user_id_from_email(email)\n if user_id is not None:\n moderator_ids.append(user_id)\n else:\n raise Exception('Bad moderator email: %s' % email)\n return moderator_ids",
"def test_get_model_moderator(self, *mocks):\n moderator = get_model_moderator(Article)\n self.assertIsNotNone(moderator)",
"def user(self, request):\n\t\treturn super(cbotManager, self).get_queryset().filter(author=request.user)",
"def mods_command(server, output):\n mods = server.mods\n if len(mods) > 0:\n server.tell(output.name, 'Moderators:')\n for mod in mods:\n server.tell(output.name, mod)\n else:\n server.tell(output.name, 'There are no moderators')\n return",
"def get_queryset(self):\n user_requested = self.kwargs['user']\n self.check_object_permissions(self.request, user_requested)\n return Poll.objects.filter(created_by__username=user_requested)",
"def test_user_is_global_moderator(self):\n thread = self.create_thread()\n user = self.create_user()\n message = thread.first_message\n message.status = 'pending'\n message.save()\n\n self.assertFalse(message.visible_to_user(user))\n\n self.add_perm(user, 'can_moderate_all_messages', 'accounts', 'user')\n\n # To get rid of the user permission cache we should re-grab our user\n latest_user = USER_MODEL.objects.get(pk=user.pk)\n self.assertTrue(message.visible_to_user(latest_user))",
"def get_recommendations(self):\n endpoints = '/user/recs'\n return self.get_request(endpoints)",
"def getResponsibleUsers():",
"def get_queryset(self):\n user = self.request.user\n if not (user.is_authenticated and user.check_permstring(\"builders\")):\n raise Http404(\"Not staff\")\n return super(IncompleteRosterListView, self).get_queryset()",
"def get_queryset(self):\n queryset = super(FlagLogView, self).get_queryset()\n if not self.request.user.global_moderator:\n queryset = queryset.filter(\n message__thread__group__in=self.request.user.groups_moderating\n ).distinct()\n return queryset",
"def dispatch(self, request, *args, **kwargs):\n if not request.user.can_moderate:\n raise Http404\n return super(ModeratorOnlyMixin, self).dispatch(\n request, *args, **kwargs)",
"def test_can_retrieve_banned_users_of_community_if_admin(self):\n user = make_user()\n headers = make_authentication_headers_for_user(user)\n\n other_user = make_user()\n community = make_community(creator=other_user)\n community_name = community.name\n\n user.join_community_with_name(community_name)\n other_user.add_administrator_with_username_to_community_with_name(username=user.username,\n community_name=community.name)\n\n amount_of_banned_users = 5\n banned_users_ids = []\n\n for i in range(0, amount_of_banned_users):\n community_member = make_user()\n other_user.ban_user_with_username_from_community_with_name(username=community_member.username,\n community_name=community_name)\n banned_users_ids.append(community_member.pk)\n\n url = self._get_url(community_name=community.name)\n response = self.client.get(url, **headers)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response_banned_users = json.loads(response.content)\n\n self.assertEqual(len(response_banned_users), len(banned_users_ids))\n\n for response_banned_user in response_banned_users:\n response_member_id = response_banned_user.get('id')\n self.assertIn(response_member_id, banned_users_ids)",
"def get_recommendations(self):\n\n try:\n recommendations = Recommendations.objects.get(user_id=self.id)\n except DoesNotExist:\n print \"No recommendation object found. Creating one now.\"\n recommendations = Recommendations(user_id=self.id)\n recommendations.save()\n\n return recommendations",
"def multireddits(self):\n return self._reddit.get(API_PATH['multireddit_user'].format(user=self))",
"def return_admin_list(request):\n del request\n return return_user_list(Administrador)",
"def moderate(request, content_type, object_id, mode):\n user = request.user\n content_type_object = ContentType.objects.get(id = content_type)\n object = content_type_object.model_class().objects.get_all(id = object_id)\n status = ContentApprovalVote.objects.vote(object, user, mode)\n \n redirect_url = request.GET.get('queue_url', reverse('moderation-queue'))\n return http.HttpResponseRedirect(redirect_url)",
"def has_moderate_permission(self, request):\n if not settings.CMS_MODERATOR:\n return False\n return self.has_generic_permission(request, \"moderate\")",
"def get_admins():\n users = get_users()\n admins = []\n for user in users:\n if user[\"approval_level\"] == \"admin\":\n admins.append(user)\n\n return admins",
"async def moderation(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"ban_kick_mute\")\n\n if new_value:\n message = \":white_check_mark: You will now receive DMs when you get muted, kicked or banned by me.\"\n else:\n message = \":white_check_mark: You will no longer receive DMs when you get muted, kicked or banned.\"\n\n await ctx.send(message)"
] | [
"0.70628023",
"0.6201899",
"0.61619765",
"0.59319955",
"0.5841453",
"0.57664704",
"0.57575977",
"0.5677609",
"0.56595176",
"0.563347",
"0.5538352",
"0.55252314",
"0.5504787",
"0.54894733",
"0.54779696",
"0.54731494",
"0.544925",
"0.54163736",
"0.53645295",
"0.5355124",
"0.53216296",
"0.5311809",
"0.52746034",
"0.52589256",
"0.52572864",
"0.5247195",
"0.5239891",
"0.52363455",
"0.52197236",
"0.5219393"
] | 0.77385306 | 0 |
Fetches favorite groups for worlds, avatars and users Keyword Arguments | async def fetch_favorite_groups(self, n: int = 50):
resp = await self.client.request.get("/favorite/groups", params={"n": str(n)})
        groups = []
for group in resp["data"]:
groups.append(FavoriteGroup.build_favorite_group(
self.client, group, self.loop))
return groups | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_groups(request):\r\n if(any(elem not in [\"name\",\"password\",\"gid\",\"member\"] for elem in request.GET)):\r\n badRequest(\"Parameters incorrect\")\r\n group = Group()\r\n # Must format params to allow for multiple members under same key. Dict converts all to arrays, rest is converted back.\r\n params = dict(request.GET)\r\n for key in params.keys():\r\n if(key != \"member\"):\r\n params[key] = params[key][0]\r\n return HttpResponse(json.dumps(group.query(params)))",
"def get_groups():\r\n if 'username' not in flask.session:\r\n return flask.jsonify(**{'message': 'Forbidden', 'status_code': 403})\r\n\r\n context = {}\r\n context['url'] = flask.request.path\r\n context['groups'] = []\r\n\r\n # Retreive query variables\r\n query_num_groups = flask.request.args.get('size') \r\n query_page = flask.request.args.get('page') \r\n num_groups = int(query_num_groups) if query_num_groups != None else 10\r\n page_number = int(query_page) if query_page != None else 0\r\n\r\n groups = get_group_listing(flask.session['username'], \r\n num_groups, page_number)\r\n for g in groups:\r\n context['groups'].append({\r\n 'id': g[0],\r\n 'name': g[1]\r\n })\r\n\r\n if (num_groups == 10):\r\n context['next'] = '{}?page={}'.format(context['url'], page_number + 1)\r\n else:\r\n context['next'] = '{}?page={}&size={}'.format(context['url'], \r\n page_number + 1, num_groups)\r\n\r\n return flask.jsonify(**context)",
"def users_groups():\n if request.method == \"GET\":\n query = {\"token\": ciconnect_api_token, \"globus_id\": session[\"primary_identity\"]}\n # Get user info to derive unix name\n user = get_user_info(session)\n unix_name = user[\"metadata\"][\"unix_name\"]\n # Get user's group membership info based on session unix name\n users_group_memberships = get_user_group_memberships(session, unix_name)\n\n multiplexJson = {}\n group_membership_status = {}\n for group in users_group_memberships:\n if group[\"state\"] not in [\"nonmember\"]:\n group_name = group[\"name\"]\n group_query = (\n \"/v1alpha1/groups/\" + group_name + \"?token=\" + query[\"token\"]\n )\n multiplexJson[group_query] = {\"method\": \"GET\"}\n group_membership_status[group_query] = group[\"state\"]\n # POST request for multiplex return\n multiplex = get_multiplex(multiplexJson)\n\n users_groups = []\n for group in multiplex:\n if (\n session[\"url_host\"][\"unix_name\"]\n in (json.loads(multiplex[group][\"body\"])[\"metadata\"][\"name\"])\n ) and (\n len(\n (json.loads(multiplex[group][\"body\"])[\"metadata\"][\"name\"]).split(\n \".\"\n )\n )\n > 1\n ):\n users_groups.append(\n (\n json.loads(multiplex[group][\"body\"]),\n group_membership_status[group],\n )\n )\n # users_groups = [group for group in users_groups if len(group['name'].split('.')) == 3]\n\n # Query user's pending project requests\n pending_project_requests = get_user_pending_project_requests(unix_name)\n # Check user's member status of root connect group\n connect_group = session[\"url_host\"][\"unix_name\"]\n user_status = get_user_connect_status(unix_name, connect_group)\n\n domain_name = domain_name_edgecase()\n\n with open(\n brand_dir\n + \"/\"\n + domain_name\n + \"/form_descriptions/group_unix_name_description.md\",\n \"r\",\n ) as file:\n group_unix_name_description = file.read()\n\n return render_template(\n \"users_groups.html\",\n groups=users_groups,\n project_requests=pending_project_requests,\n user_status=user_status,\n group_unix_name_description=group_unix_name_description,\n )",
"def groupfinder(user_id, request):\n ret = DBSession.query(User).filter_by(user_id=user_id).all()\n if len(ret) == 0:\n return None\n user = ret[0]\n groups = [x.group_name for x in user.groups]\n return groups",
"def cmd_account_gallery_favorites(client, args):\n gallery_favorites = client.get_gallery_favorites(args.username)\n data = [item.__dict__ for item in gallery_favorites]\n generate_output({'gallery_favorites': data}, args.output_file)",
"def get_queryset(self):\n user = self.request.user\n return user.group_set.all()",
"def test_request_users_user_groups(self):\n response = requests.get(self.url + '/users/John/groups')\n\n self.assertEqual(response.status_code, 200)\n\n json = response.json()\n self.assertIsInstance(json, dict)\n self.assertEqual(len(json.keys()), 1)\n self.assertIn('groups', json.keys())\n\n groups = json.get('groups')\n self.assertIsInstance(groups, list)\n self.assertEqual(len(groups), 2)\n self.assertIn('Human', groups)\n self.assertIn('Male', groups)",
"def GroupFindings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def groups(request, group_id = 1):\n group = get_object_or_404(ResearchGroup, pk=group_id)\n groups = ResearchGroup.objects.order_by('name')\n group_list = []\n for g in groups:\n if g.id is not group.id:\n group_list.append({'name': g.name, 'id': g.id})\n # default showing group\n # chosen group info\n group_info = {}\n group_info['name'] = group.name\n personnel = list()\n for p in group.personnel.all():\n personnel.append(p.username)\n group_info['personnel'] = \" \".join(str(x) for x in personnel)\n group_info['projects'] = group.projects\n group_info['directions'] = group.directions\n group_info['papers'] = group.papers.split()\n context = {'group_list': group_list, 'group_info': group_info}\n return render(request, 'sacms/groups.html', context)",
"def groupfinder(name, request):\n #FIXME: Implement\n return ()\n return request.context.get_groups(name)",
"def get(self):\n # fetch parameter\n get_parser = reqparse.RequestParser(bundle_errors=True)\n get_parser.add_argument(\"user_id\", required=True, help=\"User ID required to fetch favourite projects\")\n args = get_parser.parse_args(strict=True)\n\n # get user_id\n user_id = args[\"user_id\"]\n\n # set up return json data\n ret = {\n \"_id\": \"\",\n \"user_id\": \"\",\n \"favourite_projects\": []\n }\n\n # convert user_id (string) into ObjectId\n try:\n user_id = ObjectId(user_id)\n except:\n return {\"message\": \"invalid user id\"}, 400\n\n # fetch the favourites list of the user\n if 'user_id' in args.keys():\n # check if user is in the database\n user = self.users.find_one({\"_id\": user_id})\n if user is None:\n return {\"message\": \"user not found\"}, 404\n else:\n # check if user has any favourites\n user_favourites = self.favourites.find_one({\"user_id\": user_id})\n if user_favourites is None:\n return {\"message\": \"user does not have any favourites\"}, 400 \n else:\n # add return _id and user_id data\n ret[\"_id\"] = str(user_favourites[\"_id\"])\n ret[\"user_id\"] = str(user_favourites[\"user_id\"])\n\n # update project details if needed\n update_project_details = []\n for project in user_favourites[\"favourite_projects\"]:\n project_id = str(project[\"_id\"])\n project_id = ObjectId(project_id)\n\n doc = self.projects.find_one({\"_id\": project_id})\n if doc:\n update_project_details.append(deepcopy(doc))\n\n # ret details\n # fetch the username for each user id\n ret_members = []\n for member_id in doc[\"members\"]:\n mem = self.users.find_one({\"_id\": member_id})\n mem_dict = {\"_id\": str(member_id), \"username\": mem[\"username\"]}\n ret_members.append(mem_dict)\n\n leader = self.users.find_one({\"_id\": doc[\"leader\"]})\n ret_leader = {\"_id\": str(doc[\"leader\"]), \"username\": leader[\"username\"]}\n\n # json format for each project\n ret_project = {\n \"project_id\": str(doc[\"_id\"]),\n \"title\": doc[\"title\"],\n \"leader\": ret_leader,\n \"max_people\": doc[\"max_people\"],\n \"cur_people\": doc[\"cur_people\"],\n \"members\": ret_members,\n \"description\": doc[\"description\"],\n \"course\": doc[\"course\"],\n \"technologies\": doc[\"technologies\"],\n \"languages\": doc[\"languages\"],\n \"tags\": doc[\"tags\"]\n }\n ret[\"favourite_projects\"].append(ret_project)\n \n # update the favourites list for this user and send back the updated details\n new_favourites = {\"favourite_projects\": update_project_details}\n self.favourites.update({\"user_id\": user_id}, {\"$set\": new_favourites}, upsert=False)\n \n return ret, 200 \n else:\n return {\"message\": \"user id required to fetch the favourites list\"}, 400",
"def filter_users(request):\n groups = models.UserGroup.all().order('name')\n return utility.respond(request, 'admin/filter_users', {'groups': groups})",
"def test_request_users_user_groups_group(self):\n response = requests.get(self.url + '/users/John/groups/Human')\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNone(response.json())",
"def test_users_groups_get(self):\n pass",
"def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list",
"def list_groups(args):\n\n for group in get_groups(args):\n print(group)",
"def get_gallery_favorites(self):\n url = (\"https://api.imgur.com/3/account/{0}/gallery_favorites\".format(\n self.name))\n resp = self._imgur._send_request(url)\n return [Image(img, self._imgur) for img in resp]",
"def list(self, request, *args, **kwargs):\n if not request.user.is_superuser:\n self.queryset = Group.objects.filter(owner__pk=request.user.id)\n\n return super().list(request, args, kwargs)",
"def resolve_available_gifts(self, info, user_id, **kwargs):\n\n available_gifts_qs = \\\n Gift.objects.\\\n get_available(user=User.objects.get(id=user_id)).\\\n order_by('price', 'name')\n\n filter_is_group_wide = kwargs.get('is_group_wide')\n if filter_is_group_wide is not None:\n return available_gifts_qs.filter(\n is_group_wide=filter_is_group_wide\n )\n\n return available_gifts_qs",
"async def fetch_all_favorites(self, favorite_type=None):\n\n favorites = await vrcpy.util.auto_page_coro(\n self.fetch_favorites, favorite_type=favorite_type)\n\n world = []\n friend = []\n avatar = []\n\n for favorite in favorites:\n if favorite.type == FavoriteType.WORLD:\n world.append(favorite)\n elif favorite.type == FavoriteType.FRIEND:\n friend.append(favorite)\n elif favorite.type == FavoriteType.AVATAR:\n avatar.append(favorite)\n\n if world != []:\n self.client.favorites[FavoriteType.WORLD] = world\n if friend != []:\n self.client.favorites[FavoriteType.FRIEND] = friend\n if avatar != []:\n self.client.favorites[FavoriteType.AVATAR] = avatar\n\n return favorites",
"def get_voting_group_for(userid):",
"def test_get_groups(self):\n group0 = self.test_save('TestGroup0')\n group1 = self.test_save('TestGroup1')\n \n group0.grant('Perm1', object0)\n group0.grant('Perm3', object1)\n group1.grant('Perm2', object1)\n \n self.assert_(group0 in get_groups(object0))\n self.assertFalse(group1 in get_groups(object0))\n self.assert_(group0 in get_groups(object1))\n self.assert_(group1 in get_groups(object1))\n self.assert_(len(get_groups(object1))==2)",
"def view_group(request, group_id):\n users = models.UserProfile.all().order('email')\n if group_id:\n group = models.UserGroup.get_by_id(int(group_id))\n if group.users:\n users = models.UserProfile.get(group.users)\n else:\n users = []\n return utility.respond(request, 'admin/view_group', {'users': users})",
"def groups_by_uid(request, uid):\r\n user = User()\r\n users = user.query({\"uid\":str(uid)})\r\n if(len(users) < 1):\r\n return HttpResponse(\"No user found under uid \"+ str(uid))\r\n group = Group()\r\n group = group.query({\"gid\":str(users[0]['gid'])})\r\n if(len(group) < 1):\r\n return HttpResponse(\"No group found under uid \"+ str(uid))\r\n return HttpResponse(json.dumps(group))",
"def test_get_groups(self):\n pass",
"def test_get_groups(self):\n pass",
"def test_groups_group_users_get(self):\n pass",
"def test_groups_group_users_get(self):\n pass",
"def _find_groups_for_user(email):\n return [g['name'] for g in groups.find({\"users\":email})]",
"def get_groups():\n\n groups = [\"shelter\", \"sharing\", \"unsheltered\", \"motel\"]\n\n for item in groups:\n group = Group(group_name=item)\n\n db.session.add(group)\n\n db.session.commit()"
] | [
"0.5789151",
"0.57112396",
"0.5698942",
"0.5577221",
"0.5553875",
"0.54356575",
"0.5378318",
"0.5366365",
"0.5348086",
"0.53193164",
"0.53164315",
"0.53105015",
"0.5299645",
"0.5295492",
"0.5291341",
"0.52426225",
"0.5204173",
"0.52040464",
"0.5195504",
"0.5156613",
"0.51309824",
"0.5115167",
"0.51134986",
"0.51019996",
"0.5089188",
"0.5089188",
"0.5087531",
"0.5087531",
"0.50735325",
"0.5053559"
] | 0.5782045 | 1 |
Handles incoming text messages from Twilio API. Isolates the text message and uses it to query The Dog API for a dog picture. Supports searching by breed and getting a random dog picture. | def incoming_sms():
txt = request.form['Body']
# remove leading and trailing white space and make lowercase
txt = txt.strip()
txt = txt.lower()
# handle random searches differently than breed searches
if txt == 'random' or txt == 'dog':
url = get_dogs.get_random_dog()
else:
url = get_dogs.request_breed(txt)
resp = MessagingResponse()
if url:
resp.message(url)
else:
resp.message("Sorry! We couldn't find a dog matching that query. Please try \
a more general search term.")
return str(resp) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def dog(self):\r\n dog_api = \"https://dog.ceo/api/breeds/image/random\"\r\n json_data = requests.get(dog_api).json()\r\n dogimage = json_data['message']\r\n await self.bot.say(dogimage)",
"async def dog(self, ctx: Message):\n\t\timage_url = requests.get(\"https://api.thedogapi.com/v1/images/search\", headers={\"x-api-key\": \"d0558cf8-f941-42f7-8daa-6741a67c5a2e\"}).json()[0][\"url\"]\n\t\tawait self.send(image_url, whisper=[ctx.author.id])",
"def handle(msg):\n # Get text or data from the message\n text = msg.get(\"text\", None)\n data = msg.get(\"data\", None)\n\n if data is not None:\n # This is a message from a custom keyboard\n chat_id = msg[\"message\"][\"chat\"][\"id\"]\n content_type = \"data\"\n elif text is not None:\n # This is a text message from the user\n chat_id = msg[\"chat\"][\"id\"]\n content_type = \"text\"\n else:\n # This is a message we don't know how to handle\n content_type = \"unknown\"\n \n if content_type == \"text\":\n message = msg[\"text\"]\n logging.info(\"Received from chat_id={}: {}\".format(chat_id, message))\n\n if message == \"/start\":\n # Check against the server to see\n # if the user is new or not\n # TODO\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/register', json=payload)\n response = json.loads(r.content)\n if response['exists']:\n message = \"Welcome back!\"\n else:\n message = \"Welcome!\"\n bot.sendMessage(chat_id, message)\n\n \n elif message == \"/rate\":\n # Ask the server to return a random\n # movie, and ask the user to rate the movie\n # You should send the user the following information:\n # 1. Name of the movie\n # 2. A link to the movie on IMDB\n # TODO\n\n # Create a custom keyboard to let user enter rating\n payload = {'chat_id':chat_id}\n r = requests.post(host_addr+'/get_unrated_movie', json=payload)\n response = json.loads(r.content)\n movieid = response['id']\n movieinfo = '%s: %s' % (response['title'], response['url'])\n bot.sendMessage(chat_id, movieinfo)\n my_inline_keyboard = [[\n InlineKeyboardButton(text='1', callback_data=str(movieid)+' rate_movie_1'),\n InlineKeyboardButton(text='2', callback_data=str(movieid)+' rate_movie_2'),\n InlineKeyboardButton(text='3', callback_data=str(movieid)+' rate_movie_3'),\n InlineKeyboardButton(text='4', callback_data=str(movieid)+' rate_movie_4'),\n InlineKeyboardButton(text='5', callback_data=str(movieid)+' rate_movie_5')\n ]]\n keyboard = InlineKeyboardMarkup(inline_keyboard=my_inline_keyboard )\n bot.sendMessage(chat_id, \"How do you rate this movie?\", reply_markup=keyboard)\n\n \n elif message == \"/recommend\":\n # Ask the server to generate a list of\n # recommended movies to the user\n payload = {'chat_id':chat_id, 'top_n':3}\n r = requests.post(host_addr+'/recommend', json=payload)\n response = json.loads(r.content)\n # print(response)\n if response['movies']==[]:\n message = 'You have not rated enough movies, we cannot generate recommendation for you.'\n bot.sendMessage(chat_id, message)\n else:\n bot.sendMessage(chat_id, \"My recommendations:\")\n for item in response['movies']:\n movieinfo = '%s: %s' % (item['title'], item['url'])\n bot.sendMessage(chat_id, movieinfo)\n\n\n else:\n # Some command that we don't understand\n bot.sendMessage(chat_id, \"I don't understand your command.\")\n\n elif content_type == \"data\":\n # This is data returned by the custom keyboard\n # Extract the movie ID and the rating from the data\n # and then send this to the server\n # TODO\n # print(data)\n info = str.split(data)\n movieid = int(info[0])\n rate = info[1][-1]\n logging.info(\"Received rating: {}\".format(rate))\n bot.sendMessage(chat_id, \"Your rating is received!\")\n # logging.info('Movie id = %d' % movieid)\n payload = {'chat_id':chat_id, 'movie_id': movieid, 'rating': rate}\n r = requests.post(host_addr+'/rate_movie', json=payload)\n response = json.loads(r.content)\n logging.info('Update status: '+response['status'])",
"async def on_message(self, msg: Message):\n from_contact = msg.talker()\n text = msg.text()\n room = msg.room()\n if text == '#ding':\n conversation: Union[\n Room, Contact] = from_contact if room is None else room\n await conversation.ready()\n await conversation.say('dong')\n file_box = FileBox.from_url(\n 'https://ss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/'\n 'u=1116676390,2305043183&fm=26&gp=0.jpg',\n name='ding-dong.jpg')\n await conversation.say(file_box)",
"def handle_message() -> Response:\n commend = request.get_json()[\"message\"][\"text\"]\n chat_id = request.get_json()[\"message\"][\"chat\"][\"id\"]\n\n if commend == \"/start\":\n txt = \"Welcome to the shopping bot.\"+'\\n'+\"please enter category, or type popular to get the most popular searches \"\n elif str(commend).lower() in items:\n order[0] = str(commend)\n txt = \"choose color\"\n elif str(commend).lower() in colors:\n if order[0] == 0:\n txt = \"choose category\"\n order[1] = str(commend)\n txt = \"choose size\"\n elif str(commend).lower() in size:\n order[2] = str(commend)\n rec(chat_id, order)\n txt = get_url(order)\n elif str(commend).lower() == \"popular\":\n txt = get_popular(orders_dic)\n else:\n txt = \"try again\"\n # print(orders_dic)\n chat_id = request.get_json()[\"message\"][\"chat\"][\"id\"]\n print(chat_id)\n requests.get(f\"https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id={chat_id}&text={txt}\")\n return Response(\"Success\")",
"def text(request):\n data = []\n\n if request.method == \"GET\":\n response = {\"success\": False, \"error\": \"Get method is not allowed\"}\n return HttpResponse(json.dumps(response), status=501)\n\n elif request.method == \"POST\":\n ner_logger.debug(\"Fetching result\")\n\n try:\n verify_text_request(request)\n # if verify success get detection data\n data = get_text_entity_detection_data(request)\n\n except KeyError as err:\n response = {\"success\": False, \"error\": str(err)}\n # TODO: move to ner_logger.error\n ner_logger.exception(response)\n return HttpResponse(json.dumps(response), content_type='application/json',\n status=400)\n except TypeError as err:\n response = {\"success\": False, \"error\": str(err)}\n ner_logger.exception(response)\n return HttpResponse(json.dumps(response), content_type='application/json',\n status=400)\n except Exception as err:\n response = {\"success\": False, \"error\": str(err)}\n ner_logger.exception(response)\n return HttpResponse(json.dumps(response), content_type='application/json',\n status=400)\n\n if data:\n response = {\"success\": True, \"error\": None, \"data\": data}\n return HttpResponse(json.dumps(response), content_type='application/json', status=200)\n else:\n response = {\"success\": False, \"error\": \"Some error while parsing\"}\n return HttpResponse(json.dumps(response), status=400)",
"def message_handler(message):\n location = database.get_location(message.from_user.id)\n if not location:\n return {\"text\": \"Для поиска лекарств отправь своё местоположение\"}\n\n return get_drugs_message(find_drugs(message.text.encode('utf-8')))",
"def sms_reply():\n # Fetch the message\n media_msg = request.form.get('NumMedia')\n msg = request.form.get('Body').lower()\n resp = MessagingResponse()\n responded = False\n if '1' in media_msg:\n pic_url = request.form.get('MediaUrl0') # URL of the person's media\n # pprint(pic_url) # so you can see the URL that the picture generated \n resp.message(\"We have recieved your request for image analysis! Please wait for our response\")\n resp.message(pic_url)\n url = \"https://techclan-twitter.herokuapp.com/reverse_image?URL=\"\n url=url+pic_url\n resp.message('The image has been succesfully uploaded to our server!The Url of the image is :')\n response=requests.get(url)\n parsed=json.loads(response.text)\n s1=\"\"\n count=0\n for each in parsed:\n s1=s1+each+\"\\n ................\\n\"\n if count>5:\n break\n count=count+1\n resp.message('The reverse image analysis of image reports are:')\n resp.message(s1)\n time.sleep(1)\n u='http://18.205.87.224/api/text?id='\n u=u+pic_url\n response=requests.get(u)\n parsed=json.loads(response.text)\n resp.message(parsed)\n responded==True\n elif '5' in msg:\n r = requests.get('https://coronavirus-19-api.herokuapp.com/countries/india')\n if r.status_code == 200:\n data = r.json()\n text = f'_Covid-19 Cases in India_ \\n..........................\\nConfirmed Cases : *{data[\"cases\"]}* \\n................\\nToday Cases : *{data[\"todayCases\"]}* \\n..............\\nDeaths : *{data[\"deaths\"]}* \\n..................................\\nRecovered : *{data[\"recovered\"]}* \\n\\n..................\\nTotal Tested : *{data[\"totalTests\"]}* \\n\\n Type 0 to return to main menu'\n else:\n text = 'I could not retrieve the results at this time, sorry.'\n resp.message(text)\n responded = True \n \n elif '1' in msg:\n \n resp.message(\"wait we will fetch your results soon!!\")\n url = \"http://18.234.107.157:5000/api/text?id=\"\n ms=str(msg)\n #a,b=ms.split(' ',1)\n url=url+ms\n response=requests.get(url)\n parsed=json.loads(response.text)\n agree=0\n disagree=0\n discuss=0\n ctr=0\n for each in parsed:\n if ctr>100:\n break\n ctr=ctr+1\n answ=each.get('Score',\"error\")\n if answ == \"agree\":\n agree=agree+1\n elif answ == \"disagree\":\n disagree=disagree+1\n if(agree>disagree):\n resp.message(\"This is *REAL* according to our sources !! Our results are based on following sources..we cannot be 100% Sure.\")\n else:\n resp.message(\"This is *FAKE* according to our sources !! Our results are based on following sources..we cannot be 100% Sure.\")\n count=0\n s1=\"\"\n for each in parsed:\n s1=s1+each['link']+\"*Title :*\" +each['title']+\"\\n ................\\n\"\n if count>5:\n break\n count=count+1\n resp.message(s1)\n responded==True\n #reporting\n elif '3' in msg:\n # resp.message(\"We have reported your content to our police database!!\")\n ms=str(msg)\n a,b=ms.split(' ',1)\n url='https://spreadsheetupdate1.herokuapp.com/spreed?id='\n url=url+ms\n r=requests.get(url)\n resp.message(\"We have reported your content to our police database!!\")\n responded==True\n\n\n\n \n #for news\n\n elif msg=='news' or msg=='4':\n \n url=\"\"\"https://newsapi.org/v2/top-headlines?sources=bbc-news,cnn,cnbc,abc-news,google-news-uk,independent&apiKey=3ff5909978da49b68997fd2a1e21fae8\"\"\"\n r = requests.get(url)\n #resp.message(\"stay\") \n if r.status_code == 200:\n resp.message(\"stay here with us! 
We are fetching news for you \")\n data = r.json()\n articles = data['articles'][:5]\n result = \"\"\n ctr=0 \n for article in articles:\n # if ctr>10:\n # break\n # ctr=ctr+1\n title = article['title']\n url = article['url']\n if 'Z' in article['publishedAt']:\n published_at = datetime.datetime.strptime(article['publishedAt'][:19], \"%Y-%m-%dT%H:%M:%S\")\n else:\n published_at = datetime.datetime.strptime(article['publishedAt'], \"%Y-%m-%dT%H:%M:%S%z\")\n \n result += \"\"\"*{}*\nRead more: {}\n_Published at {:02}/{:02}/{:02} {:02}:{:02}:{:02} UTC_\n\"\"\".format(\n title,\n url, \n published_at.day, \n published_at.month, \n published_at.year, \n published_at.hour, \n published_at.minute, \n published_at.second\n )+\"\\n ..................\\n\"\n\n else:\n result = 'I cannot fetch news at this time. Sorry!'\n\n resp.message(result)\n responded = True\t\n else:\n phone_no = request.form.get('From')\n reply = fetch_reply(msg, phone_no)\n\n resp = MessagingResponse()\n resp.message(reply)\n responded = True\n \n\n \t\n\n return str(resp)",
"def handle(msg):\n\n # glance to get some meta on the message\n content_type, chat_type, chat_id = telepot.glance(msg)\n chat_id = str(chat_id)\n\n # we only want to process text messages from our specified chat\n if (content_type == 'text') and (chat_id in allowed_chat_ids):\n command = msg['text']\n try:\n _cmd = get_command(command)\n except UserWarning as ex:\n logger.error(ex)\n raise\n _cmd.execute(chat_id)",
"def image_handler(self, bot, update):\n text = update.message.text\n if text.startswith('/recon'):\n bot.sendMessage(chat_id=update.message.chat_id, text='*Object recognition*\\nSend me an image',\n parse_mode=ParseMode.MARKDOWN)\n return 10\n elif text.startswith('/faces'):\n bot.sendMessage(chat_id=update.message.chat_id, text='*Face recognition*\\nSend me an image',\n parse_mode=ParseMode.MARKDOWN)\n return 11",
"async def on_message(msg: Message):\n if not msg.is_self() and isinstance(msg.text(), str) and len(msg.text()) > 0 and \\\n msg._payload.type == MessageType.MESSAGE_TYPE_TEXT:\n text_new = re.sub(r'<.*>', '', msg.text())\n if len(text_new) < 400:\n if '@' in text_new:\n if '@小裕' in text_new:\n bot_response = model.predict(data=text_new.replace('@小裕', ''))\n await msg.say(bot_response)\n else:\n bot_response = model.predict(data=text_new)\n await msg.say(bot_response)\n else:\n await msg.say('说的太多了,长话短说啊')\n elif not msg.is_self() and msg._payload.type == MessageType.MESSAGE_TYPE_IMAGE:\n file_box_2 = await msg.to_file_box() # 将Message转换为FileBox\n await file_box_2.to_file(file_path=img_in_path, overwrite=True) # 将图片保存为本地文件\n img_new_path = img_transform(img_in_path) # 调用图片风格转换的函数\n file_box_3 = FileBox.from_file(img_new_path) # 从新的路径获取图片\n await msg.say(file_box_3)\n elif not msg.is_self() and msg._payload.type == MessageType.MESSAGE_TYPE_AUDIO:\n file_box_audio = await msg.to_file_box()\n await file_box_audio.to_file(file_path=mp3_path, overwrite=True)\n audio_path_new = resample_rate(mp3_path, wav_path, new_sample_rate=16000) # 转换能识别格式\n text = aip_asr(audio_path_new) # 语音识别成文字\n bot_response = model.predict(data=text) # 生产文字回复\n bot_response_path = aip_synthesis(bot_response, wav_path_res) # 语音生成\n file_box_audio_new = FileBox.from_file(bot_response_path)\n await msg.say(file_box_audio_new)",
"def sms_reply():\n # Start our TwiML response\n # if body.lower()==\"good\":\n message=\"Hi I'm IRIS, an Immediately Responsive Intelligent System\\nHow are you feeling today?\"\n user=request.form['Body']\n\n # message=\"Hi \"+ name+ \"\"\n # user=request.form['Body']\n\n if user==\"good\":\n message=\"Glad to hear it! I hope you continue to feel this way! Celebrate this feeling and hold onto what happened ot make you feel this way so that you can repeat it in the future!\"\n\n if user==\"sad\":\n message=\"I’m sorry to hear that. Here are some things I do to make me feel better: take a walk outside, listen to uplifting music, call or message a loved one, or watch or read something positive to take my mind off of what I’m feeling.\"\n\n if user==\"nervous\":\n message=\"It’s going to be ok! This feeling will not last forever.\"\n if user==\"lonely\":\n message=\"I’m here for you, and know that you are loved, supported, and important. The world would not be the same without you! For a loving quote respond\"\n\n if user==\"angry\":\n message=\"“Let me help you turn your anger into something positive. Here are some ways to burn off energy productively: take a long walk, remove yourself from the situation, paint of draw, listen to loud music, or take a break from what you are doing.\"\n\n if user==\"tired\":\n message=\"I understand what you are feeling well. I recommend taking a break to do an activity you enjoy, taking a nap, getting a coffee, doing 20 jumping jacks, listening to a pump-up playlist, or standing up to stretch for a bit.\"\n\n if user==\"average\":\n message=\"There are many things to look forward to!\"\n resp = MessagingResponse()\n\t # Add a message\n \n resp.message(message)\n\t # Add a picture message\n\t #msg.media(\"https://farm8.staticflickr.com/7090/6941316406_80b4d6d50e_z_d.jpg\")\n\n return str(resp)",
"def text_reply(msg):\n if msg['Type'] != TEXT:\n # sanitize the text field so that we can assume it always contains string.\n # and this is also to avoid infinite loop during serialization in the persist function\n msg['Text'] = msg['Type']\n\n to_user_id_name = msg['ToUserName']\n from_user_id_name = msg['FromUserName']\n\n if is_my_outgoing_msg(msg):\n handle_outgoing_msg(msg, to_user_id_name)\n else: # this is an incoming message from my friend\n handle_incoming_msg(msg, from_user_id_name)",
"def on_message(msg, server):\n global MY_INFO\n\n if MY_INFO is None:\n MY_INFO = server.slack.login_data['self']\n # MY_INFO['id']\n\n pprint.pprint(msg)\n text = msg.get(\"text\", \"\").lower()\n text += msg.get(\"file\", {}).get(\"preview\", \"\")\n recommendation = room_recommender(text)\n if recommendation:\n trigger_string, room_name = recommendation\n room_id = CHANNELS[room_name]['id']\n response_text = \"Hi, I noticed you were talking about “{trigger_string}”\\n You may have better luck posting this in <#{room_id}|{room_name}>\"\n response_msg = response_text.format(\n trigger_string=trigger_string,\n room_id=room_id,\n room_name=room_name\n )\n dm_user(server, msg.get('user'), response_msg)",
"async def ponyr(self, *text):\n if len(text) > 0:\n if len(text[0]) > 1 and len(text[0]) < 20:\n try:\n msg = \"+\".join(text)\n search = \"https://derpiboo.ru/search.json?q=\" + msg + \"&random_image=y\" \n async with aiohttp.get(search) as r:\n result = await r.json()\n if \"id\" in result:\n imgid = str(result[\"id\"])\n async with aiohttp.get(\"https://derpiboo.ru/images/\" + imgid + \".json\") as r:\n result = await r.json()\n url = \"http:\" + result[\"image\"]\n await self.bot.say(url)\n else:\n await self.bot.say(\"Your search terms gave no results.\")\n except:\n await self.bot.say(\"Error.\")\n else:\n await self.bot.say(\"Invalid search.\")\n else:\n async with aiohttp.get(\"https://derpiboo.ru/search.json?q=*&random_image=y\") as r:\n result = await r.json()\n imgid = str(result[\"id\"])\n async with aiohttp.get(\"https://derpiboo.ru/images/\" + imgid + \".json\") as r:\n result = await r.json()\n url = result[\"image\"]\n await self.bot.say(\"http:\" + url )",
"async def on_message(self, msg: Message):\r\n from_contact = msg.talker()\r\n text = msg.text()\r\n type = msg.type()\r\n room = msg.room()\r\n #\r\n username = from_contact.name\r\n if username=='KFu':\r\n print('message from myself')\r\n return\r\n # 不处理群消息\r\n # if room is None:\r\n if msg.type() == Message.Type.MESSAGE_TYPE_IMAGE:\r\n\r\n print('__image')\r\n image_file_box = await msg.to_file_box()\r\n filename='p'+str(time.time())+'.jpg'\r\n\r\n await image_file_box.to_file(file_path=filename,overwrite=True)\r\n inputdata=\"#pic#\"+filename\r\n bot = self.bm.run(username, inputdata)\r\n if bot is not None:\r\n # print('bot',bot)\r\n # print('bot replys',bot.replys[-1])\r\n # print('bot.replys_index',bot.replys_index)\r\n for i in range(bot.replys_index):\r\n bot, rdict = self.tm.run(bot)\r\n print('rdict',rdict)\r\n\r\n if len(list(rdict.keys()))==0:continue\r\n if list(rdict.keys())[0] == \"str\":\r\n print('reply str')\r\n conversation: Union[\r\n Room, Contact] = from_contact if room is None else room\r\n print('ready')\r\n await conversation.ready()\r\n print(list(rdict.values())[0])\r\n await conversation.say(list(rdict.values())[0])\r\n elif list(rdict.keys())[0] == \"pic\" or 'mov':\r\n print('reply pic/mov')\r\n\r\n conversation: Union[\r\n Room, Contact] = from_contact if room is None else room\r\n\r\n await conversation.ready()\r\n try:\r\n file_box = FileBox.from_file(list(rdict.values())[0])\r\n except Exception as e:\r\n print('file box error',e)\r\n file_box='嗯嗯'\r\n await conversation.say(file_box)\r\n\r\n elif msg.type() == Message.Type.MESSAGE_TYPE_TEXT:\r\n inputdata = \"#str#\" + msg.text()\r\n print('————text')\r\n\r\n bot = self.bm.run(username, inputdata)\r\n if bot is not None:\r\n # print('bot', bot)\r\n # print('bot replys',bot.replys[-1])\r\n # print('bot.replys_index',bot.replys_index)\r\n for i in range(bot.replys_index):\r\n bot, rdict = self.tm.run(bot)\r\n print('rdict',rdict)\r\n if len(list(rdict.keys()))==0:continue\r\n if list(rdict.keys())[0] == \"str\":\r\n print('reply str')\r\n conversation: Union[\r\n Room, Contact] = from_contact if room is None else room\r\n\r\n await conversation.ready()\r\n print('rdict[splitNum:]',list(rdict.values())[0])\r\n await conversation.say(list(rdict.values())[0])\r\n elif list(rdict.keys())[0] == \"pic\" or 'mov':\r\n print('reply pic/mov')\r\n conversation: Union[\r\n Room, Contact] = from_contact if room is None else room\r\n\r\n await conversation.ready()\r\n try:\r\n file_box = FileBox.from_file(list(rdict.values())[0])\r\n except Exception as e:\r\n print('file box error',e)\r\n file_box='嗯嗯'\r\n await conversation.say(file_box)\r\n else:\r\n print('__new for dict')\r\n conversation: Union[\r\n Room, Contact] = from_contact if room is None else room\r\n await conversation.ready()\r\n await conversation.say('暂时不支持这种类型的消息哦')",
"def handle_text_messages(self, update, context):\n\n # Split user input into single words\n words = set(update.message.text.lower().split())\n logging.debug(f'Received message: {update.message.text}')\n\n # For debugging: Log users that received something from bot\n chat_user_client = update.message.from_user.username\n if chat_user_client == None:\n chat_user_client = update.message.chat_id\n\n\n # Possibility: received command from menu_trigger\n for Trigger in self.menu_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.show_menu(update, context)\n logging.info(f'{chat_user_client} checked out the menu!')\n\n return\n\n\n # Possibility: received command from loan_stats_trigger\n for Trigger in self.loan_stats_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n #self.send_textfile('under_construction.txt', update, context)\n self.show_loan_stats(update, context)\n self.send_signature(update, context)\n logging.info(f'{chat_user_client} got loan stats!')\n\n return\n\n # Possibility: received command from il_trigger\n for Trigger in self.il_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.send_textfile('under_construction.txt', update, context)\n #self.show_il(update, context)\n #self.send_signature(update, context)\n logging.info(f'{chat_user_client} tried to get IL info!')\n\n return\n\n # Possibility: received command from assets_trigger\n for Trigger in self.assets_trigger:\n for word in words:\n if word.startswith(Trigger):\n\n self.send_textfile('under_construction.txt', update, context)\n #self.self.show_assets(update, context)\n #self.send_signature(update, context)\n logging.info(f'{chat_user_client} tried to get asset info!')\n\n return",
"def text(message):\n room = session.get('room')\n name = session.get('name')\n msg = message['msg']\n\n if name == CONST_NAME0:\n handleText(msg, CONST_ID0, room)\n elif name == CONST_NAME1:\n handleText(msg, CONST_ID1, room)\n elif name == CONST_NAME2:\n handleText(msg, CONST_ID2, room)\n else:\n emit('message', {'msg': session.get('name') + ': User doesn\\'t exist!'}, room=room)",
"def process_incoming_message(self):\n\n # Get the webhook data\n post_data = request.json\n\n # Determine the Spark Room to send reply to\n room_id = post_data[\"data\"][\"roomId\"]\n\n # Get the details about the message that was sent.\n message_id = post_data[\"data\"][\"id\"]\n message = self.spark.messages.get(message_id)\n if self.DEBUG:\n sys.stderr.write(\"Message content:\" + \"\\n\")\n sys.stderr.write(str(message) + \"\\n\")\n\n # First make sure not processing a message from the bots\n # Needed to avoid the bot talking to itself\n # We check using IDs instead of emails since the email\n # of the bot could change while the bot is running\n # for example from [email protected] to [email protected]\n if message.personId in self.spark.people.me().id:\n if self.DEBUG:\n sys.stderr.write(\"Ignoring message from our self\" + \"\\n\")\n return \"\"\n\n # Log details on message\n sys.stderr.write(\"Message from: \" + message.personEmail + \"\\n\")\n\n # Find the command that was sent, if any\n command = \"\"\n for c in self.commands.items():\n if message.text.find(c[0]) != -1:\n command = c[0]\n sys.stderr.write(\"Found command: \" + command + \"\\n\")\n # If a command was found, stop looking for others\n break\n\n # Build the reply to the user\n reply = \"\"\n\n # Take action based on command\n # If no command found, send the default_action\n if command in [\"\"] and self.default_action:\n # noinspection PyCallingNonCallable\n reply = self.commands[self.default_action][\"callback\"](message)\n elif command in self.commands.keys():\n # noinspection PyCallingNonCallable\n reply = self.commands[command][\"callback\"](message)\n else:\n pass\n\n # allow command handlers to craft their own Spark message\n if reply and isinstance(reply, Response):\n reply.roomId = room_id\n reply = reply.as_dict()\n self.spark.messages.create(**reply)\n reply = \"ok\"\n elif reply:\n self.spark.messages.create(roomId=room_id, markdown=reply)\n return reply",
"def handle(message):\n\n text = message.body[\"text\"]\n logger.info(\"Received message\", message=text)\n\n if text == \"status\":\n handle_status(message)\n return\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"regex\"].match(text):\n handle_command(message, slack_config)\n return\n\n for namespace, help_config in config[\"help\"].items():\n for pattern in [\"^{} help$\", \"^help {}$\"]:\n if re.match(pattern.format(namespace), text):\n handle_namespace_help(message, help_config)\n return\n\n include_apology = text != \"help\"\n handle_help(message, config[\"help\"], include_apology)",
"def on_bot_message():\n handle_bot_message(request.get_json())\n return \"ok\"",
"async def bingadult(self, ctx, *, text):\n settings = loadauth()\n channel = ctx.message.channel\n server = ctx.message.server\n operation = 'adultimagesearch'\n check = self.checkadult(server, channel, settings)\n if check is False:\n return await self.bot.say(\"Usage of %bingadult is disabled in \" +\n \"this server and/or channel.\")\n if settings['apikey'] == '' or settings['apikey'] == 'blank':\n return await self.bot.say(\"Missing or incorrect API key. Please \" +\n \"contact the owner to add an API key.\")\n apikey = settings['apikey']\n text, limit = self.limitget(text)\n result = self.getfrombing(apikey, text, limit, operation)\n bottext = self.obtainresult(result, operation)\n return await self.bot.say(bottext)",
"async def on_message(message):\n if message.author.name == client.user.name:\n return\n text = message.content\n pattern = re.compile(r'(?<!/)\\br/([A-Z]|[a-z]|[0-9])+([A-Z]|[a-z]|[0-9]|_)*')\n match = pattern.search(text)\n subreddit = ''\n if match is None:\n print('No subreddit pattern provided in message text: {}.'.format(text))\n else:\n subreddit = match.group(0)\n\n outgoing_message = None\n if text.startswith('!rbot'):\n words = text.split(' ')\n if len(words) < 2 or not rbot_commands.Commands.commands.get(words[1]):\n outgoing_message = show_help(message, rbot_commands.Commands.commands)\n else:\n outgoing_message = rbot_commands.Commands.commands[words[1]].do(message, subreddit)\n elif subreddit:\n outgoing_message = 'https://www.reddit.com/{}'.format(subreddit)\n else:\n print('nothing to do for message: {}'.format(text))\n\n if outgoing_message:\n print('outgoing message: {}'.format(outgoing_message))\n await client.send_message(message.channel, outgoing_message)\n else:\n print('Invalid message result.')",
"def call_chatbot():\n\t# prase the given data\n\tdata = flask.json.loads(flask.request.data.decode('utf-8'))\n\tuse_tts, text, current_id= data[\"useTTS\"], data[\"text\"], data[\"id\"]\n\tapp.logger.info(\"User Query: \"+ text)\n\t\n\tresult = []\n\t# call rasa: some requests get lost, loop till you get a response\n\twhile(True):\n\t\ttry:\n\t\t\t# rasa rest API\n\t\t\turl = \"http://localhost:5005/webhooks/rest/webhook\"\n\t\t\t# rasa request must have a \"sender\" and \"message\" keys\n\t\t\tres = requests.post(\n\t\t\t\t\turl=url,\n\t\t\t\t\tdata=flask.json.dumps({\"sender\": \"Rasa\", \"message\": text}),\n\t\t\t\t\ttimeout=5).json()\n\t\texcept:\n\t\t\t# mimic the rasa response when something wrong happens\n\t\t\tres = [{'recipient_id': 'Rasa',\n\t\t\t\t\t'text': \"[SOMETHING WENT WRONG!!]\"}]\n\t\tif res: break\n\tapp.logger.info(\"Rasa Response:\")\n\tapp.logger.info(\"\\t\"+ str(res))\n\tfor item in res:\n\t\td = {}\n\t\tcurrent_id += 1\n\t\td[\"id\"] = current_id\n\t\td[\"type\"] = \"text\" if \"text\" in item.keys() else \"image\"\n\t\td[\"body\"] = item[d[\"type\"]]\n\t\tif use_tts and d[\"type\"] == \"text\":\n\t\t\ttic = time.time()\n\t\t\twav, sr = tts_model.synthesize(d[\"body\"])\n\t\t\tduration = len(wav) / sr\n\t\t\td[\"snd\"] = {\n\t\t\t\t\"audio\": wav.tolist(),\n\t\t\t\t\"sample_rate\": sr,\n\t\t\t}\n\t\t\ttoc = time.time()\n\t\t\tapp.logger.info( \"TTS Duration: {} seconds\".format(toc-tic) )\n\t\tresult.append(d)\n\n\t# get back the result\n\tflask_response = app.response_class(response=flask.json.dumps(result),\n\t\t\t\t\t\t\t\t\t\tstatus=200,\n\t\t\t\t\t\t\t\t\t\tmimetype='application/json' )\n\treturn flask_response",
"def text_message(self, update, context):\n # check mode\n if self.adding_meals:\n # text from the message is retrieved\n typed_meal = update.message.text\n # we get the instance from the meal list. It might be None\n meal = self.meal_list.get(typed_meal)\n try:\n # might produce an AttributeError if ingridients is None\n # every ingridient in the meal is checked\n for ingridient in meal.ingridients:\n # if it's already in self.list the quantity increases\n if ingridient.name in self.list.keys():\n self.list[ingridient.name][1] += 1\n else:\n # the instance is added to the list\n self.list[ingridient.name] = [ingridient, 1]\n # the list is transformed to text\n to_write = functions.list_to_text(sorted(self.list.values(),\n key=lambda x: x[0].category))\n except AttributeError:\n to_write = MESSAGES[\"meal_error\"]\n # message is send\n self.send_message(update, context, to_write)\n # check mode\n elif self.adding_ingridients:\n # text from the message is retrieved\n typed_ingridient = update.message.text\n # we get the instance from the ingridients list. It might be None\n ingridient = self.ingridients.get(typed_ingridient)\n try:\n # might produce an AttributeError if ingridients is None\n # if it's already in self.list the quantity increases\n if ingridient.name in self.list.keys():\n self.list[ingridient.name][1] += 1\n else:\n # the instance is added to the list\n self.list[ingridient.name] = [ingridient, 1]\n # the list is transformed to text\n to_write = functions.list_to_text(sorted(self.list.values(),\n key=lambda x: x[0].category))\n except AttributeError:\n to_write = MESSAGES[\"add_ingridient_error\"]\n # message is send\n self.send_message(update, context, to_write)\n # check mode\n elif self.removing_ingridients:\n # text from the message is retrieved\n typed_ingridient = update.message.text\n try:\n # might produce a KeyError if typed_meal is not in self.list\n # decreases amounot of the ingridient\n self.list[typed_ingridient][1] -= 1\n # remove igridient from list when the quantity is 0\n if self.list[typed_ingridient][1] == 0:\n del self.list[typed_ingridient]\n # the list is transformed to text\n to_write = functions.list_to_text(sorted(self.list.values(),\n key=lambda x: x[0].category))\n except KeyError:\n to_write = MESSAGES[\"remove_ingridient_error\"]\n # message is send\n self.keyboard = \"remove_ingridients\"\n self.send_message(update, context, to_write)",
"def handle_text_message(event: MessageEvent):\n\n line_bot_api = LineBotApi(LINE_CHANNEL_TOKEN)\n line_bot_api.reply_message(\n event.reply_token, TextSendMessage(event.message.text)\n )",
"async def bing(self, *, text):\n settings = loadauth()\n operation = 'moderateimagesearch'\n if settings['apikey'] == '' or settings['apikey'] == 'blank':\n return await self.bot.say(\"Missing or incorrect API key. Please \" +\n \"contact the owner to add an API key.\")\n apikey = settings['apikey']\n text, limit = self.limitget(text)\n result = self.getfrombing(apikey, text, limit, operation)\n bottext = self.obtainresult(result, operation)\n return await self.bot.say(bottext)",
"def incoming(request):\n message = TropoIncomingMessage(\n sender=request.REQUEST.get('sender'),\n text=str(request.REQUEST.get('text')),\n received=datetime.now())\n \n return process_message(message)",
"def handle_message(event):\n intention = parse_intention(event.message.text)\n if intention == config.QUERY_INTENTION:\n handle_query_weather_message(event)\n elif intention == config.SUBSCRIBE_INTENTION:\n handle_subscribe_message(event)\n else:\n handle_unknown_message(event)",
"async def handle_message(self, message_object):\n\n regex_result_list = re.findall(\n r'((?<=http)((?<=s)|)://|)((?<=www.)|)((?<=ex)|(?<=e-))hentai.org/g/([0-9]+)/([0-9a-f]{10})(/|)',\n message_object.content)\n regex_result_list_unique = (tuple(self.return_unique_set(regex_result_list)))\n\n for link_tuple in regex_result_list_unique:\n\n # Setting the 2nd last and last tuple value of the RegEx to properly named variables.\n # the regex_result_list contains of a tuple with the different substrings in it\n # link_tuple[0] is the whole string, link_tuple[n] are the regex matches\n\n gallery_id = link_tuple[4]\n gallery_token = link_tuple[5]\n\n # create json from POST-response using requests built-in parser\n json_data = requests.post(api_url, self.build_payload(gallery_id, gallery_token),\n json_request_headers).json()\n\n if 'gmetadata' in json_data and json_data['gmetadata'][0].get('error') is None:\n em = discord.Embed(description=self.build_title_string(json_data) + \"\\n\" +\n self.build_title_jpn_string(json_data) + \"\\n\" +\n \"*\" + self.build_tag_section(json_data) + \"*\",\n colour=self.pm.clientWrap.get_color(self.name))\n em.set_image(url=json_data['gmetadata'][0]['thumb'])\n await self.pm.client.send_message(message_object.channel, \"\", embed=em)"
] | [
"0.63701046",
"0.6288669",
"0.61613667",
"0.6006372",
"0.59462607",
"0.5779944",
"0.57687557",
"0.57631946",
"0.57228655",
"0.5700433",
"0.5691575",
"0.56864244",
"0.5678104",
"0.5676385",
"0.5672712",
"0.5642777",
"0.561658",
"0.558971",
"0.55819124",
"0.55591977",
"0.5492036",
"0.54674494",
"0.5460551",
"0.5459816",
"0.5411868",
"0.5369022",
"0.5359423",
"0.5349594",
"0.53397316",
"0.53362876"
] | 0.67864186 | 0 |
Create a Frame Buffer Object (fbo) with the given size and color components | def create_fbo(ctx: moderngl.Context, size: Size, components: int = 4) -> moderngl.Framebuffer:
return ctx.framebuffer(
color_attachments=[ctx.renderbuffer(size, components)],
depth_attachment=ctx.depth_renderbuffer(size)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_fbo_head():\n lfb_prefix_path = osp.normpath(\n osp.join(osp.dirname(__file__), '../data/lfb'))\n\n st_feat_shape = (1, 16, 1, 8, 8)\n st_feat = generate_backbone_demo_inputs(st_feat_shape)\n rois = torch.randn(1, 5)\n rois[0][0] = 0\n img_metas = [dict(img_key='video_1, 930')]\n\n # non local fbo\n fbo_head = FBOHead(\n lfb_cfg=dict(\n lfb_prefix_path=lfb_prefix_path,\n max_num_sampled_feat=5,\n window_size=60,\n lfb_channels=16,\n dataset_modes=('unittest'),\n device='cpu'),\n fbo_cfg=dict(\n type='non_local',\n st_feat_channels=16,\n lt_feat_channels=16,\n latent_channels=8,\n num_st_feat=1,\n num_lt_feat=5 * 60,\n ))\n fbo_head.init_weights()\n out = fbo_head(st_feat, rois, img_metas)\n assert out.shape == (1, 24, 1, 1, 1)\n\n # avg fbo\n fbo_head = FBOHead(\n lfb_cfg=dict(\n lfb_prefix_path=lfb_prefix_path,\n max_num_sampled_feat=5,\n window_size=60,\n lfb_channels=16,\n dataset_modes=('unittest'),\n device='cpu'),\n fbo_cfg=dict(type='avg'))\n fbo_head.init_weights()\n out = fbo_head(st_feat, rois, img_metas)\n assert out.shape == (1, 32, 1, 1, 1)\n\n # max fbo\n fbo_head = FBOHead(\n lfb_cfg=dict(\n lfb_prefix_path=lfb_prefix_path,\n max_num_sampled_feat=5,\n window_size=60,\n lfb_channels=16,\n dataset_modes=('unittest'),\n device='cpu'),\n fbo_cfg=dict(type='max'))\n fbo_head.init_weights()\n out = fbo_head(st_feat, rois, img_metas)\n assert out.shape == (1, 32, 1, 1, 1)",
"def _make_buffer(self, width, height):\n fb_prop = p3d.FrameBufferProperties(p3d.FrameBufferProperties.get_default())\n fb_prop.set_multisamples(self._multisamples)\n fb_prop.set_srgb_color(self._srgb_color)\n\n self._buffer = self._engine.make_output(\n self._pipe, name=\"offscreen\", sort=0,\n fb_prop=p3d.FrameBufferProperties.get_default(),\n win_prop=p3d.WindowProperties(size=(width, height)),\n flags=p3d.GraphicsPipe.BFRefuseWindow)\n\n self._region = self._buffer.make_display_region()\n\n self._depth_tex = p3d.Texture()\n self._depth_tex.setFormat(p3d.Texture.FDepthComponent)\n self._buffer.addRenderTexture(\n self._depth_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPDepth)\n\n self._color_tex = p3d.Texture()\n self._color_tex.setFormat(p3d.Texture.FRgba8)\n self._buffer.addRenderTexture(\n self._color_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPColor)",
"def __init__(self, env, n_frames=4, dim_order='tensorflow'):\r\n super(FrameBuffer, self).__init__(env)\r\n self.dim_order = dim_order\r\n if dim_order == 'tensorflow':\r\n height, width, n_channels = env.observation_space.shape\r\n obs_shape = [height, width, n_channels * n_frames]\r\n elif dim_order == 'pytorch':\r\n n_channels, height, width = env.observation_space.shape\r\n obs_shape = [n_channels * n_frames, height, width]\r\n else:\r\n raise ValueError(\r\n 'dim_order should be \"tensorflow\" or \"pytorch\", got {}'.format(dim_order))\r\n self.observation_space = Box(0.0, 1.0, obs_shape)\r\n self.framebuffer = np.zeros(obs_shape, 'float32')",
"def face_maker(size=180, color='0x9C661F'):\n face = GOval(size, size-20)\n face.filled = True\n face.fill_color = color\n return face",
"def snapshot(self, components=4):\n fbo = self.fbo\n data = fbo.read(components=3)\n from PIL import Image\n return Image.frombytes('RGB', fbo.size, data).transpose(Image.FLIP_TOP_BOTTOM)",
"def FrameAddColorFrame(builder, colorFrame):\n return AddColorFrame(builder, colorFrame)",
"def _start(self):\r\n opengles.glBindFramebuffer(GL_FRAMEBUFFER, self.framebuffer[0])\r\n opengles.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,\r\n GL_TEXTURE_2D, self._tex.value, 0)\r\n #thanks to PeterO c.o. RPi forum for pointing out missing depth attchmnt\r\n opengles.glBindRenderbuffer(GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16,\r\n self.ix, self.iy)\r\n opengles.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,\r\n GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)\r\n\r\n #assert opengles.glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE\r",
"def __enter__(self):\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)",
"def create_rectangle(width, height, color):\n data = [-width / 2, -height / 2,\n width / 2, -height / 2,\n width / 2, height / 2,\n -width / 2, height / 2]\n\n vbo_id = GL.GLuint()\n\n GL.glGenBuffers(1, ctypes.pointer(vbo_id))\n\n v2f = data\n data2 = (GL.GLfloat * len(v2f))(*v2f)\n\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, vbo_id)\n GL.glBufferData(GL.GL_ARRAY_BUFFER, ctypes.sizeof(data2), data2,\n GL.GL_STATIC_DRAW)\n\n shape = VertexBuffer(vbo_id, len(v2f) // 2, width, height, color)\n return shape",
"def fill(self, framebuf, color):\n rgb565_color = self.color_to_rgb565(color)\n for i in range(0, len(framebuf.buf), 2):\n framebuf.buf[i : i + 2] = rgb565_color",
"def __init__(self, size):\n self.__size = size\n\n self.__height, self.__width = self.__size\n self.__output_image = zeros(self.__width * self.__height * 4,\n dtype='f')\n\n self.__light = None\n self.__face = None\n self.__model_matrix = zeros((4, 4), dtype='f')\n self.__light_matrix = zeros((4, 4), dtype='f')\n\n self.__init_display()\n self.__enable_depth_test()\n\n glEnableClientState(GL_COLOR_ARRAY)\n glEnableClientState(GL_VERTEX_ARRAY)\n\n glClearColor(1., 1., 1., 0.)\n\n self.__sh = ShadersHelper(['face.vert', 'depth.vert'],\n ['face.frag', 'depth.frag'], 1, 2)\n\n glutDisplayFunc(self.__display)\n self.__callback = None\n\n self.__sh.add_attribute(0, self.__mean_face, 'mean_position')\n self.__sh.bind_buffer()\n self.__sh.use_shaders()\n self.__sh.link_texture('principal_components', 0)\n self.__sh.link_texture('depth_map', 1)\n self.__bind_pca_texture()\n self.__sh.bind_depth_texture(self.__size)",
"def put_color(self, _pos, _color):\n assert(((len(_pos) == 2) and (len(_color) == self.__resolution[2])) or\n ((len(_pos) == 3) and (len(_color) == 1)))\n self.__framebuffer[_pos] = _color",
"def define_gabor_fragment(frag_size):\n bg_value = 0\n\n # frag = np.ones(frag_size, dtype='uint8') * 255\n # frag[:, frag_size[0] // 2 - 2, :] = 0\n # frag[:, frag_size[0] // 2 - 1, :] = 0\n # frag[:, frag_size[0] // 2, :] = 0\n # frag[:, frag_size[0] // 2 + 1, :] = 0\n # frag[:, frag_size[0] // 2 + 2, :] = 0\n\n frag = np.array([\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255],\n [255, 255, 0, 0, 0, 255, 255]\n ])\n frag = np.stack([frag, frag, frag], axis=-1)\n\n # --------------------------------------------------------------\n plt.figure()\n plt.imshow(frag)\n plt.title(\"Specified Fragment\")\n import pdb\n pdb.set_trace()\n\n print(\"Finding Gabor Fit ...\")\n frag = (frag - frag.min()) / (frag.max() - frag.min())\n gabor_params_list = gabor_fits.find_best_fit_2d_gabor(frag, verbose=1)\n\n g_params = gabor_fits.convert_gabor_params_list_to_dict(gabor_params_list)\n g_params.print_params(g_params)\n\n fitted_gabor = gabor_fits.get_gabor_fragment(gabor_params, frag_size[:2])\n\n f, ax_arr = plt.subplots(1, 2)\n ax_arr[0].imshow(frag)\n ax_arr[0].set_title(\"Specified Fragment\")\n ax_arr[1].imshow(fitted_gabor)\n ax_arr[1].set_title(\"Generated Fragment\")\n\n return fitted_gabor, g_params, bg_value",
"def __init__(self, x_size: int, y_size: int, dtype=np.int8):\r\n self.fb = np.zeros(shape=(x_size, y_size), dtype=dtype)\r\n self.x_size = x_size\r\n self.y_size = y_size\r\n self.dtype = dtype",
"def create_buffers(\n flags, \n screen_shape,\n minimap_shape,\n player_shape, \n num_actions, \n max_num_spatial_args, \n max_num_categorical_args\n) -> Buffers:\n T = flags.unroll_length\n # specs is a dict of dict which containt the keys 'size' and 'dtype'\n specs = dict(\n screen_layers=dict(size=(T+1, *screen_shape), dtype=torch.float32), \n minimap_layers=dict(size=(T+1, *minimap_shape), dtype=torch.float32),\n player_state=dict(size=(T+1, player_shape), dtype=torch.float32), \n screen_layers_trg=dict(size=(T+1, *screen_shape), dtype=torch.float32), \n minimap_layers_trg=dict(size=(T+1, *minimap_shape), dtype=torch.float32),\n player_state_trg=dict(size=(T+1, player_shape), dtype=torch.float32), \n last_action=dict(size=(T+1,), dtype=torch.int64),\n action_mask=dict(size=(T+1, num_actions), dtype=torch.bool), \n reward=dict(size=(T+1,), dtype=torch.float32),\n done=dict(size=(T+1,), dtype=torch.bool),\n bootstrap=dict(size=(T+1,), dtype=torch.bool),\n episode_return=dict(size=(T+1,), dtype=torch.float32),\n episode_step=dict(size=(T+1,), dtype=torch.int32),\n log_prob=dict(size=(T+1,), dtype=torch.float32),\n main_action=dict(size=(T+1,), dtype=torch.int64), \n categorical_indexes=dict(size=(T+1, max_num_categorical_args), dtype=torch.int64),\n spatial_indexes=dict(size=(T+1, max_num_spatial_args), dtype=torch.int64),\n )\n buffers: Buffers = {key: [] for key in specs}\n for _ in range(flags.num_buffers):\n for key in buffers:\n buffers[key].append(torch.empty(**specs[key]).share_memory_())\n return buffers",
"def _init_buffers(self, v, n, _):\n super()._init_buffers(v, n, _)\n\n self.vbos.append(gl.glGenBuffers(1))\n\n # init VBO 2 - dynamic color data\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])\n loc = self.get_attribute_location(\"carried\")\n gl.glEnableVertexAttribArray(loc)\n gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p(0))\n gl.glVertexAttribDivisor(loc, 1)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, 0, np.array([], dtype=np.float32), gl.GL_DYNAMIC_DRAW)",
"def test_color__color_object_arg(self):\n color_args = (10, 20, 30, 40)\n color_obj = pygame.Color(*color_args)\n\n new_color_obj = pygame.Color(color_obj)\n\n self.assertIsInstance(new_color_obj, pygame.Color)\n self.assertEqual(new_color_obj, color_obj)\n self.assertEqual(new_color_obj.r, color_args[0])\n self.assertEqual(new_color_obj.g, color_args[1])\n self.assertEqual(new_color_obj.b, color_args[2])\n self.assertEqual(new_color_obj.a, color_args[3])",
"def bdev_uring_create(client, filename, name, block_size=None):\n params = {'name': name,\n 'filename': filename}\n\n if block_size:\n params['block_size'] = block_size\n\n return client.call('bdev_uring_create', params)",
"def __init__(self, color, size=10, spacing=1, flip=False, filled=True):\n super().__init__()\n self.size = size\n self.spacing = spacing\n self.color = mcolors.to_rgba(color)\n self.flip = flip\n self.filled = filled\n self._symbol_width = None",
"def __init__(self, camera=None, light=None, name=\"\",\r\n x=0.0, y=0.0, z=0.0,\r\n rx=0.0, ry=0.0, rz=0.0,\r\n sx=1.0, sy=1.0, sz=1.0,\r\n cx=0.0, cy=0.0, cz=0.0):\r\n super(MergeShape, self).__init__(camera, light, name, x, y, z,\r\n rx, ry, rz, sx, sy, sz, cx, cy, cz)\r\n\r\n if VERBOSE:\r\n print(\"Creating Merge Shape ...\")\r\n\r\n self.vertices = []\r\n self.normals = []\r\n self.tex_coords = []\r\n self.indices = [] #stores all indices for single render\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))",
"def new(mode, size, color=0):\r\n\r\n _check_size(size)\r\n\r\n if color is None:\r\n # don't initialize\r\n _im = Image()._new(mode, size)\r\n return Image(_im)\r\n\r\n if type(color).__name__ == \"str\":\r\n # css3-style specifier\r\n color = ImageColor().getcolor(color, mode)\r\n color = ImageDraw(None)._convert_bgr2rgb(color)\r\n\r\n _im = Image()._new(mode, size, color)\r\n return Image(_im)",
"def create_camera_blueprint(self, image_width: str, image_height: str) -> carla.ActorBlueprint:\n bp_library = self.carla_world.get_blueprint_library()\n blueprint = bp_library.find(\"sensor.camera.rgb\")\n blueprint.set_attribute('image_size_x', image_width)\n blueprint.set_attribute('image_size_y', image_height)\n\n blueprint.set_attribute('role_name', \"scene_camera\")\n blueprint.set_attribute('blur_amount', str(0.5))\n blueprint.set_attribute('motion_blur_intensity', str(0.225))\n blueprint.set_attribute('motion_blur_max_distortion', str(0.175))\n blueprint.set_attribute('motion_blur_min_object_screen_size', str(0.05))\n return blueprint",
"def ensure_size(self, device, size):\n if size != self.size:\n self.size = size\n usage = wgpu.TextureUsage.RENDER_ATTACHMENT | wgpu.TextureUsage.COPY_SRC\n if self.format.startswith((\"rgb\", \"bgr\")):\n usage |= wgpu.TextureUsage.TEXTURE_BINDING\n self.texture = device.create_texture(\n size=size, usage=usage, dimension=\"2d\", format=self.format\n )\n self.texture_view = self.texture.create_view()",
"def generate_frame(offset: int = 0, color: str = \"yellow\"):\n # Setup the canvas\n c = COLORS[color]\n canvas = Image.new(\"RGBA\", (768, 768), c[0])\n draw = ImageDraw.Draw(canvas)\n n = 5\n q = 360 / (2 * n)\n\n # Render each 'beam' of the sunbeam effect\n for i in range(n):\n startang = offset + 2 * (i - 1) * q\n endang = offset + (2 * (i - 1) + 1) * q\n draw.pieslice((0, 0, 768, 768), startang, endang, fill=c[1])\n\n # Crop to the center 512x\n return canvas.crop((128, 128, 640, 640))",
"def make_surface(\n width: NumberType,\n height: NumberType,\n alpha: bool = False,\n fill_color: Optional[ColorInputType] = None\n) -> 'pygame.Surface':\n assert isinstance(width, NumberInstance)\n assert isinstance(height, NumberInstance)\n assert isinstance(alpha, bool)\n assert width >= 0 and height >= 0, \\\n 'surface width and height must be equal or greater than zero'\n surface = pygame.Surface((int(width), int(height)), pygame.SRCALPHA, 32) # lgtm [py/call/wrong-arguments]\n if alpha:\n # noinspection PyArgumentList\n surface = pygame.Surface.convert_alpha(surface)\n if fill_color is not None:\n fill_color = assert_color(fill_color)\n surface.fill(fill_color)\n return surface",
"def __init__(self, name):\r\n super(OffScreenTexture, self).__init__(name)\r\n from pi3d.Display import Display\r\n self.ix, self.iy = Display.INSTANCE.width, Display.INSTANCE.height\r\n self.im = Image.new(\"RGBA\",(self.ix, self.iy))\r\n self.image = self.im.convert(\"RGBA\").tostring('raw', \"RGBA\")\r\n self.alpha = True\r\n self.blend = False\r\n\r\n self._tex = ctypes.c_int()\r\n self.framebuffer = (ctypes.c_int * 1)()\r\n opengles.glGenFramebuffers(1, self.framebuffer)\r\n self.depthbuffer = (ctypes.c_int * 1)()\r\n opengles.glGenRenderbuffers(1, self.depthbuffer)",
"def render_frame_color(self, sim: Simulator) -> np.array:\n (w, h) = sim.get_frame_size()\n rgba = 4\n size = h * w * rgba\n frame = bytearray(size)\n self.get_state().render_into_buffer(frame, True)\n return np.asarray(frame, dtype=np.uint8).reshape(h, w, rgba)",
"def doBlur(*args, colorFile: AnyStr=\"\", length: float=0.0, memCapSize: float=0.0, sharpness:\n float=0.0, smooth: float=0.0, smoothColor: bool=True, vectorFile: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass",
"def fill(framebuf, color):\n if color:\n fill = 0xFF\n else:\n fill = 0x00\n for i in range(len(framebuf.buf)): # pylint: disable=consider-using-enumerate\n framebuf.buf[i] = fill",
"def fill(framebuf, color):\n if color:\n fill = 0xFF\n else:\n fill = 0x00\n for i in range(len(framebuf.buf)): # pylint: disable=consider-using-enumerate\n framebuf.buf[i] = fill"
] | [
"0.5801889",
"0.5702869",
"0.5541218",
"0.5540134",
"0.53373677",
"0.5206853",
"0.52051324",
"0.51887405",
"0.5129463",
"0.5122263",
"0.5088751",
"0.50370806",
"0.5023039",
"0.50155634",
"0.49956527",
"0.49854746",
"0.49744597",
"0.49742502",
"0.4924055",
"0.49196365",
"0.49181545",
"0.49169278",
"0.49027008",
"0.48602638",
"0.4843578",
"0.48185492",
"0.4817611",
"0.4809872",
"0.4806997",
"0.4806997"
] | 0.87300503 | 0 |
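A minimal usage sketch for the create_fbo helper above, assuming moderngl is installed and a headless GL context can be created; the size value and clear color are illustrative only:

import moderngl

ctx = moderngl.create_standalone_context()           # headless context, no window required
size = (640, 480)
fbo = ctx.framebuffer(
    color_attachments=[ctx.renderbuffer(size, components=4)],
    depth_attachment=ctx.depth_renderbuffer(size))
fbo.use()                                            # subsequent draw calls target the FBO
ctx.clear(0.0, 0.0, 0.0, 1.0)                        # clears the bound FBO's color and depth
pixels = fbo.read(components=3)                      # raw RGB bytes, len == 640 * 480 * 3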
Returns current fbo as an ndarray | def snapshot2(self) -> np.array:
fbo = self.fbo
data = fbo.read(components=3, dtype='f4')
w, h = self.size
return np.flipud(np.frombuffer(data, dtype='f4').reshape((h, w, 3))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_numpy_array(self):\n return self.frame",
"def buffer(self) -> np.ndarray:\n return np.array(self._image_data, copy=False)",
"def array(self):\n return self.get_array()",
"def __array__(self):\n return np.asarray(self.data)",
"def to_numpy(self) -> np.ndarray:\n return self.frame",
"def array(self):\n return np.asarray(self)",
"def get_fre_band_arr(self):\n\n fn = self._lib['cwtObj_getFreBandArr']\n fn.argtypes = [POINTER(OpaqueCWT)]\n fn.restype = c_void_p\n p = fn(self._obj)\n ret = np.frombuffer((c_float * self.num).from_address(p), np.float32).copy()\n return ret",
"def __array__(self):\n return self.to_array()",
"def __array__(self):\n return self.array",
"def getBufferedData(self):\n if not self.ringBuffer: # first time when buffer is empty\n return np.zeros((1, self.windowLength, self.sensorChannels)) \n return np.array(self.ringBuffer)",
"def read(self) -> np.ndarray:\n return self[self._head]",
"def fobj(self):\n return self._fobj",
"def numpy(self):\n return self.data",
"def get_array(self) -> numpy.array:\r\n \r\n return self.pic_array",
"def get_bfst(self):\n return array(self.LIST2,int)",
"def get_fx(self):\n return self.fx[:self.nump, :]",
"def to_fits_array(self):\n return self.data",
"def get_array(self):\n return numpy.array(self._ar)",
"def __call__(self):\n return self.array",
"def arr(self):\n return self._arr",
"def get(self):\r\n return self.data_array",
"def data_array(self):\n return self._data_array",
"def vbo( self, mode ):\n uploaded = mode.cache.getData( self, 'buffer' )\n if uploaded is None:\n uploaded = vbo.VBO( \n self.buffer, \n usage=self.gl_usage(), \n target=self.gl_target(),\n ) # TODO: stream type\n holder = mode.cache.holder( self, uploaded, 'buffer' )\n holder.depend( self, 'buffer' )\n return uploaded",
"def getArray2d(self):\n\t\treturn self.array2d",
"def to_array(self):\n return np.array(self.to_image())",
"def array(self):",
"def asarray(self):\n from numpy import asarray\n return asarray(self)",
"def data(self) -> np.ndarray:\n return self._data",
"def get_array(self,vname):\n v=self.f.variables[vname]\n v=v[self.istep,:,:]\n v=pylab.flipud(v)\n return v",
"def pc_output_buffers_full_var(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.beamformer_sptr_pc_output_buffers_full_var(self, *args)"
] | [
"0.6436365",
"0.6390906",
"0.63370705",
"0.63307345",
"0.6315179",
"0.63022697",
"0.6293401",
"0.6287471",
"0.62845415",
"0.6267776",
"0.62338114",
"0.62107676",
"0.6197706",
"0.61791164",
"0.6142685",
"0.60981643",
"0.6054168",
"0.6038342",
"0.6030088",
"0.60291946",
"0.60286266",
"0.60162175",
"0.59991795",
"0.59402907",
"0.5932502",
"0.5922387",
"0.58866477",
"0.5883718",
"0.5847052",
"0.58452594"
] | 0.6831023 | 0 |
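The snapshot2 entry above returns a float RGB array; a small helper like the following sketch (assuming Pillow is available, with the [0, 1] value range implied by the 'f4' readback) turns such an array into a saved PNG. The name save_snapshot is an illustrative choice, not part of the original code:

import numpy as np
from PIL import Image

def save_snapshot(img: np.ndarray, path: str) -> None:
    # img is an (h, w, 3) float array in [0, 1], e.g. the return value of snapshot2()
    rgb8 = (np.clip(img, 0.0, 1.0) * 255).astype(np.uint8)
    Image.fromarray(rgb8, mode='RGB').save(path)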
Create a monitor by connecting to the kernel daemon through netlink. | def from_netlink(cls, context, source='udev'):
if source not in ('kernel', 'udev'):
raise ValueError('Invalid source: {0!r}. Must be one of "udev" '
'or "kernel"'.format(source))
monitor = context._libudev.udev_monitor_new_from_netlink(
context, ensure_byte_string(source))
if not monitor:
raise EnvironmentError('Could not create udev monitor')
return cls(context, monitor) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __initializeMonitor( self ):\n if self.__moduleProperties[ 'standalone' ]:\n self.monitor = gMonitor\n else:\n self.monitor = MonitoringClient()\n self.monitor.setComponentType( self.monitor.COMPONENT_AGENT )\n self.monitor.setComponentName( self.__moduleProperties[ 'fullName' ] )\n self.monitor.initialize()\n self.monitor.registerActivity( 'CPU', \"CPU Usage\", 'Framework', \"CPU,%\", self.monitor.OP_MEAN, 600 )\n self.monitor.registerActivity( 'MEM', \"Memory Usage\", 'Framework', 'Memory,MB', self.monitor.OP_MEAN, 600 )\n # Component monitor\n for field in ( 'version', 'DIRACVersion', 'description', 'platform' ):\n self.monitor.setComponentExtraParam( field, self.__codeProperties[ field ] )\n self.monitor.setComponentExtraParam( 'startTime', Time.dateTime() )\n self.monitor.setComponentExtraParam( 'cycles', 0 )\n self.monitor.disable()\n self.__monitorLastStatsUpdate = time.time()",
"async def _async_start_monitor(self) -> None:\n if not sys.platform.startswith(\"linux\"):\n return\n info = await system_info.async_get_system_info(self.hass)\n if info.get(\"docker\"):\n return\n\n from pyudev import ( # pylint: disable=import-outside-toplevel\n Context,\n Monitor,\n MonitorObserver,\n )\n\n try:\n context = Context()\n except (ImportError, OSError):\n return\n\n monitor = Monitor.from_netlink(context)\n try:\n monitor.filter_by(subsystem=\"tty\")\n except ValueError as ex: # this fails on WSL\n _LOGGER.debug(\n \"Unable to setup pyudev filtering; This is expected on WSL: %s\", ex\n )\n return\n observer = MonitorObserver(\n monitor, callback=self._device_discovered, name=\"usb-observer\"\n )\n observer.start()\n\n def _stop_observer(event: Event) -> None:\n observer.stop()\n\n self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_observer)\n self.observer_active = True",
"def _start_monitor(instance=\"default\"):\n global logger_ic\n logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)\n logger_ic.info(\"ipmi-console monitor thread starts to run.\")\n monitor_thread = threading.Thread(target=monitor, args=(instance,))\n monitor_thread.setDaemon(True)\n monitor_thread.start()",
"def launch(**kwargs):\n\n logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)\n config = load_config(kwargs['config'], logger=logger)\n\n\n ao_client = find_client(logger, clients, 'nidaqmx')\n ai_client = find_client(logger, clients, 'nidaqmx_ai')\n\n # Instantiate Monitor script\n laser_stabilizer = LaserStabilizer(\n config=kwargs['config'],\n ao_client=ao_client,\n ai_client=ai_client\n )\n\n update_service = Service()\n update_service.assign_module(module=laser_stabilizer)\n update_service.assign_logger(logger=logger)\n update_server, update_port = create_server(update_service, logger, host=get_ip())\n logger.update_data(data={'port': update_port})\n laser_stabilizer.gui.set_network_info(port=update_port)\n update_server.start()\n\n # Run continuously\n # Note that the actual operation inside run() can be paused using the update server\n while True:\n\n laser_stabilizer.run()",
"def set_monitor(w_card):\n\n # standard name for the monitor interfaces\n mon_id = \"mon{}\".format(w_card.phy)\n\n if mon_id not in pyw.winterfaces():\n # this monitor interface is not set\n # then create a new one\n m_card = pyw.devadd(w_card, mon_id, 'monitor')\n\n # remove obsolete interface\n pyw.devdel(w_card)\n\n return m_card\n\n return None",
"def initialize(self, process_monitor):\n self.process_monitor = process_monitor\n self.radvd = ra.DaemonMonitor(self.router_id,\n self.ns_name,\n process_monitor,\n self.get_internal_device_name)\n\n if self.router_namespace:\n self.router_namespace.create()",
"def create_health_monitor(self, body=None):\r\n return self.post(self.health_monitors_path, body=body)",
"def create_health_monitor(request, **kwargs):\n data = request.DATA\n\n conn = get_sdk_connection(request)\n health_mon = conn.load_balancer.create_health_monitor(\n type=data['monitor']['type'],\n delay=data['monitor']['delay'],\n timeout=data['monitor']['timeout'],\n max_retries=data['monitor']['max_retries'],\n max_retries_down=data['monitor']['max_retries_down'],\n pool_id=kwargs['pool_id'],\n http_method=data['monitor'].get('http_method'),\n url_path=data['monitor'].get('url_path'),\n expected_codes=data['monitor'].get('expected_codes'),\n admin_state_up=data['monitor'].get('admin_state_up'),\n name=data['monitor'].get('name')\n )\n\n return _get_sdk_object_dict(health_mon)",
"def createNet(self):\n\n sw = OVSKernelSwitch\n topo = G2Topo(self.config.topoData)\n ctrl = RemoteController('c', ip=REMOTE_CONTROLLER_IP, port=CONTROLLER_PORT)\n\n # Default link parameters.\n # HTB: Hierarchical Token Bucket rate limiter.\n spec = self.config.topoData['defaultLinkInfo']\n if spec:\n mybw = float(spec['bw'])\n mydelay = spec['delay']\n myloss = float(spec['loss'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss)\n if spec['max_queue_size'] != 'N/A' and spec['use_htb'] == 'N/A':\n myqueue = int(spec['max_queue_size'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, max_queue_size=myqueue)\n if spec['max_queue_size'] == 'N/A' and spec['use_htb'] != 'N/A':\n myhtb = bool(spec['use_htb'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, use_htb=myhtb)\n if spec['max_queue_size'] != 'N/A' and spec['use_htb'] != 'N/A':\n myqueue = int(spec['max_queue_size'])\n myhtb = bool(spec['use_htb'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, max_queue_size=myqueue, use_htb=myhtb)\n else:\n # No spec for default parameters, using Mininet defaults.\n info(\"**** [G2]: using Mininet default parameters for links other than those configured in link_info \\n\")\n link = TCLink\n\n # Configure bw, delay, loss, etc. for some links that are specified in config file.\n for spec in self.config.topoData['linkInfos']:\n src = spec['src']\n dst = spec['dst']\n try:\n linkInfo = topo.linkInfo(src, dst)\n if spec['bw'] != 'N/A':\n linkInfo['bw'] = float(spec['bw']) # Mbit\n if spec['delay'] != 'N/A':\n linkInfo['delay'] = spec['delay'] # ms\n if spec['loss'] != 'N/A':\n linkInfo['loss'] = float(spec['loss']) # Percentage\n if spec['max_queue_size'] != 'N/A':\n linkInfo['max_queue_size'] = int(spec['max_queue_size'])\n if spec['use_htb'] != 'N/A':\n linkInfo['use_htb'] = bool(spec['use_htb'])\n\n topo.setlinkInfo(src,dst,linkInfo)\n except KeyError:\n info(\"**** [G2]: no link exists between switch pair (%s, %s) \\n\" %(src, dst))\n\n # Assign a fraction of overall CPU time to Mininet hosts.\n nHosts = float(len(self.config.topoData['hosts']))\n cpuHostFrac = 0.50/nHosts\n # 'cpu' is the fraction of CPU that each host would get.\n # Indirectly, it sets 'cpu.cfs_quota_us': the total available run-time within a period (in microseconds).\n # Mininet uses the following scheme: cfs_quota_us = (cpuHostFrac * nCPU * period_us) microseconds.\n # 'period_us' sets cpu.cfs_period_us.\n # Larger period would allow for increased burst capacity.\n host = custom(CPULimitedHost, cpu=cpuHostFrac, period_us=100000)\n\n net = Mininet(topo=topo,\n host=host,\n switch=sw,\n controller=ctrl,\n waitConnected=True,\n autoStaticArp=True,\n link=link)\n\n # Create a default route for each host.\n # Turn on tcpdump on each host if debug mode is on.\n for hs in topo.hosts():\n net.getNodeByName(hs).setDefaultRoute(intf='%s-eth0' %hs) # 1st interface on hosts is hi-eth0\n if self.config.isDebug:\n net.getNodeByName(hs).cmd('tcpdump -w %s.pcap -i %s-eth0 &' %(hs,hs))\n return net",
"def test_monitor_creation(processor, measure, dialog_sleep):\n def run(measure):\n t = Thread(target=processor._start_monitors, args=(measure,))\n t.start()\n while t.is_alive():\n process_app_events()\n sleep(0.001)\n process_app_events()\n sleep(dialog_sleep)\n\n processor.engine = processor.plugin.create('engine', 'dummy')\n\n measure.add_tool('monitor', 'dummy')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 1\n\n measure.add_tool('monitor', 'dummy2')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n measure.remove_tool('monitor', 'dummy2')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 1\n\n measure.add_tool('monitor', 'dummy3')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n measure.add_tool('monitor', 'dummy4')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n processor.plugin.stop()\n assert not processor.monitors_window",
"def create_healthmonitor(self, context, healthmonitor):\n LOG.info(\"Received request 'Create Pool Health Monitor' for\"\n \"Health monitor:%(hm)s\",\n {'hm': healthmonitor['id']})\n arg_dict = {'context': context,\n lb_const.HEALTHMONITOR: healthmonitor\n }\n self._send_event(lb_const.EVENT_CREATE_HEALTH_MONITOR_V2,\n arg_dict, serialize=True,\n binding_key=healthmonitor[lb_const.POOL][\n 'loadbalancer_id'],\n key=healthmonitor['id'])",
"def startup(self):\n for v in self.virt_nodes:\n v.create()\n \n \"\"\" scan for nodes \"\"\"\n self.scan_for_nodes()\n \n \"\"\" connect to all nodes and call setup \"\"\"\n for n in self.scan_nodes:\n n.connect()\n \n ''' list of open addresses for the node '''\n oalist = []\n \n ''' if the multicast interface is defined use it as open address '''\n if self.mcast_interface != \"\":\n oalist.append(self.mcast_interface)\n \n ''' open the connection to the default address of the slave '''\n oalist.append(socket.gethostbyname(socket.gethostname()))\n \n ''' read the monitor node list '''\n monitor_list = open(os.path.join(self.workdir, \"monitor-nodes.txt\"), \"r\")\n for maddress in monitor_list.readlines():\n oalist.append(maddress.strip())\n \n ''' call the setup procedure '''\n n.setup(oalist)",
"def __init__(self):\r\n self.client_socket = socket.socket() # the socket of the client.\r\n self.communicator = Communicator()\r\n self.events_handler = EventsHandler(self.client_socket)\r\n self.running = True\r\n self.display_resolution = DEFAULT_DISPLAY_RESOLUTION\r\n self.screen = self.get_display()",
"def set_monitor_mode(controller_name):\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"down\"])\n subprocess.run([\"airmon-ng\", \"check\", \"kill\"])\n subprocess.run([\"iw\", wifi_name, \"set\", \"monitor\", \"none\"])\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"up\"])",
"def launch ():\n def start_switch (event):\n log.info(\"switch %s has come up\" % event.dpid)\n log.info(event.connection.ports)\n sw = switches_by_dpid.get(event.dpid)\n\n if sw is None:\n # New switch\n sw = TopoSwitch(event.connection)\n switches_by_dpid[event.dpid] = sw\n sw.connect(event.connection)\n else:\n sw.connect(event.connection)\n core.openflow.addListenerByName(\"ConnectionUp\", start_switch)",
"def launch():\n\n core.openflow.addListenerByName(\"ConnectionUp\", _handle_ConnectionUp)\n log.info(\"Hub running\")",
"def start_nanny():\n global listener\n log.info(\"%s %s.\", settings.MUD_NAME_FULL, __version__)\n listener = TelnetServer(address=settings.BIND_ADDRESS,\n port=settings.BIND_PORT,\n timeout=0,\n create_client=False)\n channels.subscribe(**{\"server-reload-request\": _handle_reload_request})\n server = ServerProcess()\n listener.on_connect = _on_connect\n server.start()\n servers[server.pid] = server\n try:\n while True:\n dead_servers = []\n for server in servers.values():\n if not server.alive:\n log.debug(\"Process %s finished with code %s.\",\n server.pid, server.exit_code)\n dead_servers.append(server)\n for server in dead_servers:\n del servers[server.pid]\n if not servers:\n log.info(\"No servers running, goodbye.\")\n break\n listener.poll()\n channels.get_message()\n sleep(0.1)\n except KeyboardInterrupt: # pragma: no cover\n pass\n finally:\n listener.stop()\n channels.unsubscribe() # pragma: no cover",
"def open_ssh_tunnel(log, config, server, ssh_port=622, timeout=5, ipc_wait_file=5):\n\n user = config[\"user\"]\n\n # server must be visable for now\n try:\n server_info = socket.gethostbyaddr(server)\n except socket.herror:\n raise TunnelError(\"host %s is inaccessible\" % server)\n except socket.gaierror as e:\n raise TunnelError(str(e))\n\n # make sure the kernel isn't on localhost\n if server_info[0] == \"localhost\":\n log(\"kernel on localhost - nothing to do\")\n return\n\n # no gui password prompt\n env = os.environ.copy()\n env.pop(\"SSH_ASKPASS\", None)\n\n if try_ssh(log, server, ssh_port, env):\n mode = \"ssh\"\n elif try_mrsh(log, server, ssh_port, env):\n mode = \"mrsh\"\n else:\n raise TunnelError(\"Unable to connect, tried ssh and mrsh\")\n\n protocol = config[\"protocol\"]\n\n # remote (r) ports are the ports for the machine hosting the kernel\n if protocol == \"ipc\":\n rport = config[\"uds\"]\n lport = \"%s-%s\" % (rport, localhost)\n config[\"uds\"] = lport\n elif protocol == \"tcp\":\n rport = config[\"port\"]\n lport = select_random_ports(1)[0]\n config[\"port\"] = lport\n else:\n raise TunnelError(\"Unsupported protocol %s\" % protocol)\n\n log(\"attempting to create tunnels from %s@%s to %s@%s\" % (protocol, localhost,\n protocol, server))\n\n ssh_tunnel(log, mode, ltransport=protocol, lport=lport,\n rtransport=protocol, rport=rport,\n server=server, user=user, ssh_port=ssh_port)\n\n if protocol == \"ipc\":\n while not os.path.exists(lport) and ipc_wait_file > 0:\n log(\"waiting for local ipc socket - %d\" % ipc_wait_file)\n time.sleep(1)\n ipc_wait_file -= 1\n if not os.path.exists(lport):\n raise TunnelError(\"local ipc socket doesn't exist: %s\" % lport)\n elif protocol == \"tcp\":\n time.sleep(2)",
"def connect(self):\n self.snmp_client = SNMPClient(host=self.host,\n read_community=self.read_community,\n write_community=self.write_community,\n port=self.port,\n version=self.version,\n log=self.log)",
"def createMachine():\n cd('/')\n machine = create(machineName, 'UnixMachine')\n cd('Machines/'+machineName+'/NodeManager/'+machineName)\n cmo.setName(machineName)\n cmo.setListenAddress(hostname)",
"async def start_monitor(self):\n self._logger.info(\"Starting monitor...\")\n org1_admin = self.fabric_client.get_user(org_name='org1.example.com', name='Admin')\n\n self._logger.info(\"Starting monitor...\")\n cmd = \"/home/martijn/go/bin/go run \" \\\n \"/home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/fabric-cli.go event listenblock \" \\\n \"--cid mychannel --peer localhost:8001 \" \\\n \"--config /home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/config.yaml\"\n out_file = open(\"transactions.txt\", \"w\")\n my_env = os.environ.copy()\n my_env[\"GOPATH\"] = \"/home/martijn/gocode\"\n self.monitor_process = subprocess.Popen(cmd.split(\" \"), env=my_env, stdout=out_file,\n cwd=\"/home/martijn/fabric-examples/fabric-cli/cmd/fabric-cli/\")\n\n async def get_latest_block_num():\n self._logger.info(\"Getting latest block nr...\")\n response = await self.fabric_client.query_info(\n requestor=org1_admin,\n channel_name='mychannel',\n peers=['peer0.org1.example.com'],\n decode=True\n )\n print(response)\n\n latest_block = response.height\n if latest_block > self.latest_block_num:\n self._logger.info(\"Updating to block nr %d\", latest_block)\n old_latest_block_num = self.latest_block_num\n self.latest_block_num = latest_block\n confirm_time = int(round(time.time() * 1000))\n for confirmed_block_num in range(old_latest_block_num + 1, latest_block + 1):\n self.block_confirm_times[confirmed_block_num] = confirm_time\n\n self.monitor_lc = run_task(get_latest_block_num, interval=0.1)",
"def test_create_healthmonitor_with_mandatory_params(self):\r\n resource = 'health_monitor'\r\n cmd = healthmonitor.CreateHealthMonitor(test_cli20.MyApp(sys.stdout),\r\n None)\r\n admin_state_up = False\r\n delay = '60'\r\n max_retries = '2'\r\n timeout = '10'\r\n type = 'TCP'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--admin-state-down',\r\n '--delay', delay,\r\n '--max-retries', max_retries,\r\n '--timeout', timeout,\r\n '--type', type,\r\n '--tenant-id', tenant_id]\r\n position_names = ['admin_state_up', 'delay', 'max_retries', 'timeout',\r\n 'type', 'tenant_id']\r\n position_values = [admin_state_up, delay, max_retries, timeout, type,\r\n tenant_id]\r\n self._test_create_resource(resource, cmd, '', my_id, args,\r\n position_names, position_values)",
"def attach(cls, monitor_schedule_name, sagemaker_session=None):\n sagemaker_session = sagemaker_session or Session()\n schedule_desc = sagemaker_session.describe_monitoring_schedule(\n monitoring_schedule_name=monitor_schedule_name\n )\n monitoring_type = schedule_desc[\"MonitoringScheduleConfig\"].get(\"MonitoringType\")\n if monitoring_type != cls.monitoring_type():\n raise TypeError(\n \"{} can only attach to ModelExplainability schedule.\".format(__class__.__name__)\n )\n job_definition_name = schedule_desc[\"MonitoringScheduleConfig\"][\n \"MonitoringJobDefinitionName\"\n ]\n job_desc = sagemaker_session.sagemaker_client.describe_model_explainability_job_definition(\n JobDefinitionName=job_definition_name\n )\n tags = sagemaker_session.list_tags(resource_arn=schedule_desc[\"MonitoringScheduleArn\"])\n return ClarifyModelMonitor._attach(\n clazz=cls,\n sagemaker_session=sagemaker_session,\n schedule_desc=schedule_desc,\n job_desc=job_desc,\n tags=tags,\n )",
"def connect():\n if not is_notebook():\n print('Python session is not running in a Notebook Kernel')\n return\n\n global _comm\n\n kernel = get_ipython().kernel\n kernel.comm_manager.register_target('tdb', handle_comm_opened)\n # initiate connection to frontend.\n _comm = Comm(target_name='tdb', data={})\n # bind recv handler\n _comm.on_msg(None)",
"def newMonitor(self, monitorName, monitorType):\n if monitorName in self._ShREEKMonitors.keys():\n msg = \"Tried to add Duplicate monitor:\\n\"\n msg += \"%s\\n\" % monitorName\n msg += \"To ShREEKInterface, existsing names:\\n\"\n msg += str(self._ShREEKMonitors.keys())\n raise ShREEKException(\n msg, ClassInstance = self,\n DuplicateName = monitorName,\n ExistingNames = self._ShREEKMonitors.keys())\n \n newMonitor = ShREEKMonitorCfg(MonitorName = monitorName,\n MonitorType = monitorType)\n \n self._ShREEKMonitors[monitorName] = newMonitor\n self._ShREEKConfig.addMonitorCfg(newMonitor)\n return",
"def pedrpc_connect(self):\n # If the process monitor is alive, set it's options\n if self.procmon:\n while 1:\n if self.procmon.alive():\n break\n\n time.sleep(1)\n\n # connection established.\n for key, value in self.procmon_options.items():\n getattr(self.procmon, 'set_{0}'.format(key))(value)\n\n # If the network monitor is alive, set it's options\n if self.netmon:\n while 1:\n if self.netmon.alive():\n break\n\n time.sleep(1)\n\n # connection established.\n for key in self.netmon_options.keys():\n eval('self.netmon.set_%s(self.netmon_options[\"%s\"])' % (key, key))",
"def test_initialize_mutornadmon(self, mutornadomon_mock):\n result = initialize_mutornadomon(sentinel.application,\n host_limit='test')\n monitor_inst = mutornadomon_mock.MuTornadoMon.return_value\n\n # initialize_mutornadomon() should return the monitor instance\n self.assertEqual(result, monitor_inst)\n\n # MuTornadoMon was created with monitor config values\n mutornadomon_mock.MuTornadoMon.assert_called_once_with(\n host_limit='test')\n\n # Monitor instance was registered with tornado application\n monitor_inst.register_application.assert_called_once_with(\n sentinel.application)",
"def __init__(self, dev_id, devname):\n enocean.EnOceanDevice.__init__(self)\n self.stype = \"listener\"\n self.dev_id = dev_id\n self.which = -1\n self.onoff = -1\n self.devname = devname",
"def monitor(instance=\"default\"):\n global logger_ic\n while True:\n try:\n with open(\"{}/{}/.{}-bmc.pid\".format(\n config.infrasim_home, instance, instance), \"r\") as f:\n pid = f.readline().strip()\n if not os.path.exists(\"/proc/{}\".format(pid)):\n logger_ic.warning(\"Node {} vBMC {} is not running, \"\n \"ipmi-console is ready to quit\".\n format(instance, pid))\n break\n time.sleep(3)\n except IOError:\n logger_ic.warning(\"Node {} workspace is possibly destroyed, \"\n \"ipmi-console is ready to quit\".format(instance))\n break\n stop(instance)",
"def event_monitor(self, event_monitor_path=\"\", action=\"start\"):\n device_name = self.find_iio_device_name()\n if not event_monitor_path:\n self.is_bin_exist(\"iio_event_monitor\", silent_discard=False)\n event_monitor_path = \"iio_event_monitor\"\n if action == \"start\":\n self.console.runcmd(\n f\"{event_monitor_path} {device_name} &\",\n err_msg=\"Event Monitor Initialisation Failed\",\n timeout=50,\n )\n elif action == \"stop\":\n self.console.runcmd(\n f\"pidof {event_monitor_path} {device_name}\", expected=\"\\r\\n\"\n )\n pid_no = self.console.output()\n if pid_no:\n self.console.runcmd(f\"kill -9 {pid_no}\")\n else:\n assert False, \"Not a valid action for event_monitor\""
] | [
"0.5842441",
"0.5641095",
"0.55806166",
"0.5491063",
"0.54298496",
"0.53146154",
"0.5299109",
"0.5246322",
"0.5216399",
"0.52025867",
"0.5193295",
"0.51505154",
"0.5142829",
"0.5134207",
"0.5082128",
"0.5077155",
"0.50586104",
"0.50409234",
"0.50360125",
"0.49993685",
"0.49617743",
"0.495448",
"0.4953593",
"0.4927135",
"0.4921466",
"0.49185333",
"0.49142042",
"0.49017078",
"0.48994836",
"0.48893386"
] | 0.629272 | 0 |
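The from_netlink entry above is the usual entry point for udev monitoring; a hedged usage sketch with pyudev (the 'block' subsystem is only an example) could look like this:

from pyudev import Context, Monitor

context = Context()
monitor = Monitor.from_netlink(context, source='udev')
monitor.filter_by(subsystem='block')             # kernel-side filter, see the filter_by entry below
monitor.start()
for device in iter(monitor.poll, None):          # poll() blocks until the next event arrives
    print(device.action, device.device_node)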
Connect to an arbitrary udev daemon using the given ``socket_path``. | def from_socket(cls, context, socket_path):
monitor = context._libudev.udev_monitor_new_from_socket(
context, ensure_byte_string(socket_path))
if not monitor:
raise EnvironmentError('Could not create monitor for socket: '
'{0!r}'.format(socket_path))
return cls(context, monitor, socket_path=socket_path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect_socket(root_dir):\n # Get config directory where the daemon socket is located\n config_dir = os.path.join(root_dir, '.config/pueue')\n\n # Create Socket and exit with 1, if socket can't be created\n try:\n client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n socket_path = os.path.join(config_dir, 'pueue.sock')\n if os.path.exists(socket_path):\n client.connect(socket_path)\n else:\n print(\"Socket doesn't exist\")\n raise Exception\n except:\n print(\"Error connecting to socket. Make sure the daemon is running\")\n sys.exit(1)\n return client",
"def open_client_socket():\n\n client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n client_socket.bind(CLIENT_SOCKET_FILE)\n\n return client_socket",
"def unix_connect(path, service=VoidService, config={}):\n s = SocketStream.unix_connect(path)\n return connect_stream(s, service, config)",
"def __init__(self, sock_path):\n\n self.socket_path = sock_path\n self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)",
"def ConnectDevice(\n self, *args, **kwargs\n ): # pylint: disable=invalid-name, no-self-use\n raise socket_error",
"def bind_sock_to_device(sock, dev=''):\n # TODO: use IP_PKTINFO\n # TODO: move to sockios\n sock.setsockopt(socket.SOL_SOCKET, SO_BINDTODEVICE, dev)",
"def connect_device(uri):\n d = urlparse(uri)\n platform = d.scheme\n host = d.netloc\n uuid = d.path.lstrip(\"/\")\n params = dict(parse_qsl(d.query))\n if host:\n params[\"host\"] = host.split(\":\")\n dev = init_device(platform, uuid, **params)\n return dev",
"def make_socket(remote_host):\n port_src = 5556\n port_dest = 5557\n sock = socket(AF_INET, SOCK_DGRAM)\n sock.bind(('', port_src))\n sock.connect((remote_host, port_dest))\n return sock",
"def unix_socket(self, path: str) -> \"Socket\":\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"unixSocket\", _args)\n return Socket(_ctx)",
"def connectSocket(socketID, channel_name):\n log.debug('Connecting socket with id %d to channel %s', socketID, channel_name)\n socketID = ctypes.c_int(socketID)\n\n ifr = IFREQ()\n ifr.ifr_name = channel_name.encode('ascii')\n log.debug('calling ioctl SIOCGIFINDEX')\n # ifr.ifr_ifindex gets filled with that device's index\n libc.ioctl(socketID, SIOCGIFINDEX, ctypes.byref(ifr))\n log.info('ifr.ifr_ifindex: %d', ifr.ifr_ifindex)\n\n # select the CAN interface and bind the socket to it\n addr = SOCKADDR_CAN(AF_CAN, ifr.ifr_ifindex)\n\n error = libc.connect(socketID, ctypes.byref(addr), ctypes.sizeof(addr))\n\n if error < 0:\n log.error(\"Couldn't connect socket\")\n log.debug('connect returned: %d', error)\n\n return error",
"def _connect_socket(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((self.ip, self.port))\n print(\"Connected to %s at port %d\" % (self.ip, self.port))",
"def _create_socket():\n sock = socket.socket()\n return sock",
"def connect(self):\n\n log.info('Connecting to device \"{0}\" using {1} at \"{2}\".'.format(\n self.name, self.driver, self.connection_resource))\n\n if self.driver == drivers.pyvisa:\n try:\n if not (legacyVisa):\n rm = pyvisa.ResourceManager()\n self.device = rm.open_resource(**self.connection_resource)\n else:\n self.device = pyvisa.Instrument(**self.connection_resource)\n except pyvisa.VisaIOError as e:\n raise DeviceNotFoundError(\n 'Could not open device at \"{0}\".'.format(self.connection_resource), e)\n\n elif self.driver == drivers.telnet:\n self.device = telnetlib.Telnet(\n timeout=2, **self.connection_resource)\n elif self.driver == drivers.requests:\n r = requests.get(self.request_address)\n if r.status_code != 200:\n raise DeviceNotFoundError(\n 'Could not connect to device at \"{0}\".'.format(self.connection_resource), e)\n\n elif self.driver == drivers.lgpib:\n try:\n self.device = Gpib.Gpib(**self.connection_resource)\n except gpib.GpibError as e:\n raise DeviceNotFoundError(\n 'Could not open device at \"{0}\".'.format(self.connection_resource), e)\n elif self.driver == drivers.pyvisa_usb:\n try:\n if not (legacyVisa):\n rm = pyvisa.ResourceManager()\n self.device = rm.open_resource(**self.connection_resource)\n else:\n class USBDevice(pyvisa.Instrument):\n \"\"\"\n Using USB devices with PyVISA requires a small hack: the object must be an Instrument, but we can't call Instrument.__init__.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # Bypass the initialization in visa.Instrument, due to \"send_end\" not being valid for USB.\n pyvisa.ResourceTemplate.__init__(\n self, *args, **kwargs)\n\n self.device = USBDevice(**self.connection_resource)\n\n except pyvisa.VisaIOError as e:\n raise DeviceNotFoundError(\n 'Could not open device at \"{0}\".'.format(self.connection_resource), e)\n\n try:\n self._connected()\n except Exception as e:\n raise DeviceNotFoundError('Could not finish connection to device at \"{0}\".'.format(\n self.connection_resource), e)",
"def start_client(socket_path):\n # create a robot object\n magellan = robot.Robot(x=0, y=-4, phi=0)\n magellan.set_map([[0, 0], [3, 5], [5, -7], [-1, 8], [-8, -9], [-8, 8]])\n\n # open a socket\n if os.path.exists(socket_path):\n client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n client.connect(socket_path)\n print(\"Initialised client socket.\")\n print(\"Sending 'DONE' shuts down the server and quits.\")\n for _ in range(1000):\n magellan.move([1, 0.25])\n cur_state = magellan.get_state().tostring()\n readings = magellan.sense().reshape(-1, 1).tostring()\n client.send(cur_state)\n client.send(readings)\n client.send(\"DONE\".encode('utf-8'))\n client.close()\n else:\n print(\"Couldn't Connect! to {}\".format(socket_path))\n print(\"Shutting down the worker\")",
"def ConnectSocket(self) -> Socket:",
"def build_socket(self):\n sock = socket(AF_UNIX, SOCK_SEQPACKET)\n sock.connect(self.my_id)\n\n return sock",
"def connect_socket(self):\n try:\n self.socket.connect((self.request.host, int(self.request.port)))\n except socket.gaierror:\n raise socket.gaierror(\"Socket connection could not be established\")\n except socket.timeout:\n raise socket.timeout(\"Socket connection timed out\")\n except InterruptedError:\n raise InterruptedError(\"Socket connection has been interrupted by a signal\")",
"def connect(self, params, connect_timeout=_CONNECT_TIMEOUT):\n if connect_timeout is not None:\n connect_timeout = connect_timeout / 1000 # Convert to seconds\n try:\n self._socket = socket.create_connection(params, connect_timeout)\n self._host = params[0]\n except ValueError:\n try:\n self._socket = socket.socket(socket.AF_UNIX)\n self._socket.settimeout(connect_timeout)\n self._socket.connect(params)\n self._is_socket = True\n except AttributeError:\n raise InterfaceError(\"Unix socket unsupported\") from None\n self._socket.settimeout(None)",
"def connect(spec, timeout=None, nagle_off=True, cache=0,\n _cache=_connect_cache, _lock=_connect_cache_lock):\n # pylint: disable = W0102, R0912, R0915\n\n sock = None\n try:\n adi = None\n if cache > 0:\n _lock.acquire()\n try:\n if spec in _cache:\n adi, stamp = _cache[spec]\n if stamp < _datetime.datetime.utcnow():\n del _cache[spec]\n adi = None\n finally:\n _lock.release()\n if adi is None:\n if isinstance(spec, str):\n try:\n AF_UNIX = _socket.AF_UNIX\n except AttributeError:\n raise NotImplementedError(\n \"UNIX domain sockets are not supported\"\n )\n adi = [(AF_UNIX, _socket.SOCK_STREAM, 0, None, spec)]\n else:\n adi = _socket.getaddrinfo(spec[0], spec[1],\n _socket.AF_UNSPEC, _socket.SOCK_STREAM, 0, 0)\n if cache > 0:\n _lock.acquire()\n try:\n if spec not in _cache:\n _cache[spec] = (\n adi,\n _datetime.datetime.utcnow()\n + _datetime.timedelta(seconds=cache),\n )\n finally:\n _lock.release()\n\n AF_INET6 = getattr(_socket, 'AF_INET6', None)\n for family, stype, proto, _, addr in adi:\n if not _socket.has_ipv6 and family == AF_INET6:\n continue # skip silenty if python was built without it.\n\n sock = _socket.socket(family, stype, proto)\n sock.settimeout(timeout)\n retry = True\n while retry:\n try:\n sock.connect(addr)\n except _socket.timeout:\n break\n except _socket.error, e:\n if e[0] == _errno.EINTR:\n continue\n elif e[0] in (_errno.ENETUNREACH, _errno.ECONNREFUSED):\n break\n raise\n retry = False\n else:\n if nagle_off:\n disable_nagle(sock)\n return sock\n sock.close()\n except (_socket.error, IOError):\n try:\n raise_socket_error(timeout=timeout)\n except SocketError:\n e = _sys.exc_info()\n try:\n if sock is not None:\n sock.close()\n finally:\n try:\n raise e[0], e[1], e[2]\n finally:\n del e\n return None",
"def _connect_to_target(self, host):\n port = 80\n if ':' in host:\n host, _, port = host.partition(':')\n (socket_family, _, _, _, address) = socket.getaddrinfo(host, port)[0]\n self.target = socket.socket(socket_family)\n self.target.connect(address)",
"def get_socket():\n return socket.create_connection((HOST, PORT))",
"def __connect():\n # Create socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect socket to server\n sock.connect((SERVER_IP, SERVER_PORT))\n\n # Return connected socket\n return sock",
"def SlimConnect(self, fd= -1, s_port = 3000, s_host_id_i = 2102 ):\r\n self.s_port_i = s_port\r\n self.s_host_id_i = s_host_id_i\r\n sockaddr_in = struct.pack(\"HHI\",socket.AF_INET, socket.ntohs(self.s_port_i), socket.htonl(self.s_host_id_i))\r\n sockaddr_in_len = 16 #16 is long for sockaddr_in \r\n self.s_addr_in_len = ctypes.c_int(sockaddr_in_len) \r\n \r\n self.log(\"[SlimConnect] \\t sockaddr_in is %s; sockaddr_in len is %s\" % (repr(sockaddr_in), len(sockaddr_in)))\r\n fd_ = self.fd_ if -1 == fd else fd \r\n res = self.so.SlimConnect(fd_, sockaddr_in, self.s_addr_in_len )\r\n self.log(res, \"SlimConnect res is\")\r\n return res",
"def setup_sockets(address):\n udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return udp_sock",
"def connect_to_server(host, port) -> socket.SocketIO:\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = (host, port)\n print('[CLIENT LOG] connecting to {} port {}'.format(host,port)) \n sock.connect(server_address)\n return sock",
"def from_socket(cls, socket, io_loop=None):\n return cls(_from_socket=socket, io_loop=io_loop)",
"def _connect(self):\n try:\n #print(\"try to connect _connect\")\n sock = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(self.remote_address)\n except socket.error as error:\n logger.warning(\"Couldn't connect to %s: %s.\",\n self._repr_remote(), error)\n else:\n self.initialize(sock, self.remote_service_coord)",
"def connect(self):\n \n try:\n self.__sock.connect((self.__host, self.__port))\n\n except socket.error,e:\n print 'Oops, unable to connect. Try again!',e\n sys.exit(1)",
"def connect(self):\n sock = socket.create_connection((self.host, self.port))\n try:\n self.sock = ssl.wrap_socket(sock, keyfile=self.key_file,\n certfile=self.cert_file,\n cert_reqs=self.cert_reqs,\n ca_certs=self.ca_certs)\n except ssl.SSLError, e:\n raise Error('Error validating SSL certificate for \"' + self.host +\n '\": ' + str(e))\n\n if self.cert_reqs == ssl.CERT_REQUIRED:\n self._VerifyHostName(self.host, self.sock.getpeercert())",
"def connect(self):\n if not self._socket:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self.host, self.port))\n self._socket.settimeout(0.0)"
] | [
"0.6658648",
"0.57487077",
"0.57248783",
"0.56902397",
"0.5646829",
"0.5571467",
"0.5560274",
"0.5491816",
"0.5469069",
"0.5340727",
"0.53019065",
"0.5294809",
"0.5244782",
"0.5243381",
"0.5200097",
"0.5186089",
"0.51457965",
"0.51306194",
"0.51290256",
"0.509967",
"0.50901103",
"0.5085437",
"0.5079268",
"0.50459397",
"0.5041611",
"0.5041319",
"0.50238436",
"0.50174123",
"0.50097615",
"0.50067323"
] | 0.6565483 | 1 |
Return the file descriptor associated with this monitor as an integer. This is really a real file descriptor ;), which can be watched and | def fileno(self):
return self._libudev.udev_monitor_get_fd(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def file_desc():\n \n with open('/proc/sys/fs/file-nr') as f:\n line = f.readline()\n \n fd = [int(x) for x in line.split()]\n \n return fd",
"def fileno(self):\r\n return self._fd.fileno()",
"def fileno(self) -> int:",
"def fileno(self):\n return self.channel.fileno()",
"def fileno(self):\n return self._stream.fileno()",
"def fileno(self):\n return self.fd",
"def file_descriptor(self):\n return self._file_descriptor",
"def fileno(self):\n return self.__fd",
"def fileno(self) -> int:\n return self._fd.fileno()",
"def fileno(self):\n return self._handle.fileno()",
"def fileno(self):\n return self._fd",
"def fileno(self):\n\n return self._handle.fileno()",
"def fileno(self):\n return self.file.fileno()",
"def fileno(self):\n return 1",
"def fileno(self):\n return self.listener.fileno()",
"def fileno(self):\n\t\treturn self.socket.fileno()",
"def fileno(self):\n\t\treturn self.socket.fileno()",
"def 10MbFd(self):\n\t\treturn self._get_attribute('10MbFd')",
"def 100MbFd(self):\n\t\treturn self._get_attribute('100MbFd')",
"def fileno(self):\n return self._socket.fileno()",
"def fileno(self):\n return self._socket.fileno()",
"def fileno (self):\n return -1\n #TODO: assign unique pseudo-filenos to mock sockets,\n # so apps don't get confused.",
"def fileno(self):\n return None",
"def fileno(self):\n return None",
"def fileno(self):\r\n raise NotImplementedError()",
"def fileno(self):\n return self.fdWrite",
"def fileno(self):\n return self.fdWrite",
"def fileno(self):\n return self.fdWrite",
"def fileno(self):\n return self.fdWrite",
"def get_file(self) -> int:\r\n return self.file"
] | [
"0.74031085",
"0.67754346",
"0.67130655",
"0.6663038",
"0.6642915",
"0.66414106",
"0.6630495",
"0.66018164",
"0.6600667",
"0.6547988",
"0.6527958",
"0.6447365",
"0.6383324",
"0.6367332",
"0.63505316",
"0.63328177",
"0.63328177",
"0.63305724",
"0.63293976",
"0.6233179",
"0.6233179",
"0.61582226",
"0.6137191",
"0.6137191",
"0.6124437",
"0.6103598",
"0.6103598",
"0.6103598",
"0.6103598",
"0.60626346"
] | 0.7083363 | 1 |
Filter incoming events. ``subsystem`` is a byte or unicode string with the name of a subsystem (e.g. ``'input'``). Only events originating from the given subsystem pass the filter and are handed to the caller. If given, ``device_type`` is a byte or unicode string specifying the device type. Only devices with the given device type are propagated to the caller. If ``device_type`` is not given, no additional filter for a specific device type is installed. These filters are executed inside the kernel, and client processes will usually not be woken up for device, that do not match these filters. | def filter_by(self, subsystem, device_type=None):
subsystem = ensure_byte_string(subsystem)
if device_type:
device_type = ensure_byte_string(device_type)
self._libudev.udev_monitor_filter_add_match_subsystem_devtype(
self, subsystem, device_type)
self._libudev.udev_monitor_filter_update(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_devices(self, subsystem: UdevSubsystem | None = None) -> list[Device]:\n devices = set()\n for device in self.devices:\n if subsystem and device.subsystem != subsystem:\n continue\n devices.add(device)\n return list(devices)",
"def pass_filters(device):\n if opts.filter_on_group:\n if device.owningTeam not in opts.filter_on_group:\n return False\n if opts.filter_on_type:\n if device.deviceType not in opts.filter_on_type:\n return False\n\n return True",
"def _get_tuya_devices_filtered(self, types, exclude_mode=False, type_prefix=True):\n config_list = {}\n types_filter = set(types)\n tuya = self.hass.data[DOMAIN][TUYA_DATA]\n devices_list = tuya.get_all_devices()\n for device in devices_list:\n dev_type = device.device_type()\n exclude = (\n dev_type in types_filter\n if exclude_mode\n else dev_type not in types_filter\n )\n if exclude:\n continue\n dev_id = device.object_id()\n if type_prefix:\n dev_id = f\"{dev_type}-{dev_id}\"\n config_list[dev_id] = f\"{device.name()} ({dev_type})\"\n\n return config_list",
"def device_type(self, device_type):\n allowed_values = [\"active\", \"inactive\", \"all\"]\n if device_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `device_type` ({0}), must be one of {1}\"\n .format(device_type, allowed_values)\n )\n\n self._device_type = device_type",
"def filter_scalings(scaling_list, scaling_type):\n return filter(\n lambda _f: True if scaling_type in _f[\"runname\"] else False,\n scaling_list)",
"def batch_events_by_type(session, type_name):\n subquery = latest_batch_events(session).subquery()\n query = session.query(subquery).filter(subquery.c.event_type == type_name)\n return query",
"def check_device_type(device_type):\n\n if device_type not in (_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU):\n raise ValueError('Invalid device_type \"%s\"'%device_type)",
"def check_channel_subsystem_request(self, channel, name):\n handler_class, larg, kwarg = channel.get_transport()._get_subsystem_handler(name)\n if handler_class is None:\n return False\n handler = handler_class(channel, name, self, *larg, **kwarg)\n handler.start()\n return True",
"def RunGroupFilter(self, context, args):\n if self._parent_group:\n self._parent_group.RunGroupFilter(context, args)\n self._common_type().Filter(context, args)",
"def filter_inspection_type(data, inspection_type):\n return [row for row in data if row['inspection_type'] == inspection_type]",
"def check_subsystem_parents(self, device: Device, subsystem: UdevSubsystem) -> bool:\n udev_device: pyudev.Device = pyudev.Devices.from_sys_path(\n self._udev, str(device.sysfs)\n )\n return udev_device.find_parent(subsystem.value) is not None",
"def get_events(self, type_filter=None):\n\n if type_filter:\n filtered_events = self.__events.get(type_filter, [])\n else:\n filtered_events = [ev for ev_type_list in self.__events.values() for ev in ev_type_list]\n\n return filtered_events",
"def manage_channel_filter():\n if cfg.CONF.standalone:\n return flask.jsonify({\n 'status': False,\n 'code': 'The standalone mode does not support channel filter function'\n })\n if flask.request.method in ['POST', 'DELETE']:\n filter_dict = flask.request.get_json()\n if flask.request.method == 'POST':\n for item in filter_dict['filter']:\n cfg.CONF.rabbit_mq.filter[item['type']][item['value']] = None\n elif flask.request.method == 'DELETE':\n for item in filter_dict['filter']:\n if item['value'] in cfg.CONF.rabbit_mq.filter[item['type']]:\n cfg.CONF.rabbit_mq.filter[item['type']].pop(item['value'])\n return flask.jsonify({\n 'status': True,\n 'code': None\n })\n elif flask.request.method == 'GET':\n filter_list = []\n for filter_type in channel_filter.FILTER_TYPE_LIST:\n for item in cfg.CONF.rabbit_mq.filter[filter_type].keys():\n filter_list.append({\n 'type': filter_type,\n 'value': item\n })\n return flask.jsonify({\n 'filter': filter_list\n })",
"def is_device_type(self, device_type):\n return self.device_type == device_type",
"def _filter(self, location, component=\"Hosting\", compute_type=None):\n filters = [\n [\"TERM_MATCH\", \"location\", location],\n [\"TERM_MATCH\", \"productFamily\", \"ML Instance\"],\n [\"TERM_MATCH\", \"currentGeneration\", \"Yes\"],\n [\"TERM_MATCH\", \"component\", component]\n ]\n if compute_type:\n filters.append([\"TERM_MATCH\", \"computeType\", compute_type])\n return [{\n 'Type': x[0],\n 'Field': x[1],\n 'Value': x[2]\n } for x in filters]",
"def get_measurement_by_type(self, requested_type: str):\n if requested_type == \"continuous\":\n return [x for x in self.measurements if type(x) == ContinuousMeasurement]\n if requested_type == \"discrete\":\n return [x for x in self.measurements if type(x) == DiscreteMeasurement]\n if requested_type == \"complex\":\n return [x for x in self.measurements if type(x) == ComplexMeasurement]\n raise ValueError(\"request_type must be one of: 'continuous', 'discrete', or 'complex'\")",
"def test_filter_device_group(self):\n pass",
"def allowed_device_type(self, device_type):\r\n\r\n assert device_type, \"Device Type is required.\"\r\n\r\n allowed_device_type = ['Intellian_V100_E2S', 'Intellian_V110_E2S',\r\n 'Intellian_V80_IARM', 'Intellian_V100_IARM',\r\n 'Intellian_V100', 'Intellian_V80_E2S',\r\n 'Sailor_900', 'Cobham_500']\r\n\r\n if device_type in allowed_device_type:\r\n return 1\r\n\r\n return 0",
"def subscribe_to(type_to_subscribe_to=None):\n def func_wrapper(func):\n queue.filter(lambda x:isinstance(x, type_to_subscribe_to) or type_to_subscribe_to == None).observe_on(scheduler).subscribe(func)\n return func\n return func_wrapper",
"def process(self, entity):\n\n cid = entity.get_cid()\n \n # If the cid does not match any of the pointers' cid in the\n # all_thread_ptrs list.\n if not any(cid == ptr.get_cid() for ptr in self.all_thread_ptrs):\n # Then, pass the event back to the pipeline and exit the\n # function since it is not a pointer that is handled by\n # the filter.\n self.send(entity)\n return \n\n # All of the events processed by this filter have the Nachos\n # timer ticks at the time the event was created as extra\n # data. So update teh current nachos_time with the event's\n # timer ticks data.\n self.nachos_time = entity.get_extra_data()\n\n # The thread's pid is the tag in the data stream event.\n thread_pid = entity.get_tag()\n if thread_pid not in self.inactives:\n # fill in entry for this pid\n self.inactives[thread_pid] = {'last':None}\n \n\n # We want to mask execution intervals after a thread has\n # reached the THREAD/EXIT event\n #\n if thread_pid not in self.hasExited:\n # fill in entry for this pid\n self.hasExited[thread_pid] = False\n\n # inactivity intervals are from the last thread creation event or\n # switch from event to the next machine::run event or switch to event \n #\n if cid == self.switch_to_ptr.get_cid() or cid == self.machine_run_ptr.get_cid():\n # record timestamp for activity period tracking\n self.lastswitch = self.nachos_time\n\n last_tsc = self.inactives[thread_pid]['last']\n if last_tsc and not self.hasExited[thread_pid]:\n inactive_interval = entities.Interval(self.inactivity_period_ptr.get_cid(), \n { 'tsc' : NachosTime(last_tsc) }, \n { 'tsc' : NachosTime(self.nachos_time) }, \n entity.get_tag())\n self.send(inactive_interval)\n \n \n\n # activity intervals are from the last switch to event to the next\n # switch from event\n #\n if cid == self.switch_from_ptr.get_cid():\n # note switch from for inactivity calculation\n self.inactives[thread_pid]['last'] = self.nachos_time\n\n if self.lastswitch:\n active_interval = entities.Interval(self.activity_period_ptr.get_cid(),\n { 'tsc' : NachosTime(self.lastswitch) }, \n { 'tsc' : NachosTime(self.nachos_time) }, \n entity.get_tag())\n self.lastswitch = None\n self.send(active_interval)\n \n \n\n # Mark when a thread has exited so inactivity intervals after a thread\n # has exited are not tabulated \n if cid == self.reached_exit_ptr.get_cid():\n self.hasExited[thread_pid] = True\n entity = entity.change_tag([self.nachos_time])\n \n # Thread class construction is noted to start inactivity periods\n if cid == self.thread_constructor_ptr.get_cid():\n self.inactives[thread_pid]['last'] = self.nachos_time",
"def device_type(self, device_type):\n # type: (string_types) -> None\n\n if device_type is not None:\n if not isinstance(device_type, string_types):\n raise TypeError(\"Invalid type for `device_type`, type has to be `string_types`\")\n\n self._device_type = device_type",
"def test_filter_device(self):\n pass",
"def setFilter(self, channel, group, node, type, frequency, gain, bandwidth, unitCode=0):\n resp = self.XAPCommand('FILTER', channel, group, node, type, frequency, gain, bandwidth, unitCode=unitCode, rtnCount=4)\n type = None\n freq = None\n gain = None\n bandwidth = None\n if int(resp[5]) is not 0:\n type = int(resp[5])\n freq = float(resp[6])\n if type is 4 or type is 5:\n gain = float(resp[7])\n elif type is 6:\n gain = float(resp[7])\n bandwidth = float(resp[8])\n elif type is 8 or type is 9 or type is 10:\n gain = int(resp[7])\n bandwidth = int(resp[8])\n elif type is 11:\n gain = float(resp[7])\n bandwidth = float(resp[8])\n return {\"type\": type,\n \"frequency\": freq,\n \"gain\": gain,\n \"bandwidth\": bandwidth,\n }",
"def type_filter(self, items, types=None):",
"def get_sessions_by_type_and_filters(self, request):\n return self.session_service.get_sessions_by_type_and_filters(\n request.websafeConferenceKey,\n request.typeOfSession, request.filters)",
"def devices_command_generic(*, device_id, command=None, request_type, **kwargs):\n if command is None:\n command = [{\"id\": device_id, \"type\": request_type}]\n return devices_command(command, **kwargs)",
"def testFilterBySuite(self):\n self.setUpPyfakefs()\n gpu_json = {\n 'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},\n 'Android Tester': {\n 'isolated_scripts': [\n {\n 'args': [\n 'webgl_conformance',\n ],\n 'isolate_name': 'not_telemetry',\n },\n ],\n },\n 'Linux Tester': {\n 'isolated_scripts': [\n {\n 'args': [\n 'not_a_suite',\n ],\n 'isolate_name': 'telemetry_gpu_integration_test',\n },\n ],\n },\n 'Windows Tester': {\n 'isolated_scripts': [\n {\n 'args': [\n 'webgl_conformance',\n ],\n 'isolate_name': 'telemetry_gpu_integration_test',\n },\n ],\n },\n }\n\n self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,\n 'chromium.json'),\n contents=json.dumps(gpu_json))\n\n gpu_builders = builders.GetCiBuilders('webgl_conformance')\n self.assertEqual(gpu_builders, set(['Windows Tester']))",
"def add_filters(self, filter_object, fmt=\"%.6f\", **kwargs):\n if not isinstance(filter_object, UnitFilter):\n msg = \"Argument of type Filter expected. Got type {0}\"\n raise TypeError(msg.format(type(filter_object)))\n\n if filter_object.wavelength_unit is None:\n msg = \"Filter wavelength must have units for storage.\"\n raise AttributeError(msg)\n fname = \"{0:s}/{1:s}.csv\".format(self.source, filter_object.name)\n filter_object.write_to(fname.lower(),\n fmt=fmt, **kwargs)",
"def _mergeDevicesByTypeAVR(self, devices):\n\t\tdevs = list(devices)\n\t\tmerged = []\n\n\t\twhile len(devs) > 0:\n\t\t\tcurrent = devs[0]\n\t\t\tdevs.remove(current)\n\n\t\t\tprops = current.id\n\t\t\tif props.valid == False:\n\t\t\t\tcontinue\n\n\t\t\tmatches = []\n\t\t\tsuffix = self._getCategoryTypeAVR(current)\n\n\t\t\tself.log.info(\"ByType: Searching for device ending in \" + str(suffix))\n\n\t\t\tfor dev in devs:\n\t\t\t\tif dev.id.name == props.name and dev.id.type in suffix:\n\t\t\t\t\tmatches.append(dev)\n\n\t\t\tfor match in matches:\n\t\t\t\tdevs.remove(match)\n\t\t\t\tcurrent = current.getMergedDevice(match)\n\n\t\t\tif len(matches) == 0:\n\t\t\t\tself.log.info(\"ByType: No match for device: \" + current.id.string)\n\n\t\t\tself.log.debug(\"ByType:\\nResulting device:\\n\" + str(current))\n\t\t\tmerged.append(current)\n\n\t\treturn merged",
"def filter_room_type(self, criterias, index, bool_room_type):\n # if the user input doesn't pass verification, don't filter by this criteria\n # so always return True\n if bool_room_type == False:\n return True\n\n criterias = {key: self.convert_room_type_input(\n val) for key, val in criterias.items()}\n listing_room_type = self.process_room_type(\n self.data[index]['roomType'])\n bool_bed = criterias['Bedrooms'][1](\n listing_room_type['Bedrooms'], criterias['Bedrooms'][0])\n bool_bath = criterias['Bathrooms'][1](\n listing_room_type['Bathrooms'], criterias['Bathrooms'][0])\n return bool_bed & bool_bath"
] | [
"0.5975349",
"0.45121318",
"0.44813666",
"0.4332891",
"0.41228282",
"0.40819362",
"0.4061883",
"0.40150812",
"0.39757392",
"0.39721748",
"0.39716902",
"0.3967306",
"0.3962228",
"0.39582562",
"0.39029753",
"0.38913316",
"0.38811502",
"0.38712072",
"0.38544807",
"0.38515154",
"0.38511425",
"0.3846749",
"0.3842641",
"0.38190764",
"0.38149053",
"0.3790543",
"0.37749878",
"0.37581992",
"0.37562698",
"0.3743287"
] | 0.79289705 | 0 |
Filter incoming events by the given ``tag``. ``tag`` is a byte or unicode string with the name of a tag. Only events for devices which have this tag attached pass the filter and are handed to the caller. | def filter_by_tag(self, tag):
self._libudev.udev_monitor_filter_add_match_tag(
self, ensure_byte_string(tag))
self._libudev.udev_monitor_filter_update(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def crossfilter_by_tag(self,df, tag):\n col,spec= list(tag.items())[0]\n return df[df[col]==spec]",
"def search_tag(self, tag):\n self.driver.get(self.tag_url.format(tag))",
"def filter_queryset(self, queryset):\n tags = self.request.GET.getlist(\"tag\")\n if tags:\n for tag in tags:\n queryset = queryset.filter(tag__tag=tag)\n return super().filter_queryset(queryset)",
"def filter(ctx, fil, filter_host, filter_port):\n if not fil:\n raise ValueError(\"Must specify at least one filtering operaion (of the form '<filter>=<value>'\")\n client = aceclient.FilterClient(host=filter_host, port=filter_port)\n filters = {}\n for f in fil:\n filters.update(parse_tag(f))\n client.update(**filters)",
"def filter_by_tag(self, tags):\n\n if isinstance(tags, string_types):\n message = \"tags should be a list or None, got tags={}\".format(tags)\n raise TypeError(message)\n\n data_collection = DataCollection()\n for item in self.iteritems():\n if tags == [] or tags == None or all([tag in item.tags for tag in tags]):\n data_collection.add_data(item)\n return data_collection",
"def filter_for_tags(cls, image: np.ndarray, tags: Set[Tag]) -> np.ndarray:\n masks = []\n for tag in tags:\n color = tag.color;\n red_mask = image[:, :, 0] == color[0]\n green_mask = image[:, :, 1] == color[1]\n blue_mask = image[:, :, 2] == color[2]\n masks.append(np.logical_and(red_mask, green_mask, blue_mask))\n\n final_mask = np.logical_not(np.logical_or.reduce(masks))\n image[final_mask] = 255\n return image",
"def get_by_tag(cls, tag):\n out = []\n \n tags = Tag.expand_implied_by([tag])\n \n for t in tags:\n results = cls.objects.filter(owner=tag.owner, tags=t)\n \n for b in results:\n if b not in out:\n out.append(b)\n \n return out",
"def search_videos_tag(self, video_tag):\n results = []\n for video in self._video_library.get_all_videos():\n if video_tag.lower() in video.tags and video.flag is None:\n results.append(video)\n self.output_search_results(results, video_tag)",
"def handle_starttag(self, tag, attrs):\n \n if self.intermediate_tags > 0:\n self.intermediate_tags += 1\n return\n \n self.filtering = self.typogrify._should_be_filtered(tag, attrs)\n self.intermediate_tags = 1 if not self.filtering else 0",
"def handle_endtag(self, tag):\n\n if self.intermediate_tags > 0:\n self.intermediate_tags -= 1\n \n # Widont filter needs to be handled here\n if self.filtering:\n content = self.data_buffer[-self.filtered_data_length:]\n content = self.typogrify.widont(tag, content)\n self.data_buffer = self.data_buffer[:-self.filtered_data_length] + content",
"def filter_tag(tags=None):\n tagdict = defaultdict(list)\n Besarkecil = lambda f: ' '.join(re.findall('[A-Z][^A-Z]*', f))\n for obj in list(tags):\n if len(obj.split(':')) == 2:\n k, v = obj.split(':')\n # filtering key Besarkecil, lowercase\n k = str(Besarkecil(k)).lower()\n # print(k)\n if k in ['cari', 'jadwal', 'keberangkatan', 'maskapai', 'type', 'ibadah', 'jumlah hari', 'rute',\n 'tour']:\n res = re.findall(r\"(^[A-Z][^A-Z]+)|([^\\W\\d_]+|[\\d+]+)\", v)\n arres = []\n for resple in res:\n arres.append(filter(None, resple)[0])\n # print([e for e in resple])\n # print(' '.join(arres))\n tagdict[k].append(' '.join(arres))\n return tagdict",
"def use_tag(self, tag):\n try:\n self.available_tags.remove(tag)\n except ValueError:\n return False\n return True",
"def filter_tag(tags=None):\n tagdict = defaultdict(list)\n Besarkecil = lambda f: ' '.join(re.findall('[A-Z][^A-Z]*', f))\n for obj in list(tags):\n if len(obj.split(':')) == 2:\n k, v = obj.split(':')\n # filtering key Besarkecil, lowercase\n k = str(Besarkecil(k)).lower()\n # print(k)\n if k in ['cari', 'jadwal', 'keberangkatan', 'maskapai', 'type', 'ibadah', 'jumlah hari', 'rute', 'tour']:\n res = re.findall(r\"(^[A-Z][^A-Z]+)|([^\\W\\d_]+|[\\d+]+)\", v)\n arres = []\n for resple in res:\n arres.append(filter(None, resple)[0])\n # print([e for e in resple])\n # print(' '.join(arres))\n tagdict[k].append(' '.join(arres))\n return tagdict",
"def SensorTagDeviceScan(bt_adapter, scan_ttl):\n service = DiscoveryService(bt_adapter) \n devices = service.discover(scan_ttl)\n for key in devices.keys():\n if 'SensorTag' not in devices[key]:\n del devices[key]\n \n return devices",
"def filter(self, event: \"TraceEvent\") -> Union[None, \"TraceEvent\", Sequence[\"TraceEvent\"]]:\n raise NotImplementedError()",
"def filter(self, pkt):\n return pkt",
"def handle_tag_search(self, tag_text):\n log.debug(\"Handling tag search: %s\", tag_text)\n tags = tag_text.split()\n self.filter_tags = tags\n self.current_selected = 0\n self._refresh()",
"def filter_entries_by_tag(search, entry):\n \n entry_tags = entry.tags\n if '&' in search:\n splits = search.split('&')\n\n return all(split.lower() in entry_tags for split in splits)\n elif '|' in search:\n splits = search.split('|')\n return any(split.lower() in entry_tags for split in splits)\n else:\n return search.lower() in entry_tags",
"def by_tags(self, tags):\n if tags:\n self._filters.append(\n models.Note.tags.any(models.Tag.id.in_(tags)),\n )\n return self",
"def filter_tags(d, tags):\n\treturn dict([(k,filter(lambda x: x in tags, v))\n\t\t\t\tfor k, v in d.iteritems() if v])",
"def subscribe(self, tag):\n self.socket.setsockopt(constants.SUBSCRIBE, tag)",
"def _should_be_filtered(self, tag, attrs):\n \n # Test if the node's tag should be filtered\n if self.__ignores[0] and tag in self.__ignores[0]:\n return False\n \n # Test if the node's attributes should be filtered\n filters = self.__ignores[1][any_tag]\n if tag in self.__ignores[1]:\n filters |= self.__ignores[1][tag]\n\n try:\n if any('.%s' % attr[1] in filters for attr in attrs if attr[0] == 'class'):\n return False\n except KeyError:\n pass\n\n try:\n if any('#%s' % attr[1] in filters for attr in attrs if attr[0] == 'id'):\n return False\n except KeyError:\n pass\n\n return True",
"def tag(self, tag):\n\n self._tag = tag",
"def tag(self, tag):\n\n self._tag = tag",
"def tag(self, tag):\n\n self._tag = tag",
"def tag(self, tag):\n\n self._tag = tag",
"def remove_tag(self, tag):\n if tag in self.tags:\n index = self.tags.index(tag)\n self.tags[index:index + 1] = []\n self.stop_times[index:index + 1] = []",
"def search(self, tag):\n\n url = \"https://api.instagram.com/v1/tags/search?q={0}&access_token={1}\".format(tag, self.access_token)\n request = requests.get(url)\n return request.json()",
"def filter(self, param, container = None):\n\n\t\tif param[0] in self.filters:\n\t\t\ttry:\n\t\t\t\tfilter_method = getattr(self, \"filter_\" + param[0])\n\t\t\t\tself.__printer.debug(\"Command\", \"Executing filter \" + param[0])\n\t\t\t\treturn filter_method(param, container)\n\t\t\texcept AttributeError:\n\t\t\t\tself.__printer.warning(\"Command\", \"Filter \" + param[0] + \" not implemented passing\")\n\t\t\t\tpass\n\n\t\t\treturn param",
"def unsubscribe(self, tag):\n self.socket.setsockopt(constants.UNSUBSCRIBE, tag)"
] | [
"0.5666642",
"0.51973397",
"0.51657885",
"0.5159696",
"0.5134646",
"0.51267135",
"0.50107723",
"0.49824739",
"0.49283835",
"0.4923795",
"0.48922583",
"0.48665416",
"0.486598",
"0.48503694",
"0.48403606",
"0.48226506",
"0.47957942",
"0.47786885",
"0.47646055",
"0.4741162",
"0.47268963",
"0.46619973",
"0.46619093",
"0.46619093",
"0.46619093",
"0.46619093",
"0.46537566",
"0.46533152",
"0.4640641",
"0.46349132"
] | 0.79313517 | 0 |
Set the receive buffer ``size``. ``size`` is the requested buffer size in bytes, as integer. | def set_receive_buffer_size(self, size):
try:
self._libudev.udev_monitor_set_receive_buffer_size(self, size)
except EnvironmentError:
self._reraise_with_socket_path() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def strace_set_buffer_size(self, size):\n size = ctypes.c_uint32(size)\n res = self._dll.JLINK_STRACE_Control(enums.JLinkStraceCommand.SET_BUFFER_SIZE, size)\n if res < 0:\n raise errors.JLinkException('Failed to set the STRACE buffer size.')\n\n return None",
"def __set_size(self, size):\n if not isinstance(size, int):\n raise TypeError('The size should be an integer')\n if size < 64 or size > 1500: # It should be in the Standard Ethernet Payload range\n raise ValueError('The size should be in the range of Standard Ethernet frames [64,1500] bytes')\n self.__size = size",
"def setBufferSize(self, buffer_size):\n DPxSetDinBuffSize(buffer_size)",
"def trace_set_buffer_capacity(self, size):\n cmd = enums.JLinkTraceCommand.SET_CAPACITY\n data = ctypes.c_uint32(size)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to set trace buffer size.')\n return None",
"def setsize(self, size):\n self.__size = size",
"def size(self, size: int):\n\n self._size = size",
"def recv_size(s, size):\n print 'Receive data in fixed size mode'\n reply = s.recv(size)\n print reply",
"def size(self, size):\n if type(size) is not int:\n raise TypeError(\"size must be an integer\")\n elif size < 0:\n raise ValueError(\"size must be >= 0\")\n self.__size = size",
"def set_binary_reply_size(self, size: int):\n self._binary_reply = bytearray(size)",
"def set_flush_size(self, size):\n assert isinstance(size, six.integer_types)\n self._flush_size = size",
"def size(self, size):\n\n self._size = size",
"def size(self, size):\n\n self._size = size",
"def size(self, size):\n\n self._size = size",
"def size(self, size):\n\n self._size = size",
"def use_buffer(self, buffer_size):\n self.__buffer_size = buffer_size\n if self.__buffer is None:\n self.__buffer = []",
"def size(self, size):\n self._size = size",
"def size(self, size):\n if size is None:\n raise ValueError(\"Invalid value for `size`, must not be `None`\") # noqa: E501\n\n self._size = size",
"def size(self, size):\n if size is None:\n raise ValueError(\"Invalid value for `size`, must not be `None`\") # noqa: E501\n\n self._size = size",
"def set_size(self, size):\n \n self.width = size[0]\n self.height = size[1]",
"def swo_set_emu_buffer_size(self, buf_size):\n buf = ctypes.c_uint32(buf_size)\n res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.SET_BUFFERSIZE_EMU,\n ctypes.byref(buf))\n if res < 0:\n raise errors.JLinkException(res)\n\n return None",
"def swo_set_host_buffer_size(self, buf_size):\n buf = ctypes.c_uint32(buf_size)\n res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.SET_BUFFERSIZE_HOST,\n ctypes.byref(buf))\n if res < 0:\n raise errors.JLinkException(res)\n\n return None",
"def setFrameSize(self, frame_size):\n \n self.frame_size = frame_size",
"def resize(self, size):\n assert size >= 0 and size <= self._cap, \\\n \"invalid size[%d] for resize\" % (size)\n\n self._size = size",
"def set_size(self, size):\n self.dtSize = size",
"def set_frame_size(self, frame_size_selector):\n raise NotImplementedError",
"def change_window_size(self, size):\n value = 0\n try:\n value = int(size)\n except ValueError:\n raise ValueError(\"Please type in a valid number.\")\n\n if value >= 0:\n self.__window_size = value\n else:\n raise ValueError(\"Please type in a valid positive number.\")",
"def do_buffer_size(num: int):\n if num == '':\n print(len(cmd_parser.daq.data))\n else:\n try:\n # TODO support rest of args to buffer resize\n cmd_parser.daq.buffer_resize(int(num))\n except ValueError:\n print('invalid input, [num] must be of type <int>')",
"def recv_size(self, size, timeout=_UNSET):\n with self._recv_lock:\n if timeout is _UNSET:\n timeout = self.timeout\n chunks = []\n total_bytes = 0\n try:\n start = time.time()\n self.sock.settimeout(timeout)\n nxt = self.rbuf or self.sock.recv(self._recvsize)\n while nxt:\n total_bytes += len(nxt)\n if total_bytes >= size:\n break\n chunks.append(nxt)\n if timeout:\n cur_timeout = timeout - (time.time() - start)\n if cur_timeout <= 0.0:\n raise socket.timeout()\n self.sock.settimeout(cur_timeout)\n nxt = self.sock.recv(self._recvsize)\n else:\n msg = ('connection closed after reading %s of %s requested'\n ' bytes' % (total_bytes, size))\n raise ConnectionClosed(msg) # check recv buffer\n except socket.timeout:\n self.rbuf = b''.join(chunks)\n msg = 'read %s of %s bytes' % (total_bytes, size)\n raise Timeout(timeout, msg) # check recv buffer\n except Exception:\n # received data is still buffered in the case of errors\n self.rbuf = b''.join(chunks)\n raise\n extra_bytes = total_bytes - size\n if extra_bytes:\n last, self.rbuf = nxt[:-extra_bytes], nxt[-extra_bytes:]\n else:\n last, self.rbuf = nxt, b''\n chunks.append(last)\n return b''.join(chunks)",
"def size(self, value):\n if not isinstance(value, int):\n raise TypeError(\"size must be an integer\")\n if value < 0:\n raise ValueError(\"size must be >= 0\")\n self.__size = value",
"def set_cache_size(self, size):\n spotify.Error.maybe_raise(lib.sp_session_set_cache_size(\n self._sp_session, size))"
] | [
"0.6981181",
"0.6979146",
"0.68849653",
"0.68056643",
"0.6791033",
"0.650424",
"0.6397327",
"0.63909835",
"0.63701284",
"0.63119274",
"0.63112855",
"0.63112855",
"0.63112855",
"0.63112855",
"0.6291892",
"0.6289078",
"0.6160049",
"0.6160049",
"0.61325973",
"0.6129285",
"0.60514385",
"0.60222256",
"0.5929404",
"0.5904858",
"0.5899696",
"0.58859694",
"0.5869437",
"0.5769505",
"0.5731469",
"0.57192504"
] | 0.7975507 | 0 |
Receive a single device from the monitor. The caller must make sure, that there are events available in the event queue. The call blocks, until a device is available. If a device was available, return ``(action, device)``. ``device`` | def receive_device(self):
try:
device_p = self._libudev.udev_monitor_receive_device(self)
except EnvironmentError:
self._reraise_with_socket_path()
if not device_p:
raise EnvironmentError('Could not receive device')
action = ensure_unicode_string(
self._libudev.udev_device_get_action(device_p))
return action, Device(self.context, device_p) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_device(self, device):\n\t\tself.ise.headers.update({'Accept': 'application/vnd.com.cisco.ise.network.networkdevice.1.0+xml'})\n\n\t\tresult = {\n\t\t\t'success': False,\n\t\t\t'response': '',\n\t\t\t'error': '',\n\t\t}\n\n\t\tresp = self.ise.get('{0}/config/networkdevice?filter=name.EQ.{1}'.format(self.url_base, device))\n\t\tfound_device = ERS._to_json(resp.text)\n\n\t\tif found_device['ns3:searchResult']['@total'] == '1':\n\t\t\tresp = self.ise.get('{0}/config/networkdevice/{1}'.format(\n\t\t\t\t\tself.url_base, found_device['ns3:searchResult']['ns3:resources']['ns5:resource']['@id']))\n\t\t\tif resp.status_code == 200:\n\t\t\t\tresult['success'] = True\n\t\t\t\tresult['response'] = ERS._to_json(resp.text)['ns4:networkdevice']\n\t\t\t\treturn result\n\t\t\telif resp.status_code == 404:\n\t\t\t\tresult['response'] = '{0} not found'.format(device)\n\t\t\t\tresult['error'] = resp.status_code\n\t\t\t\treturn result\n\t\t\telse:\n\t\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\t\tresult['error'] = resp.status_code\n\t\t\t\treturn result\n\t\telif found_device['ns3:searchResult']['@total'] == '0':\n\t\t\t\tresult['response'] = '{0} not found'.format(device)\n\t\t\t\tresult['error'] = 404\n\t\t\t\treturn result\n\t\telse:\n\t\t\tresult['response'] = ERS._to_json(resp.text)['ns3:ersResponse']['messages']['message']['title']\n\t\t\tresult['error'] = resp.status_code\n\t\t\treturn result",
"def GetDevice(self, arg):\n\n if not arg: return None\n\n deviceSpec = DeviceId(arg)\n\n for device in self.YieldAllDevices():\n if deviceSpec.Matches(device): return device",
"async def get_device(hass: HomeAssistant, device_id: str) -> Optional[DeviceEntry]:\n device_registry = await hass.helpers.device_registry.async_get_registry()\n return device_registry.async_get(device_id)",
"def find_device(device):\n return usb.core.find(idVendor=device['idVendor'], idProduct=device['idProduct'])",
"def get_device():\n c_dev = ct.c_int(0)\n safe_call(backend.get().af_get_device(ct.pointer(c_dev)))\n return c_dev.value",
"def get_device(l):\n if not l.device:\n l.device = find_device()\n setup_device(l.device)\n return l.device",
"def get_device(self, device_id: str) -> Doorbell | Lock | None:\n return self._locks_by_id.get(device_id) or self._doorbells_by_id.get(device_id)",
"def getdevice(self, device, position=None, brand=None):\n url = self.__ApiUrl + 'getdevice'\n postdata = {'brand': brand,\n 'device': device,\n 'position': position,\n 'token': self.__ApiKey}\n headers = {'content-type': 'application/json'}\n result = self.sendpostdata(url, postdata, headers)\n try:\n return result.json()\n except AttributeError:\n return result",
"def device(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device\"), kwargs)",
"def device(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device\"), kwargs)",
"def device(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device\"), kwargs)",
"def device(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device\"), kwargs)",
"def act(self, device):\n with open(device, 'r') as fd:\n result = fcntl.ioctl(fd, self.ioctl)\n if result:\n raise Exception(\"ioctl failed with result {0}\".format(result))",
"def __read_device(self):\n state = XinputState()\n res = self.manager.xinput.XInputGetState(\n self.__device_number, ctypes.byref(state))\n if res == XINPUT_ERROR_SUCCESS:\n return state\n if res != XINPUT_ERROR_DEVICE_NOT_CONNECTED:\n raise RuntimeError(\n \"Unknown error %d attempting to get state of device %d\" % (\n res, self.__device_number))\n # else (device is not connected)\n return None",
"def find_device():\n device = usb.core.find(\n idVendor=LuxaforFlag.DEVICE_VENDOR_ID,\n idProduct=LuxaforFlag.DEVICE_PRODUCT_ID\n )\n return device",
"def getDevice(self, port):\n try:\n return self._interactor.devices[port]\n except:\n raise ValueError(f\"No device on port {port} found.\")",
"def device(self):\n return self.broker.device(**{\"DeviceRouteID\": self.DeviceRouteID})",
"def getDevice(driver):\n devices = list(listDevices(driver))\n if not devices:\n print('No devices found. Ensure your camera is connected.')\n elif len(devices) != 1:\n print('Too many devices found. Only one camera is supported')\n else:\n return devices[0]",
"def get_device(catalog_name: Optional[str] = None,\n device_group_name: Optional[str] = None,\n device_name: Optional[str] = None,\n product_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDeviceResult:\n __args__ = dict()\n __args__['catalogName'] = catalog_name\n __args__['deviceGroupName'] = device_group_name\n __args__['deviceName'] = device_name\n __args__['productName'] = product_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:azuresphere:getDevice', __args__, opts=opts, typ=GetDeviceResult).value\n\n return AwaitableGetDeviceResult(\n chip_sku=pulumi.get(__ret__, 'chip_sku'),\n device_id=pulumi.get(__ret__, 'device_id'),\n id=pulumi.get(__ret__, 'id'),\n last_available_os_version=pulumi.get(__ret__, 'last_available_os_version'),\n last_installed_os_version=pulumi.get(__ret__, 'last_installed_os_version'),\n last_os_update_utc=pulumi.get(__ret__, 'last_os_update_utc'),\n last_update_request_utc=pulumi.get(__ret__, 'last_update_request_utc'),\n name=pulumi.get(__ret__, 'name'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n system_data=pulumi.get(__ret__, 'system_data'),\n type=pulumi.get(__ret__, 'type'))",
"def get_device(self, field):\n return self._devices[field]",
"def get_device(self, dev_id):\n return self.api_request('GET', self.url + '/device/' + str(dev_id), {})",
"def get_device(arn=None):\n pass",
"def _get_device():\n return context.get_context('device_target')",
"def find_device(devices, *, usage_page, usage):\n if hasattr(devices, \"send_report\"):\n devices = [devices]\n for device in devices:\n if (\n device.usage_page == usage_page\n and device.usage == usage\n and hasattr(device, \"send_report\")\n ):\n return device\n raise ValueError(\"Could not find matching HID device.\")",
"async def get_device(self, device_id: str) -> dict:\r\n return await self.get(API_DEVICE.format(device_id=device_id))",
"def get_device(self, device=None, only_active=True):\n devices = self.devices().devices\n device_names = \", \".join([d.name for d in devices])\n device_name_or_id = device\n if not device_name_or_id:\n if only_active:\n device = first(devices, key=attrgetter(\"is_active\"))\n if not device:\n raise ValueError(\n f\"\"\"\n There's no active device.\n Possible devices: {device_names}\"\"\"\n )\n else:\n device = first(devices, key=attrgetter(\"is_active\")) or first(devices)\n else:\n device = first(devices, key=lambda d: device_name_or_id in (d.name, d.id))\n if not device:\n raise ValueError(\n f\"\"\"\n Device {device_name_or_id} doesn't exist.\n Possible devices: {device_names}\"\"\"\n )\n\n return device",
"def device(self):\n return self.broker.device(**{\"JobDetailID\": self.JobDetailID})",
"def get_device(self):\n raise NotImplementedError()",
"def get_device(mac, devices=None):\n print('get_device:', get_device)\n if not mac:\n return None\n if not devices:\n devices = discover_wemo()\n\n normal_mac = normalize_mac_address(mac)\n\n for dev in devices:\n if dev.mac and normalize_mac_address(dev.mac) == normal_mac:\n print('get_device dev:', dev)\n return dev\n\n return None",
"def getDevice(self, sn='', make=''):\n if len(self) == 0:\n # printLog('NO DEVICE FOUND. QUIT.', logging.ERROR)\n return None\n if len(self) == 1:\n return self[0]\n if len(sn) == 0 and len(make) == 0:\n printLog(\"More than one device is connected. Please unplug and leave just one.\", logging.ERROR)\n return None\n for dc in self:\n print dc.make, make\n if len(make) > 0 and len(sn) == 0:\n if dc.make == make:\n # print 'make match'\n return dc\n elif len(sn) > 0 and len(make) == 0:\n if dc.deviceId == sn:\n return dc\n elif len(sn) > 0 and len(make) > 0:\n if dc.make == make and dc.deviceId == sn:\n return dc\n else:\n return dc\n return None"
] | [
"0.59522355",
"0.58867776",
"0.58853173",
"0.5813893",
"0.57472056",
"0.56929654",
"0.56529856",
"0.56129146",
"0.5546824",
"0.5546824",
"0.5546824",
"0.5546824",
"0.55230916",
"0.55181956",
"0.5491791",
"0.54799294",
"0.54527694",
"0.5444138",
"0.53849095",
"0.5365675",
"0.5327324",
"0.53213274",
"0.52753735",
"0.5270765",
"0.5254903",
"0.5249645",
"0.5247571",
"0.5238653",
"0.5233613",
"0.52318114"
] | 0.75275433 | 0 |
Takes the module name and tries to identify a list of functions to return. | def _getModFunctions(modName, modSearch):
# First find all callable functions they want
try:
mod = sys.modules[modName]
modNames = dir(mod)
callables = []
for m in modNames:
a = getattr(mod, m)
if(hasattr(a, '__call__') and hasattr(a, '__class__')):
if(a.__module__ == modSearch and a.__name__[0] != "_"):
callables.append(a)
return callables
except Exception as e:
print('!! Unable to functionalize the module: %s' % str(e))
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_function_list_from_modlist(self):\n function_list = []\n function_name_list = []\n for module in self.module_list:\n for name, obj in inspect.getmembers(module, inspect.isfunction):\n if inspect.getmodule(obj) == module:\n function_list.append(obj)\n function_name_list.append(name)\n return function_list",
"def get_functions():\n\treturn [f for f in globals() if f.startswith('make_')]",
"def methods_importer(\n method_name: str, modules: List[Union[str, ModuleType]]\n) -> List[Callable]:\n result = []\n for module in modules:\n try:\n if isinstance(module, ModuleType):\n mod = module\n elif isinstance(module, str):\n mod = importlib.import_module(module)\n else:\n raise TypeError('Must be list of strings or ModuleType')\n\n met = getattr(mod, method_name, None)\n\n if met:\n result.append(mod)\n # return met\n\n except ImportError:\n continue\n\n return result",
"def find(name: str):\n return _functions[name]",
"def getGlobalFunctions(self, name: unicode) -> List[ghidra.program.model.listing.Function]:\n ...",
"def find_functions(module):\n for attrname in dir(module):\n attr = getattr(module, attrname)\n # iteratively get __module__ or __class__ (where __module__ fails for clas\n if callable(attr) and getattr(attr, '__module__', getattr(attr, '__class__', '')) == module.__name__:\n yield attr",
"def visit_Module(self, node):\n self.generic_visit(node)\n return self.functions",
"def get_test_functions():\r\n\r\n test_funcs = [obj for name,obj in inspect.getmembers(sys.modules[__name__])\r\n if (inspect.isfunction(obj) and name.startswith('test'))]\r\n src = inspect.getsource(sys.modules[__name__])\r\n lines = src.split('\\n')\r\n\r\n # Create a dictionary with key=function name and value is 0-based order\r\n # in the module\r\n ordered_func_names = dict()\r\n ordered_funcs = list()\r\n func_index = 0\r\n for line in lines:\r\n if line.find(\"def test\") > -1 and not line.find('line.find') > -1:\r\n func_name = line.split(\"(\")[0].split()[1]\r\n ordered_func_names[func_name] = func_index\r\n # Create an empty list with sampe number of elements as test\r\n # functions\r\n ordered_funcs.append('')\r\n func_index += 1\r\n for test_func in test_funcs:\r\n index = ordered_func_names[test_func.__name__]\r\n ordered_funcs[index] = test_func\r\n return ordered_funcs",
"def get_module_names(config):\n lambdas_path = config['lambdas_path']\n return [f.strip('.py') for f in os.listdir(lambdas_path) if f.endswith('py') and not f.startswith(\"__\")]",
"def findModule(name):",
"def get_functions():\n\n filenames = set()\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'functions')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n functions_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'functions')\n sys.path.append(functions_path)\n for file_ in os.listdir(functions_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n functions = {}\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n if hasattr(module, 'function_name'):\n functions[module.function_name] = module\n except ImportError:\n pass\n\n return functions",
"def load_functions(self, module_name, path=None):\n# try:\n if True:\n if not path:\n path = os.getcwd()\n if not isinstance(path,list):\n path = [path]\n file,filename,desc = imp.find_module(module_name,path)\n funcs = imp.load_module(module_name, file, filename, desc)\n if hasattr(funcs,'_init'):\n getattr(funcs,'_init')(self)\n attrs = [attr for attr in funcs.__dict__ \n if not attr.startswith('__')\n and attr is not '_init'\n and not hasattr(getattr(funcs,attr),'__base__')]\n for attr in attrs:\n try:\n print 'Adding', attr, 'to', self._name\n self.add_function(getattr(funcs,attr))\n except:\n print 'Error adding', attr, 'to', self._name",
"def find_functions(code):\n regex = \"^\\s*\" + re_func_decl + \"\\s*{\"\n \n funcs = []\n while True:\n m = re.search(regex, code, re.M)\n if m is None:\n return funcs\n \n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n else:\n args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]\n funcs.append((name, args, rtype))\n \n code = code[m.end():]",
"def get_task_functions(tasknames):\n task_fns = []\n for task_name in tasknames:\n # task_func = globals().get('do_%s' % task_name, None)\n task_func = getattr(tasks, 'do_%s' % task_name, None)\n if not task_func or not type(task_func) is types.FunctionType:\n logging.error(\"Task name '%s' is invalid.\", task_name)\n raise ValueError(\"Task name '%s' is invalid.\", task_name)\n task_fns.append(task_func)\n\n return task_fns",
"def all_decorated_module_functions( this, module, exclude_methods = False, exclude_functions = False) :\n\t\tmodule_names = []\n\t\tfor el in dir( module) :\n\t\t\tfn = module.__dict__.get( el)\n\n\t\t\t# lookup for functions\n\t\t\tif not exclude_functions and type( fn) in [types.FunctionType, staticmethod, classmethod] :\n\t\t\t\tfn = this._getfn( fn)\n\t\t\t\tif len( this.get_decorators( fn)) > 0 :\n\t\t\t\t\tfname = fn.__annotations__[this.NATIVE_FUNCTION].__name__\n\t\t\t\t\tif fname not in module_names :\n\t\t\t\t\t\tyield { fname : module.__dict__.get( fname) }\n\t\t\t\t\t\tmodule_names += [fname]\n\t\t\t\n\t\t\t# lookup for class methods\n\t\t\tif not exclude_methods and type( fn) is type :\n\t\t\t\tfor cls_el in dir( fn) :\n\t\t\t\t\tmethod = fn.__dict__.get( cls_el)\n\t\t\t\t\tif type( method) in [types.FunctionType, staticmethod, classmethod] :\n\t\t\t\t\t\tmethod = this._getfn( method)\n\t\t\t\t\t\tif len( this.get_decorators( method)) > 0:\n\t\t\t\t\t\t\tfname = method.__annotations__[this.NATIVE_FUNCTION].__name__\n\t\t\t\t\t\t\tif fname not in module_names :\n\t\t\t\t\t\t\t\tyield { \"%s.%s\" %(fn.__name__, fname) : fn.__dict__.get( fname) }\n\t\t\t\t\t\t\t\tmodule_names += [fname]",
"def getModules(runName=\"run\", ofClass=None):\n # Container dict for all modules found with a runName function\n modules = {}\n \n # Cycle through all python files, excluding any starting with '_' in this\n # package dir\n for f in os.listdir(os.path.dirname(__file__)):\n # Split into module name and extension\n mod_name, ext = os.path.splitext(f)\n # Must be a .py file and not start with '_'\n if ext != '.py' or mod_name.startswith('_'):\n continue\n # Import the module relative to the current package\n mod = importlib.import_module(\".\"+mod_name, __package__)\n\n # Cycle through all members in the module, looking for the entry point\n # function and subclasses if needed\n members = {'runName': None, 'subClass': []}\n for obj_name, obj in inspect.getmembers(mod):\n # The .getmembers() method returns a tuple with the first element\n # the full member name , and the second the member definition.\n \n # Check for our entry function if we have not found it yet\n if members['runName'] is None and \\\n inspect.isfunction(obj) and \\\n obj.__name__ == runName:\n members['runName'] = obj\n continue\n\n # Check for any subclasses\n if ofClass is not None and \\\n inspect.isclass(obj) and \\\n issubclass(obj, ofClass) and \\\n obj != ofClass:\n members['subClass'].append(obj)\n continue\n\n # Only add this module if we found a runName\n if members['runName'] is not None:\n modules[mod_name] = members\n\n return modules",
"def _get_functions():\n\n # Get all functions that start with _office.\n fcts = {fct_name[len(FCT_PREFIX):]: fct for (fct_name, fct) in\n globals().iteritems() if fct_name.startswith(FCT_PREFIX) and\n hasattr(fct, \"__call__\")}\n\n return fcts",
"def modules():",
"def matches_panic_funcs(name):\n for func in panic_functions:\n if func in name:\n return func\n return \"\"",
"def allFunctions(self):\n\t\tmodulos=sublime.decode_value(open(RutasPython.funciones()).read())\n\t\tlista=[]\n\t\tfor modulo in modulos:\n\t\t\tlista+=[ (funcion+\"\\t•\"+modulo, self.ponerCursor(modulo+\".\"+funcion)) for funcion in modulos[modulo]]\n\t\treturn sorted(lista)",
"def imports():\n for name, val in globals().items():\n if isinstance(val, getattr(types, \"ModuleType\")):\n yield val.__name__",
"def module_functionalities(module: types.ModuleType, MARA_XXX: str, type) -> []:\n if MARA_XXX in dir(module):\n functionalities = getattr(module, MARA_XXX)\n if isinstance(functionalities, typing.Callable):\n functionalities = functionalities()\n if isinstance(functionalities, typing.Dict):\n functionalities = functionalities.values()\n if not isinstance(functionalities, typing.Iterable):\n raise TypeError(\n f'{module.__name__}.{MARA_XXX} should be or return a list or dict of {type.__name__}. Got \"{functionalities}\".')\n for functionality in functionalities:\n if not isinstance(functionality, type):\n raise TypeError(f'In {module.__name__}.{MARA_XXX}: Expected a {type.__name__}, got \"{functionality}\"')\n return functionalities\n else:\n return []",
"def get_rewards():\n this = modules[__name__]\n names, funcs = [], []\n for name, func in inspect.getmembers(this):\n\n # Is a definition a function\n if inspect.isfunction(func):\n # Is defined in this module\n if inspect.getmodule(func) == this:\n names.append(name)\n funcs.append(func)\n\n return tuple(names), tuple(funcs)",
"def funcs_in_script(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n\n N = len(lines)\n funcs = []\n for n in range(N):\n line = lines[n]\n\n ###################################################\n # RETRIEVE FUNCTION NAME #\n ###################################################\n if not line[:4] == 'def ':\n continue\n if not '(' in line:\n continue\n end = line.index('(')\n name = line[4:end]\n\n ###################################################\n # RETRIEVE DOCSTRING HEADER #\n ###################################################\n header = ''\n for m in range(n, N - 1):\n line = lines[m]\n\n # this should not happen (when coded in python syntax, a closing\n # parenthesis must appear first)\n if m > n and line[:4] == 'def ':\n break\n\n # this marks the end of the function definition\n if '):' in line:\n hline = lines[m + 1] # potential docstring header line\n # if it exists, then here\n\n\n # remove leading white spaces:\n while hline[0] == ' ':\n hline = hline[1:]\n\n # check whether it is in fact (the start of) a docstring\n if hline[:3] not in ['\"\"\"', \"'''\"]:\n break\n\n # take the first line of this docstring\n header = hline[3:-1]\n\n # remove docstring closing:\n if header[-3:] in ['\"\"\"', \"'''\"]:\n header = header[:-3]\n\n # ignore outdated functions if labelled as such:\n if header.lower()[:10] == '[outdated]':\n name = None\n if header.lower()[:1] == '*':\n name = None\n break\n\n if name is None:\n continue\n\n funcs.append([name, header])\n\n return funcs",
"def process_module_list(self, modules):",
"def _group_functions(mod):\n groups = {}\n for name, obj in inspect.getmembers(mod):\n if inspect.isbuiltin(obj) or inspect.isfunction(obj):\n if obj.__module__ and obj.__module__ in function_groups:\n try:\n flist = groups[function_groups[obj.__module__]]\n flist.append(obj)\n except KeyError:\n groups[function_groups[obj.__module__]] = [\n obj,\n ]\n else:\n # Special case for _sycl_device_factory\n if (\n obj.__module__ == \"dpctl._sycl_device_factory\"\n and \"select_\" in obj.__name__\n ):\n try:\n flist = groups[\"Device Selection Functions\"]\n flist.append(obj)\n except KeyError:\n groups[\"Device Selection Functions\"] = [\n obj,\n ]\n else:\n try:\n flist = groups[\"Other Functions\"]\n flist.append(obj)\n except KeyError:\n groups[\"Other Functions\"] = [\n obj,\n ]\n return groups",
"def find_test_functions(tree: ast.AST, skip_noqa: bool = False) -> List[ast.FunctionDef]:\n function_finder = TestFuncLister(skip_noqa)\n function_finder.visit(tree)\n return function_finder.get_found_funcs()",
"def fab_steps(module):\n return tuple(getattr(module, func) for func in get_sorted_strnum(dir(module)))",
"def create_included_function_list():\n import iteration_utilities\n from iteration_utilities import Iterable\n from itertools import chain\n from operator import itemgetter\n from astropy.table import Table\n from astropy.io.ascii import RST\n\n it = Iterable(chain(iteration_utilities._cfuncs.__dict__.items(),\n iteration_utilities._helpers._performance.__dict__.items(),\n iteration_utilities._recipes._core.__dict__.items(),\n iteration_utilities._recipes._additional.__dict__.items())\n # Exclude PY2 variable and private functions\n ).filterfalse(lambda x: x[0].startswith(('PY2', '_'))\n # Exclude everything that has no __module__\n ).filter(lambda x: hasattr(x[1], '__module__')\n # Only include functions that come from the package\n ).filter(lambda x: x[1].__module__.startswith('iteration_utilities')\n # Remove duplicate names\n ).unique_everseen(itemgetter(0)\n # Sort lexically\n ).get_sorted(key=lambda x: x[0].lower())\n\n it = Iterable(it\n # Create a Sphinx link from function name and module\n ).map(lambda i: ':py:func:`~{}.{}`'.format(i[1].__module__, i[0])\n # Group into 4s so we get a 4 column Table\n ).grouper(4, fillvalue=''\n # Convert to list because Table expects it.\n ).as_list()\n\n return '\\n'.join(RST().write(Table(rows=it)))",
"def import_function(name: str):\n module_name, function_name = name.rsplit(\".\", 1)\n module = importlib.import_module(module_name)\n return getattr(module, function_name)"
] | [
"0.70581394",
"0.661115",
"0.6598877",
"0.65795875",
"0.6404035",
"0.63618064",
"0.6332475",
"0.6321175",
"0.6285262",
"0.6178784",
"0.6146512",
"0.6116439",
"0.6094806",
"0.60747135",
"0.59446853",
"0.59231937",
"0.59136623",
"0.5896091",
"0.58631843",
"0.58433676",
"0.5836459",
"0.58358794",
"0.58257055",
"0.5822376",
"0.58157593",
"0.5792999",
"0.5789448",
"0.57575285",
"0.5750736",
"0.57268715"
] | 0.7192133 | 0 |
Creates an optimal policy for solving the contextual bandit environment. | def get_contextual_bandit_policy(contextual_bandit_env,
epsilon_explore=0.0,
py=True,
return_distribution=True):
if epsilon_explore < 0 or epsilon_explore > 1:
raise ValueError('Invalid exploration value %f' % epsilon_explore)
optimal_action = np.argmax(contextual_bandit_env.rewards, axis=-1)
policy_distribution = np.ones([
contextual_bandit_env.num_contexts, contextual_bandit_env.num_arms
]) / contextual_bandit_env.num_arms
policy_distribution *= epsilon_explore
policy_distribution[np.arange(policy_distribution.shape[0]),
optimal_action] += 1 - epsilon_explore
def obs_to_index_fn(observation):
if py:
return np.array(observation, dtype=np.int32)
else:
return tf.cast(observation, tf.int32)
if py:
return common_utils.create_py_policy_from_table(policy_distribution,
obs_to_index_fn)
else:
return common_utils.create_tf_policy_from_table(
policy_distribution,
obs_to_index_fn,
return_distribution=return_distribution) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_optimal_policy(self):\n self.Policy = np.argmax(self.Q, axis=1)\n if self.mode == 'debug':\n print(\"Optimal Policy:\",self.Policy)",
"def getPolicy(self, state):\n \"\"\"Description:\n Find all of q-values of current state, and choose the action \n with the hight q-value as optimal policy\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n legalActions = self.getLegalActions(state)\n action = None\n policy = util.Counter() # use counter to store action and its q-value\n \n if len(legalActions) == 0:\n return action\n \n for a in legalActions:\n policy[a] = self.getQValue(state, a)\n action = policy.argMax()\n return action\n\n \"\"\" END CODE \"\"\"",
"def maximize(self, budget, optimizer):\n\n\t\tpass",
"def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy",
"def _set_up_acq_opt_rand(self):\n def _random_max_wrap(*args):\n \"\"\" A wrapper so as to only return optimal point.\"\"\"\n _, opt_pt = random_maximise(*args)\n return opt_pt\n # Set this up in acq_optimise\n self.acq_optimise = lambda obj, max_evals: _random_max_wrap(obj, self.domain_bounds,\n max_evals)\n if self.get_acq_opt_max_evals is None:\n lead_const = 10 * min(5, self.domain_dim)**2\n self.get_acq_opt_max_evals = lambda t: np.clip(\n lead_const * np.sqrt(min(t, 1000)), 2000, 3e4)\n # Acquisition function should be evaluated via multiple evaluations\n self.acq_query_type = 'multiple'",
"def fit(self):\n if self.minimizer == \"differential_evolution\":\n kwargs = {\"maxiter\": self._maxiter}\n elif self.minimizer == \"shgo\":\n kwargs = {\"options\": {\"maxiter\": self._maxiter,\n \"jac\": self.cost_func.jac_cost}}\n elif self.minimizer == \"dual_annealing\":\n kwargs = {\"maxiter\": self._maxiter, \"local_search_options\": {\n \"jac\": self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif \"Maximum number of iteration\" in result.message:\n self._status = 1\n else:\n self._status = 2",
"def __init__(self, policy_to_optimize, logging_policy, click_model,\n eta=1.0):\n super().__init__(policy_to_optimize=policy_to_optimize)\n with super().init_scope():\n self.bandify = RankingBandify(logging_policy, click_model)\n self.propensity_estimator = PresentationBiasPropensityEstimator(eta)",
"def update_policy(env, policy, V, discount_factor):\n\n for state in range(env.nS):\n # for a given state compute state-action value.\n action_values = one_step_lookahead(env, state, V, discount_factor)\n\n # choose the action which maximizes the state-action value.\n policy[state] = np.argmax(action_values)\n\n return policy",
"def minimize(self):\n self.normalize()\n p0s = self.spacedvals(method='random')\n if self.n_spots > 1:\n opts = self.multifit(p0s)\n else:\n opts = self.singlefit(p0s)\n self.yf = [self.solve(theta) for theta in opts]\n self.bestps = opts\n return opts",
"def solve(self):\n\n if self.optimizer == 'pulp':\n for constraint in self.constraints:\n self.engine_model += constraint\n\n self.engine_model += self.objective\n status = self.engine_model.solve(PULP_CBC_CMD(msg=False))\n solution = (\n np.vectorize(self._var_sol)(self.variable_set)\n if status == LpStatusOptimal\n else np.array([])\n )\n\n else:\n for constraint in self.constraints:\n self.engine_model.addConstr(constraint)\n\n self.engine_model.setObjective(self.objective, self.sense)\n self.engine_model.optimize()\n solution = (\n np.vectorize(self._var_sol)(self.variable_set)\n if self.engine_model.status == GRB.OPTIMAL\n else np.array([])\n )\n\n return solution",
"def get_softmax_policy(intensity_of_choice):\n beta = intensity_of_choice\n \n def softmax_policy(Qvalues_oa):\n \"\"\"Returns softmax action probabilites from Qvalues\"\"\"\n betaQoa = beta * Qvalues_oa\n betaQoa_ = betaQoa - betaQoa.mean(-1, keepdims=True)\n expQoa = np.exp(betaQoa_)\n assert not np.any(np.isinf(expQoa)), \"behavior policy contains infs\"\n return expQoa / expQoa.sum(axis=-1, keepdims=True)\n \n return softmax_policy",
"def create_greedy_policy(self):\n\n def policy_fn(state):\n return self.actor_baseline.predict([[state]])[0][0]\n\n return policy_fn",
"def get_dqn_policy(locals_: dict = {}, globals_: dict = {}):\n from attackgraph.rl.dqn.dqn import DQN\n best_responder = DQN(\n is_attacker=locals_[\"training_attacker\"],\n input_size=locals_[\"observation_space\"],\n hidden_sizes=gin.REQUIRED,\n output_size=locals_[\"n_actions\"],\n parameter_noise=locals_[\"self\"].param_noise)\n return best_responder",
"def create_optimizer(self, context, optimizer, host):\n pass",
"def optimize(self):\n # Loop through every WD and WS individually\n wd_array = self.fi_subset.floris.flow_field.wind_directions\n ws_array = self.fi_subset.floris.flow_field.wind_speeds\n for nwsi, ws in enumerate(ws_array):\n\n self.fi_subset.reinitialize(wind_speeds=[ws])\n\n for nwdi, wd in enumerate(wd_array):\n # Find turbines to optimize\n turbs_to_opt = self._turbs_to_opt_subset[nwdi, nwsi, :]\n if not any(turbs_to_opt):\n continue # Nothing to do here: no turbines to optimize\n\n # Extract current optimization problem variables (normalized)\n yaw_lb = self._minimum_yaw_angle_subset_norm[nwdi, nwsi, turbs_to_opt]\n yaw_ub = self._maximum_yaw_angle_subset_norm[nwdi, nwsi, turbs_to_opt]\n bnds = [(a, b) for a, b in zip(yaw_lb, yaw_ub)]\n x0 = self._x0_subset_norm[nwdi, nwsi, turbs_to_opt]\n\n J0 = self._farm_power_baseline_subset[nwdi, nwsi]\n yaw_template = self._yaw_angles_template_subset[nwdi, nwsi, :]\n turbine_weights = self._turbine_weights_subset[nwdi, nwsi, :]\n yaw_template = np.tile(yaw_template, (1, 1, 1))\n turbine_weights = np.tile(turbine_weights, (1, 1, 1))\n\n # Define cost function\n def cost(x):\n x_full = np.array(yaw_template, copy=True)\n x_full[0, 0, turbs_to_opt] = x * self._normalization_length\n return (\n - 1.0 * self._calculate_farm_power(\n yaw_angles=x_full,\n wd_array=[wd],\n turbine_weights=turbine_weights\n )[0, 0] / J0\n )\n\n # Perform optimization\n residual_plant = minimize(\n fun=cost,\n x0=x0,\n bounds=bnds,\n method=self.opt_method,\n options=self.opt_options,\n )\n\n # Undo normalization/masks and save results to self\n self._farm_power_opt_subset[nwdi, nwsi] = -residual_plant.fun * J0\n self._yaw_angles_opt_subset[nwdi, nwsi, turbs_to_opt] = (\n residual_plant.x * self._normalization_length\n )\n\n # Finalize optimization, i.e., retrieve full solutions\n df_opt = self._finalize()\n return df_opt",
"def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n def policyFunction(state):\n\n Action_probabilities = np.ones(num_actions,\n dtype = float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction",
"def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n\n def policyFunction(state):\n Action_probabilities = np.ones(num_actions,\n dtype=float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction",
"def compute_optimal_policy(self):\n\n self.theta_history.append(self.theta)\n\n since = time()\n for it in range(self.n_itr):\n print(\"lr: {} | Iteration N: {} \\r\".format(self.lr, it), end=\"\")\n\n self.policy = GaussianPolicy(self.theta, self.sigma)\n\n # Simulate N trajectories\n paths = collect_episodes(\n self.sim, policy=self.policy, horizon=self.T, n_episodes=self.n_episodes)\n\n avg_return = self._compute_performance(paths=paths)\n self.avg_returns.append(avg_return)\n\n # Gradient update\n self.theta += self.update_rule(self.policy.grad_J(\n paths, self.discounts, n_ep=self.n_episodes, T=self.T), lr=self.lr)\n\n # History update\n self.theta_history.append(self.theta)\n\n # print(\"\\nTook {}s\".format(round(time() - since, 2)))\n print(\"lr: {} | Iteration N: {} | Took: {}s\".format(self.lr, self.n_itr, round(time() - since, 2)))",
"def optimize(self):\n\n self.logger.info(\"Solving with Dynamic Slope Scaling Procedure in Julia :\")\n optimization_start = time.time()\n\n # 1. Preprocess for old network graph\n if self.old_network_graph is not None:\n\n # DSSP on old network\n old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5\n try:\n self.check_infeasibility(self.old_network_graph, old_network_obj)\n except DHCOptimizerException as e:\n e.data = \"Invalid existing network: \" + e.data\n raise e\n\n flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)\n\n if self.modify_old_network:\n\n # Add max capacity on old edges\n self.old_capacity = deepcopy(flows)\n old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values())\n for key in flows:\n if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:\n self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]\n\n # Add Imaginary edges\n for edge in self.old_capacity:\n if self.optimization_graph.has_edge(*edge):\n\n # add nodes\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[0])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[1])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]\n # add edges\n if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])\n\n # put cost\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n\n else:\n # if we don't modify the old network, we have to change the capacity of the supplies\n already_consummed = {}\n for edge in solution_old_graph.edges():\n if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \\\n solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]\n for source in already_consummed:\n if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]\n self.network_objective -= already_consummed[source]\n else:\n 
self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0\n\n # Remove edges from old network\n edges_to_remove = set()\n for e in self.optimization_graph.edges():\n if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):\n edges_to_remove.add(e)\n self.optimization_graph.remove_edges_from(edges_to_remove)\n\n # Remove isolated buildings of optimization graph\n isolated_to_remove = set()\n for e in self.old_network_graph.edges():\n if e[0] in self.old_network_graph.nodes() and \\\n self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n isolated_to_remove.add(e)\n self.optimization_graph.remove_edges_from(isolated_to_remove)\n\n # Remove buildings from old network\n for n, data in self.old_network_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n self.optimization_graph.remove_node(n)\n\n # Re-link sources\n sources = set()\n for n, data in self.optimization_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n sources.add(n)\n source_graph = self.optimization_graph.subgraph(sources).copy()\n self.optimization_graph.remove_nodes_from(sources)\n gnx.remove_isolates(self.optimization_graph)\n node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE\n gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)\n\n # fill missing information\n gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)\n for e in self.optimization_graph.edges(keys=True):\n self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \\\n self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)\n\n\n\n # 2. Process the DSSP on optimization graph\n self.check_is_ready()\n self.check_infeasibility(self.optimization_graph, self.network_objective)\n\n if self.old_network_graph is not None and self.modify_old_network:\n old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())\n else:\n old_buildings = set()\n flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)\n\n # 3. 
Postprocess for old network graph\n if self.old_network_graph is not None:\n \n if self.modify_old_network:\n # Put the right supply capacity and cost\n for edge in self.old_capacity:\n if self.solution_graph.has_edge(edge[0],edge[1]):\n self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]\n \n # Remove imaginary edges\n imaginary_nodes_to_remove = set()\n nodes_to_relabel = {}\n for edge in self.solution_graph.edges():\n if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):\n real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]\n self.old_capacity[(real_edge[0], real_edge[1], 0)] = pd.np.inf\n self.old_capacity[(real_edge[1], real_edge[0], 0)] = pd.np.inf\n if not self.solution_graph.has_edge(*real_edge):\n for i in range(2):\n nodes_to_relabel[edge[i]] = real_edge[i]\n else:\n self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \\\n self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)\n imaginary_nodes_to_remove.add(edge[0])\n imaginary_nodes_to_remove.add(edge[1])\n elif str(edge[0]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[0])\n elif str(edge[1]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[1])\n\n nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)\n self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))\n for node in nodes_to_relabel.values():\n if self.solution_graph.has_edge(node, node):\n self.solution_graph.remove_edge(node, node)\n\n else:\n for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):\n self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n\n return flows, obj_val",
"def build_problem(forecast, parameters):\n # --------------------------------------------------------------------------\n # CONSTANTS\n # --------------------------------------------------------------------------\n\n # length of optimization window, in time steps\n NN = len(forecast)\n # big number for penalties which approximate constraints\n BIGM = 1e4\n\n # number of pieces in piecewise-linear efficiency curves\n KK = 5\n\n # --------------------------------------------------------------------------\n # PRE-PROCESS FORECASTS & PARAMETERS\n # --------------------------------------------------------------------------\n\n # convert forecast from list of dicts to dict of lists\n _forecast = {}\n for f_c in forecast:\n for key, value in list(f_c.items()):\n try:\n _forecast[key].append(value)\n except KeyError:\n _forecast[key] = [value]\n forecast = _forecast\n\n # # uncomment to log forecasts and parameters\n # LOG.debug(\"============================================================\")\n # for k, v in forecast.items():\n # LOG.debug((k, v))\n # LOG.debug(\"============================================================\")\n # import json\n # LOG.debug(json.dumps(parameters, indent=4, sort_keys=True))\n # LOG.debug(\"============================================================\")\n\n component_names = sorted(list(parameters[\"IOComponent\"].keys()))\n component_para = OrderedDict()\n for name in component_names:\n component_para[name] = parameters[\"IOComponent\"][name]\n\n storage_para = OrderedDict()\n elec_storage_names = sorted(list(parameters.get(\"battery\", {}).keys()))\n for name in elec_storage_names:\n _params = parameters[\"battery\"][name]\n storage_para[name] = Storage(\n emax=_params[\"capacity\"],\n pmax=_params[\"max_power\"],\n eta_ch=_params[\"charge_eff\"],\n eta_disch=_params[\"discharge_eff\"],\n soc_min=_params[\"min_soc\"],\n soc_max=_params[\"max_soc\"],\n now_soc=_params[\"soc\"],\n name=name,\n )\n\n heat_storage_names = sorted(list(parameters.get(\"thermal_storage\", {}).keys()))\n for name in heat_storage_names:\n _params = parameters[\"thermal_storage\"][name]\n storage_para[name] = Storage(\n emax=_params[\"heat_cap\"],\n pmax=_params[\"max_power\"],\n eta_ch=_params[\"eff\"],\n eta_disch=_params[\"eff\"],\n now_soc=_params[\"soc\"],\n name=name,\n )\n\n # --------------------------------------------------------------------------\n # INDEXES\n # --------------------------------------------------------------------------\n\n index_hour = (list(range(NN)),)\n index_without_first_hour = (list(range(1, NN)),)\n\n index_storage = elec_storage_names + heat_storage_names, list(range(NN))\n index_component = component_names, list(range(NN))\n index_component_piecewise = component_names, list(range(NN)), list(range(KK))\n\n index_ramp_up = ([name for name in component_names if component_para[name][\"ramp_up\"] is not None],)\n index_ramp_down = ([name for name in component_names if component_para[name][\"ramp_down\"] is not None],)\n\n index_gas_in = [name for name in component_names if component_para[name][\"input_commodity\"] == \"gas\"]\n # no component outputs gas\n index_elec_in = [name for name in component_names if component_para[name][\"input_commodity\"] == \"elec\"]\n index_elec_out = [name for name in component_names if component_para[name][\"output_commodity\"] == \"elec\"]\n index_cool_in = [name for name in component_names if component_para[name][\"input_commodity\"] == \"cool\"]\n index_cool_out = [name for name in component_names if 
component_para[name][\"output_commodity\"] == \"cool\"]\n index_heat_in = [name for name in component_names if component_para[name][\"input_commodity\"] == \"heat\"]\n index_heat_out = [name for name in component_names if component_para[name][\"output_commodity\"] == \"heat\"]\n\n # --------------------------------------------------------------------------\n # VARIABLES\n # --------------------------------------------------------------------------\n\n # market commodities\n elec_from_grid = VariableGroup(\"elec_from_grid\", indexes=index_hour)\n elec_to_grid = VariableGroup(\"elec_to_grid\", indexes=index_hour)\n\n # include these in balances with high penalty to ensure feasibility\n # if these are nonzero, problem definition is likely wrong\n elec_unserve = VariableGroup(\"elec_unserve\", indexes=index_hour)\n elec_dump = VariableGroup(\"elec_dump\", indexes=index_hour)\n heat_unserve = VariableGroup(\"heat_unserve\", indexes=index_hour)\n heat_dump = VariableGroup(\"heat_dump\", indexes=index_hour)\n cool_unserve = VariableGroup(\"cool_unserve\", indexes=index_hour)\n cool_dump = VariableGroup(\"cool_dump\", indexes=index_hour)\n\n # for heat recovery unit accounting\n heat_hru_out = VariableGroup(\"heat_hru_out\", indexes=index_hour)\n\n # IO Components\n component_input = VariableGroup(\"component_input\", indexes=index_component)\n component_output = VariableGroup(\"component_output\", indexes=index_component)\n component_output_k = VariableGroup(\"component_output_k\", indexes=index_component_piecewise)\n component_status = VariableGroup(\n \"component_status\", indexes=index_component, upper_bound_func=constant(1), is_integer_var=True\n )\n component_status_k = VariableGroup(\n \"component_status_k\", indexes=index_component_piecewise, upper_bound_func=constant(1), is_integer_var=True\n )\n component_start = VariableGroup(\n \"component_start\", indexes=index_component, upper_bound_func=constant(1), is_integer_var=True\n )\n\n # storage\n def storage_upper_bound(index):\n \"\"\"storage (dis)charge upper bound is max power\"\"\"\n i = index[0]\n return storage_para[i].pmax\n\n storage_disch = VariableGroup(\"storage_disch\", indexes=index_storage, upper_bound_func=storage_upper_bound)\n storage_ch = VariableGroup(\"storage_ch\", indexes=index_storage, upper_bound_func=storage_upper_bound)\n\n def storage_state_lb(index):\n \"\"\"storage state lower bound is minimum state of charge * capacity\"\"\"\n i = index[0]\n return storage_para[i].emax * storage_para[i].soc_min\n\n def storage_state_ub(index):\n \"\"\"storage state upper bound is maximum state of charge * capacity\"\"\"\n i = index[0]\n return storage_para[i].emax * storage_para[i].soc_max\n\n storage_state = VariableGroup(\n \"storage_state\", indexes=index_storage, lower_bound_func=storage_state_lb, upper_bound_func=storage_state_ub\n )\n\n # --------------------------------------------------------------------------\n # CONSTRAINTS\n # --------------------------------------------------------------------------\n\n # constraints should take the form `(con, name)` where `con` is a boolean\n # statement containing PuLP variables and `name` is a string label\n\n constraints = []\n\n def add_constraint(name, indexes, constraint_func):\n \"\"\"Add a constraint for each index in a group\n\n :param name: base for constraint names\n :param indexes: iterable of indexes to apply constraints to\n :param constraint_func: applied to each element in product of indexes\n \"\"\"\n name_base = name\n for _ in range(len(indexes)):\n name_base += 
\"_{}\"\n\n for index in itertools.product(*indexes):\n name = name_base.format(*index)\n con = constraint_func(index)\n\n constraints.append((con, name))\n\n #################\n # Energy Balances\n #################\n\n def elec_balance(index):\n \"\"\"Balance electricity supply/demand for each timestep\"\"\"\n t = index[0]\n return (\n pulp.lpSum([component_output[i, t] for i in index_elec_out])\n - pulp.lpSum([component_input[i, t] for i in index_elec_in])\n + elec_from_grid[t]\n - elec_to_grid[t]\n + pulp.lpSum([storage_disch[i, t] for i in elec_storage_names])\n - pulp.lpSum([storage_ch[i, t] for i in elec_storage_names])\n + elec_unserve[t]\n - elec_dump[t]\n == forecast[\"elec_load\"][t]\n )\n\n add_constraint(\"elec_balance\", index_hour, elec_balance)\n\n def heat_balance(index):\n \"\"\"Balance heat supply/demand for each timestep\"\"\"\n t = index[0]\n return (\n heat_hru_out[t]\n + pulp.lpSum([component_output[i, t] for i in index_heat_out])\n - pulp.lpSum([component_input[i, t] for i in index_heat_in])\n + heat_unserve[t]\n - heat_dump[t]\n == forecast[\"heat_load\"][t]\n )\n\n add_constraint(\"heat_balance\", index_hour, heat_balance)\n\n def cool_balance(index):\n \"\"\"Balance cool supply/demand for each timestep\"\"\"\n t = index[0]\n return (\n pulp.lpSum([component_output[i, t] for i in index_cool_out])\n - pulp.lpSum([component_input[i, t] for i in index_cool_in])\n + pulp.lpSum([storage_disch[i, t] for i in heat_storage_names])\n - pulp.lpSum([storage_ch[i, t] for i in heat_storage_names])\n + cool_unserve[t]\n - cool_dump[t]\n == forecast[\"cool_load\"][t]\n )\n\n add_constraint(\"cool_balance\", index_hour, cool_balance)\n\n ##################\n # Storage Behavior\n ##################\n\n storage_start_state = {i: storage_para[i].now_soc * storage_para[i].emax for i in index_storage[0]}\n\n def storage_init(index):\n \"\"\"Storage balance at first timestep\"\"\"\n i, t = index[0], 0\n return (\n storage_state[i, t]\n == storage_start_state[i]\n + storage_para[i].eta_ch * storage_ch[i, t]\n - 1 / storage_para[i].eta_disch * storage_disch[i, t]\n )\n\n add_constraint(\"storage_init\", (index_storage[0],), storage_init)\n\n def storage_state_constraint(index):\n \"\"\"Storage balance at 0<t<N-1\"\"\"\n i, t = index\n return (\n storage_state[i, t]\n == storage_state[i, t - 1]\n + storage_para[i].eta_ch * storage_ch[i, t]\n - 1 / storage_para[i].eta_disch * storage_disch[i, t]\n )\n\n add_constraint(\"storage_state_constraint\", (index_storage[0],) + index_without_first_hour, storage_state_constraint)\n\n def storage_final(index):\n \"\"\"Storage balance at last timestep\n\n Forces storage[N-1] >= storage[0]\n \"\"\"\n i, t = index[0], NN - 1\n return storage_state[i, t] >= storage_start_state[i]\n\n add_constraint(\"storage_final\", (index_storage[0],), storage_final)\n\n #######################\n # IO Component Behavior\n #######################\n\n def component_input_constraint(index):\n \"\"\"Input is piecewise-linear function of output\"\"\"\n i, t = index\n return component_input[i, t] == [\n a * v for a, v in zip(component_para[i][\"fundata\"][\"a\"], component_output_k[i, t, RANGE])\n ] + [b * v for b, v in zip(component_para[i][\"fundata\"][\"b\"], component_status_k[i, t, RANGE])]\n\n add_constraint(\"component_input_constraint\", index_component, component_input_constraint)\n\n def component_output_constraint(index):\n \"\"\"Output is sum of output pieces\"\"\"\n i, t = index\n return component_output[i, t] == pulp.lpSum(component_output_k[i, t, RANGE])\n\n 
add_constraint(\"component_output_constraint\", index_component, component_output_constraint)\n\n def component_piece_lower(index):\n \"\"\"Lower bounds of output pieces\"\"\"\n i, t, k = index\n xmin = component_para[i][\"fundata\"][\"min\"][k]\n return xmin * component_status_k[i, t, k] <= component_output_k[i, t, k]\n\n add_constraint(\"component_piece_lower\", index_component_piecewise, component_piece_lower)\n\n def component_piece_upper(index):\n \"\"\"Upper bounds of output pieces\"\"\"\n i, t, k = index\n xmax = component_para[i][\"fundata\"][\"max\"][k]\n return xmax * component_status_k[i, t, k] >= component_output_k[i, t, k]\n\n add_constraint(\"component_piece_upper\", index_component_piecewise, component_piece_upper)\n\n def component_status_constraint(index):\n \"\"\"Status is sum of status pieces\"\"\"\n i, t = index\n return component_status[i, t] == pulp.lpSum(component_status_k[i, t, RANGE])\n\n add_constraint(\"component_status_constraint\", index_component, component_status_constraint)\n\n def component_start_status_init(index):\n \"\"\"Whether component starts up on first timestep\"\"\"\n i, t = index[0], 0\n status = component_para[i][\"command_history\"][-1]\n return component_start[i, t] >= component_status[i, t] - status\n\n add_constraint(\"component_start_status_init\", (component_names,), component_start_status_init)\n\n def component_start_status(index):\n \"\"\"Whether component starts up on t>0\"\"\"\n i, t = index\n return component_start[i, t] >= component_status[i, t] - component_status[i, t - 1]\n\n add_constraint(\"component_start_status\", (component_names,) + index_without_first_hour, component_start_status)\n\n def component_ramp_up_init(index):\n \"\"\"Do not increase output by too much on first timestep\"\"\"\n i, t = index[0], 0\n ramp_up = component_para[i][\"ramp_up\"]\n output_init = component_para[i][\"output\"]\n return component_output[i, t] <= output_init + ramp_up\n\n add_constraint(\"component_ramp_up_init\", index_ramp_up, component_ramp_up_init)\n\n def component_ramp_up(index):\n \"\"\"Do not increase output by too much for t>0\"\"\"\n i, t = index\n ramp_up = component_para[i][\"ramp_up\"]\n return component_output[i, t] <= component_output[i, t - 1] + ramp_up\n\n add_constraint(\"component_ramp_up\", index_ramp_up + index_without_first_hour, component_ramp_up)\n\n def component_ramp_down_init(index):\n \"\"\"Ramp down constraint on first timestep\"\"\"\n i, t = index[0], 0\n ramp_down = component_para[i][\"ramp_down\"]\n output_init = component_para[i][\"output\"]\n return output_init + ramp_down <= component_output[i, t]\n\n add_constraint(\"component_ramp_down_init\", index_ramp_down, component_ramp_down_init)\n\n def component_ramp_down(index):\n \"\"\"Do not decrease output by too much for t>0\"\"\"\n i, t = index\n ramp_down = component_para[i][\"ramp_down\"]\n return component_output[i, t - 1] + ramp_down <= component_output[i, t]\n\n add_constraint(\"component_ramp_down\", index_ramp_down + index_without_first_hour, component_ramp_down)\n\n # it was easier to define these constraints in a for-loop instead of with\n # the `add_constraint` function, but either could be used.\n name = \"component_lock_on_{}_{}\"\n for i in component_names:\n history = component_para[i][\"command_history\"]\n min_on = component_para[i][\"min_on\"]\n if min_on == 0:\n continue\n # history all from parameters\n for t in range(0, 1):\n con = min_on * (history[-1] - component_status[i, t]) <= pulp.lpSum(history[NN + t - min_on : NN])\n 
constraints.append((con, name.format(i, t)))\n # history partially from parameters, partially decision\n for t in range(1, min_on):\n con = min_on * (component_status[i, t - 1] - component_status[i, t]) <= pulp.lpSum(\n [component_status[i, tau] for tau in range(0, t)]\n ) + pulp.lpSum(history[NN + t - min_on : NN])\n constraints.append((con, name.format(i, t)))\n # history all decision\n for t in range(min_on, NN):\n con = min_on * (component_status[i, t - 1] - component_status[i, t]) <= pulp.lpSum(\n [component_status[i, tau] for tau in range(t - min_on, t)]\n )\n constraints.append((con, name.format(i, t)))\n\n name = \"component_lock_off_{}_{}\"\n for i in component_names:\n history = component_para[i][\"command_history\"]\n min_off = component_para[i][\"min_off\"]\n if min_off == 0:\n continue\n # history all from parameters\n for t in range(0, 1):\n con = min_off * (component_status[i, t] - history[-1]) <= pulp.lpSum(\n [1 - v for v in history[NN + t - min_off : NN]]\n )\n constraints.append((con, name.format(i, t)))\n # history partially from parameters, partially decision\n for t in range(1, min_off):\n con = min_off * (component_status[i, t] - component_status[i, t - 1]) <= pulp.lpSum(\n [1 - component_status[i, tau] for tau in range(0, t)]\n ) + pulp.lpSum([1 - v for v in history[NN + t - min_off : NN]])\n constraints.append((con, name.format(i, t)))\n # history all decision\n for t in range(min_off, NN):\n con = min_off * (component_status[i, t] - component_status[i, t - 1]) <= pulp.lpSum(\n [1 - component_status[i, tau] for tau in range(t - min_off, t)]\n )\n constraints.append((con, name.format(i, t)))\n\n def hru_limit(index):\n \"\"\"Heat recovery unit behavior\"\"\"\n t = index[0]\n partial = []\n for name in component_names:\n eff = component_para[name][\"hru_eff\"]\n if eff == 0:\n continue\n convert = component_para[name][\"hru_convert\"]\n partial.append(eff * (component_input[name, t] - convert * component_output[name, t]))\n return heat_hru_out[t] <= pulp.lpSum(partial)\n\n add_constraint(\"hru_limit\", index_hour, hru_limit)\n\n # --------------------------------------------------------------------------\n # OBJECTIVE FUNCTION\n # --------------------------------------------------------------------------\n\n objective_components = []\n\n for var, _lambda in zip(elec_from_grid[RANGE], forecast[\"electricity_cost\"]):\n objective_components.append(var * _lambda)\n\n # # uncomment to sell electricity at purchase price\n # for var, _lambda in zip(elec_to_grid[RANGE],\n # forecast[\"electricity_cost\"]):\n # objective_components.append(var * _lambda * -1.)\n\n for i in index_gas_in:\n for var, _lambda in zip(component_input[i, RANGE], forecast[\"natural_gas_cost\"]):\n objective_components.append(var * _lambda)\n\n for i in component_names:\n for var in component_start[i, RANGE]:\n objective_components.append(var * component_para[i][\"start_cost\"])\n\n for i in component_names:\n for var in component_status[i, RANGE]:\n objective_components.append(var * component_para[i][\"run_cost\"])\n\n for group in (elec_unserve, elec_dump, heat_unserve, heat_dump, cool_unserve, cool_dump):\n for var in group[RANGE]:\n objective_components.append(var * BIGM)\n\n # --------------------------------------------------------------------------\n # BUILD PROBLEM\n # --------------------------------------------------------------------------\n\n prob = pulp.LpProblem(\"Economic-Dispatch-Optimization\", pulp.LpMinimize)\n prob += pulp.lpSum(objective_components), \"Objective Function\"\n\n for 
con in constraints:\n try:\n prob += con\n except TypeError as ex:\n LOG.error(con)\n LOG.error(type(con[0]))\n LOG.error(\"PuLP variable problem with constraint {}: {}\" \"\".format(con[1], con[0]))\n raise ex\n\n return prob",
"def __init__(self,\n weight_decay,\n global_step,\n max_matrix_size=768,\n gbar_decay=0.0,\n gbar_weight=1.0,\n mat_gbar_decay=1.0,\n mat_gbar_weight=1.0,\n learning_rate=1.0,\n svd_interval=1,\n precond_update_interval=1,\n epsilon=1e-4,\n alpha=0.5,\n use_iterative_root=False,\n use_locking=False,\n name=\"ShampooW\"):\n super(ShampooWOptimizer, self).__init__(\n weight_decay,\n global_step=global_step,\n max_matrix_size=max_matrix_size,\n gbar_decay=gbar_decay,\n gbar_weight=gbar_weight,\n mat_gbar_decay=mat_gbar_weight,\n learning_rate=learning_rate,\n svd_interval=svd_interval,\n precond_update_interval=precond_update_interval,\n epsilon=epsilon,\n alpha=alpha,\n use_iterative_root=use_iterative_root,\n use_locking=use_locking,\n name=name)",
"def _create_objective(self, meta, m):\n ## cashflow eval\n rule = partial(self._cashflow_rule, meta)\n m.obj = pyo.Objective(rule=rule, sense=pyo.maximize)",
"def __init__(self, graph: Graph, code=qubit, energies=(1,), cost_function=True, use_Z2_symmetry=False):\n # If MIS is true, create an MIS Hamiltonian. Otherwise, make a MaxCut Hamiltonian\n self.code = code\n self.energies = energies\n # Make sure all edges have weight attribute; default to 1\n\n self.graph = graph\n self.optimization = 'max'\n self.n = self.graph.n\n if use_Z2_symmetry:\n c = np.zeros([self.code.d ** (self.code.n * (self.n - 1)), 1])\n else:\n c = np.zeros([self.code.d ** (self.code.n * self.n), 1])\n if tools.is_diagonal(self.code.Z):\n self._is_diagonal = True\n z = np.expand_dims(np.diagonal(self.code.Z), axis=0).T\n\n def my_eye(n):\n return np.ones((np.asarray(self.code.d ** self.code.n) ** n, 1))\n else:\n self._is_diagonal = False\n # Compute the optimum first. We don't care that this takes extra time, since it only needs to run once\n z = np.expand_dims(np.diagonal(qubit.Z), axis=0).T\n\n def my_eye(n):\n return np.ones((np.asarray(qubit.d) ** n, 1))\n\n for a, b in self.graph.edges:\n if b < a:\n a, b = b, a\n if use_Z2_symmetry:\n if cost_function:\n if a == min(self.graph.nodes):\n c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(b - 1), z, my_eye(self.n - b - 1)]) - my_eye(self.n - 1))\n else:\n c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(a - 1), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)]) - my_eye(self.n - 1))\n else:\n if a == min(self.graph.nodes):\n c = c + graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(b - 1), z, my_eye(self.n - b - 1)]))\n else:\n c = c + graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(a - 1), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)]))\n else:\n if cost_function:\n c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(a), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)]) - my_eye(self.n))\n else:\n c = c + graph.graph[a][b]['weight'] * tools.tensor_product(\n [my_eye(a), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)])\n self._optimum = np.max(c).real\n c = sparse.csr_matrix((self.code.d ** (self.code.n * self.n), self.code.d ** (self.code.n * self.n)))\n\n z = sparse.csr_matrix(self.code.Z)\n\n def my_eye(n):\n return sparse.csr_matrix(np.ones(np.asarray(z.shape[0]) ** n),\n (np.asarray(z.shape[0]) ** n, np.asarray(z.shape[0]) ** n))\n\n for a, b in self.graph.edges:\n if b < a:\n a, b = b, a\n\n if cost_function:\n if use_Z2_symmetry:\n if a == min(self.graph.nodes):\n c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(b - 1), z, my_eye(self.n - b - 1)]) - my_eye(self.n - 1))\n else:\n c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(a - 1), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)]) - my_eye(self.n - 1))\n else:\n c = c - 1 / 2 * graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(a), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)],\n sparse=(not self._is_diagonal)) - my_eye(\n self.n))\n else:\n if use_Z2_symmetry:\n if a == min(self.graph.nodes):\n c = c + graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(b - 1), z, my_eye(self.n - b - 1)]))\n else:\n c = c + graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(a - 1), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)]))\n\n else:\n c = c + graph.graph[a][b]['weight'] * (tools.tensor_product(\n [my_eye(a), z, my_eye(b - a - 1), z, my_eye(self.n - b - 1)],\n sparse=(not self._is_diagonal)))\n if self._is_diagonal:\n self._diagonal_hamiltonian = c\n 
self._optimum = np.max(c).real\n if use_Z2_symmetry:\n c = sparse.csr_matrix((c.flatten(), (np.arange(self.code.d ** (self.code.n * (self.n - 1))),\n np.arange(self.code.d ** (self.code.n * (self.n - 1))))),\n shape=(self.code.d ** (self.code.n * (self.n - 1)),\n self.code.d ** (self.code.n * (self.n - 1))))\n else:\n c = sparse.csr_matrix((c.flatten(), (np.arange(self.code.d ** (self.code.n * self.n)),\n np.arange(self.code.d ** (self.code.n * self.n)))),\n shape=(self.code.d ** (self.code.n * self.n),\n self.code.d ** (self.code.n * self.n)))\n else:\n # c is already the right shape, just convert it to a csr matrix\n c = sparse.csr_matrix(c)\n self._hamiltonian = c\n self._left_acting_hamiltonian = None\n self._right_acting_hamiltonian = None",
"def __init__(self, optimizer='BFGS', optimizer_kwargs=None,\n lossprime=True, max_iterations = 1000000):\n\n user_kwargs = optimizer_kwargs\n optimizer_kwargs = {}\n print(f\"in {optimizer}: max_iterations = {max_iterations}\")\n if optimizer == 'BFGS':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method' : 'BFGS',\n 'options': {'gtol': 1e-15,\n 'maxiter': max_iterations}\n }\n #optimizer_kwargs = {'method':'BFGS', 'gtol': 1e-15, }\n elif optimizer == 'L-BFGS-B':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'L-BFGS-B',\n 'options': {'ftol': 1e-05,\n 'gtol': 1e-08,\n 'maxfun': max_iterations,\n 'maxiter': max_iterations}\n }\n import scipy\n from distutils.version import StrictVersion\n if StrictVersion(scipy.__version__) >= StrictVersion('0.17.0'):\n optimizer_kwargs['options']['maxls'] = 2000\n elif optimizer == 'TNC':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'TNC',\n 'options': {'ftol': 0.,\n 'xtol': 0.,\n 'gtol': 1e-08,\n 'maxiter': max_iterations, }\n }\n elif optimizer == 'Newton-CG':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'Newton-CG',\n 'options': {'xtol': 1e-15,\n 'maxiter': max_iterations,}\n }\n\n elif optimizer == 'Nelder-Mead':\n from scipy.optimize import minimize as optimizer\n optimizer_kwargs = {\n 'method': 'Nelder-Mead',\n 'options': {'maxfun': max_iterations,\n 'maxiter': max_iterations, }\n }\n lossprime = False\n\n if user_kwargs:\n optimizer_kwargs.update(user_kwargs)\n self.optimizer = optimizer\n self.optimizer_kwargs = optimizer_kwargs\n self.lossprime = lossprime",
"def optimizer(self) -> Optimizer | Minimizer:\n return self._optimizer",
"def _inst_optimizer(self):\n optimizer = Optimizers(self.m_cfg['configs']['lr_politics']['optimizer']).value\n lr_schedule = self.m_cfg['configs']['lr_politics']['lr']\n opt = optimizer(learning_rate=lr_schedule)\n return opt",
"def compute_heuristic(self, state):\n if self._shape_reward_mode == \"optimal\":\n problem = self.problems[self._problem_idx]\n\n # Add action literals to state to enable planning\n state_lits = set(state.literals)\n action_lits = set(\n self.action_space.all_ground_literals(state, valid_only=False))\n state_lits |= action_lits\n\n problem_path = \"\"\n try:\n # generate a temporary file to hand over to the external planner\n fd, problem_path = tempfile.mkstemp(dir=TMP_PDDL_DIR, text=True)\n with os.fdopen(fd, \"w\") as f:\n problem.write(f, initial_state=state_lits, fast_downward_order=True)\n\n return get_fd_optimal_plan_cost(\n self.domain.domain_fname, problem_path)\n finally:\n try:\n os.remove(problem_path)\n except FileNotFoundError:\n pass\n else:\n return self._heuristic(state)",
"def _get_optimizer(self):\n raise NotImplementedError",
"def _optimize(self, objective):\n # Initial value\n initial = self.get_initial()[0]\n\n if self.vector_to_matrix_transform is not None:\n initial = self.vector_to_matrix_transform(initial)\n\n if self.solver_type is 'NelderMead' or self.solver_type is 'ParticleSwarm':\n initial = None\n\n # Create tensorflow variable\n if self.matrix_manifold_dimension is None:\n x_tf = tf.Variable(tf.zeros(self.dimension, dtype=tf.float64))\n else:\n x_tf = tf.Variable(tf.zeros([self.matrix_manifold_dimension, self.matrix_manifold_dimension], dtype=tf.float64))\n\n # Cost function for pymanopt\n def objective_fct(x):\n if self.matrix_to_vector_transform_tf is not None:\n # Reshape x from matrix to vector form to compute the objective function (tensorflow format)\n x = self.matrix_to_vector_transform_tf(x, self.matrix_manifold_dimension)\n return objective(x)[0]\n\n # Transform the cost function to tensorflow function\n cost = tf.py_function(objective_fct, [x_tf], tf.float64)\n\n # Gradient function for pymanopt\n def objective_grad(x):\n if self.matrix_to_vector_transform is not None:\n # Reshape x from matrix to vector form to compute the gradient\n x = self.matrix_to_vector_transform(x)\n\n # Compute the gradient\n grad = np.array(objective(x)[1])[0]\n\n if self.vector_to_matrix_transform is not None:\n # Reshape the gradient in matrix form for the optimization on the manifold\n grad = self.vector_to_matrix_transform(grad)\n return grad\n\n # Define pymanopt problem\n problem = pyman.Problem(manifold=self.manifold, cost=cost, egrad=objective_grad, arg=x_tf, verbosity=2)\n\n # Optimize the parameters of the problem\n opt_x, opt_log = self.solver.solve(problem, x=initial)\n\n if self.matrix_to_vector_transform is not None:\n # Reshape the optimum from matrix to vector form\n opt_x = self.matrix_to_vector_transform(opt_x)\n\n # Format the result to fit with GPflowOpt\n result = sc_opt.OptimizeResult(x=opt_x, fun=opt_log['final_values']['f(x)'], nit=opt_log['final_values']['iterations'], message=opt_log['stoppingreason'], success=True)\n\n return result",
"def _optimise_acquisition(acq_fn, acq_optimiser, anc_data):\n return acq_optimiser(acq_fn, anc_data.max_evals)"
] | [
"0.64927095",
"0.60700333",
"0.6013502",
"0.5950279",
"0.5819569",
"0.58070433",
"0.5794501",
"0.5701989",
"0.5638407",
"0.55973166",
"0.5588712",
"0.55322045",
"0.5500936",
"0.5482766",
"0.5460455",
"0.54048747",
"0.53948706",
"0.53906006",
"0.5379852",
"0.5373095",
"0.53423756",
"0.5285699",
"0.5271091",
"0.52700436",
"0.52684736",
"0.5254355",
"0.52432144",
"0.5242013",
"0.52377224",
"0.5236926"
] | 0.6236816 | 1 |
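
As an illustrative aside (not part of the dataset row above), the following minimal Python sketch re-derives the epsilon-greedy policy table constructed in that row's document, using a hypothetical 3-context, 2-arm rewards array as an assumed stand-in for contextual_bandit_env, and checks that each row sums to 1 with the optimal arm at probability 1 - epsilon + epsilon / num_arms:

import numpy as np

# Hypothetical per-context arm rewards (3 contexts, 2 arms); an assumption for
# illustration only, since the dataset's contextual_bandit_env is not available here.
rewards = np.array([[1.0, 0.0],
                    [0.2, 0.8],
                    [0.5, 0.4]])
epsilon_explore = 0.1
num_contexts, num_arms = rewards.shape

# Same construction as in the document above: spread epsilon of probability mass
# uniformly over the arms, then add the remaining 1 - epsilon to the greedy arm
# of each context.
optimal_action = np.argmax(rewards, axis=-1)
policy = np.ones([num_contexts, num_arms]) / num_arms
policy *= epsilon_explore
policy[np.arange(num_contexts), optimal_action] += 1 - epsilon_explore

assert np.allclose(policy.sum(axis=-1), 1.0)
assert np.allclose(policy[np.arange(num_contexts), optimal_action],
                   1 - epsilon_explore + epsilon_explore / num_arms)
print(policy)
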
Wrapper for tf.matmul (sparse vs dense). | def dot(x,y,sparse=False):
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dot(x, y, sparse=False):\n if sparse:\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n res = tf.matmul(x, y)\n return res",
"def sparse_matmul(A: SparseTensor, B: SparseTensor, out: torch.Tensor) -> torch.Tensor:\n if A.nnz() == 0 or B.nnz() == 0:\n return out\n\n if A.is_cuda:\n return _sparse_matmul_cuda(A, B, out)\n else:\n return _sparse_matmul_cpu(A, B, out)",
"def dot(x, y):\n if isinstance(x, tf.SparseTensor) and isinstance(y, tf.SparseTensor):\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n res = tf.matmul(x,y)\n return res",
"def matmul_any_tensor_dense_tensor(a,\n b,\n a_is_sparse = True,\n transpose_a = False):\n if a_is_sparse:\n _check_type('a', a, tf.SparseTensor)\n return tf.sparse.sparse_dense_matmul(\n b, a, adjoint_a=False, adjoint_b=not transpose_a)\n else:\n return tf.transpose(\n a=tf.matmul(a, tf.transpose(a=b), transpose_a=transpose_a))",
"def matmul_sparse_dense(A, B, name=None, transpose_a=False, transpose_b=False): # pylint: disable=invalid-name\n with tf.name_scope(name, \"matmul_sparse_dense\", [A, B]):\n if A.indices.shape.ndims != 1 or A.values.shape.ndims != 2:\n raise ValueError(\"A must represent a matrix. Found: %s.\" % A)\n if B.shape.ndims != 2:\n raise ValueError(\"B must be a matrix.\")\n new_values = tf.matmul(\n A.values, B, transpose_a=transpose_a, transpose_b=transpose_b)\n return tf.IndexedSlices(\n new_values,\n A.indices,\n dense_shape=tf.stack([A.dense_shape[0], new_values.shape[1]]))",
"def __matmul__(self, tensor):\n return self.matmul(tensor)",
"def my_matmul(activations, units):\n # the dimension to broadcast has to be first [batch, channels, time, cond]\n a = tf.transpose(activations, perm=[0, 3, 1, 2])\n # output tf.matmul -> [batch, channels, time, freqs]\n output = tf.matmul(a, units)\n # back to [batch, freqs, time, channels], original feature map input\n return tf.transpose(output, perm=[0, 3, 2, 1])",
"def calculate_matmul(mat_a, mat_b):\n assert mat_a.shape[-2] == 1 and mat_b.shape[-1] == 1\n return tf.reduce_sum(tf.squeeze(mat_a, -2) * tf.squeeze(mat_b, -1), axis=2, keepdims=True)",
"def matmul(A, B):\n # type: (Optional[Tensor], Tensor) -> Tensor\n if A is None:\n return B\n if is_sparse(A):\n return torch.sparse.mm(A, B)\n return torch.matmul(A, B)",
"def _sparse_matmul_cuda(A: SparseTensor, B: SparseTensor, out: torch.Tensor):\n from falkon.sparse.sparse_helpers import spspmm, csr2dense\n\n if not A.is_csr:\n raise ValueError(\"A must be CSR matrix\")\n if not B.is_csr:\n raise ValueError(\"B must be CSR matrix\")\n\n # 2. MatMul\n out_indexptr, out_index, out_data = spspmm(\n A.indexptr, A.index, A.data, B.indexptr, B.index, B.data, A.shape[1])\n # 3. Convert to dense\n out = csr2dense(out_indexptr, out_index, out_data, out)\n return out",
"def test_matmul_adjoint(self):\n x = tf.constant([[9, 4, 1, 3], [1, 3, 1, 6], [7, 2, 2, 8]],\n dtype=tf.float32)\n self.assertAllClose(\n tf.linalg.matmul(self.linop, x, adjoint_a=True),\n tf.linalg.matmul(self.matrix, x, adjoint_a=True))\n self.assertAllClose(\n tf.linalg.matmul(self.linop, x, adjoint_a=False, adjoint_b=True),\n tf.linalg.matmul(self.matrix, x, adjoint_a=False, adjoint_b=True))",
"def mul_dense(x, y): # pragma: no cover\n return x * y",
"def _matmul(t1: Tensor, t2: Tensor) -> Tensor:\n data = t1.data @ t2.data\n requires_grad = t1.requires_grad or t2.requires_grad\n depends_on: List[Dependency] = []\n\n if t1.requires_grad:\n def grad_fn1(grad: np.ndarray) -> np.ndarray:\n # handel broadcasting properly\n return grad @ t2.data.T\n\n depends_on.append(Dependency(t1, grad_fn1))\n\n if t2.requires_grad:\n def grad_fn2(grad: np.ndarray) -> np.ndarray:\n return t1.data.T @ grad\n\n depends_on.append(Dependency(t2, grad_fn2))\n\n return Tensor(data, requires_grad, depends_on)",
"def test_sparse_matmul(self, device, dtype, coalesced):\n\n def _indices2csr(indices, dim):\n nnz = len(indices)\n r = [0] * (dim + 1)\n last_i = 0\n for i in indices:\n if i != last_i:\n for _i in range(last_i, i + 1):\n r[_i + 1] = r[last_i + 1]\n last_i = i\n r[last_i + 1] += 1\n for _i in range(last_i, dim):\n r[_i + 1] = r[last_i + 1]\n assert r[-1] == nnz\n return r\n\n def sparse_mm(a, b, method='scipy'):\n a = a.to('cpu')\n b = b.to('cpu')\n if method == 'scipy':\n indices_1 = a._indices().numpy()\n values_1 = a._values().numpy()\n indices_2 = b._indices().numpy()\n values_2 = b._values().numpy()\n\n mat1 = scipy.sparse.coo_matrix((values_1, (indices_1[0], indices_1[1])), shape=a.shape)\n mat2 = scipy.sparse.coo_matrix((values_2, (indices_2[0], indices_2[1])), shape=b.shape)\n result = mat1.dot(mat2).tocoo()\n return torch.sparse_coo_tensor([result.row, result.col], result.data, result.shape,\n dtype=dtype, device=device)\n else:\n assert a.shape[1] == b.shape[0]\n n, p = a.shape\n p, m = b.shape\n indices_a = a._indices()\n values_a = a._values()\n indices_b = b._indices()\n values_b = b._values()\n nnz1 = len(indices_a[0])\n nnz2 = len(indices_b[0])\n\n if a.is_coalesced() and b.is_coalesced():\n r2 = _indices2csr(indices_b[0], b.shape[0])\n d = defaultdict(values_b.numpy().dtype.type)\n for n1 in range(nnz1):\n for n2 in range(r2[indices_a[1][n1]], r2[indices_a[1][n1] + 1]):\n d[indices_a[0][n1].item(), indices_b[1][n2].item()] += values_a[n1] * values_b[n2]\n\n else:\n d = defaultdict(values_b.numpy().dtype.type)\n for n1 in range(nnz1):\n for n2 in range(nnz2):\n if indices_b[0][n2] == indices_a[1][n1]:\n d[indices_a[0][n1].item(), indices_b[1][n2].item()] += values_a[n1] * values_b[n2]\n i3 = []\n j3 = []\n values = []\n for i, j in sorted(d):\n i3.append(i)\n j3.append(j)\n values.append(d[i, j])\n return torch.sparse_coo_tensor(torch.tensor([i3, j3]), torch.tensor(values), (n, m),\n dtype=dtype, device=device)\n\n def grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b):\n def test_grad_dense(a_s, b_s, g_s):\n a = a_s.to_dense().detach()\n b = b_s.to_dense().detach()\n g = g_s.to_dense().detach()\n\n a.requires_grad_(True)\n b.requires_grad_(True)\n c = a @ b\n c.backward(g)\n return a.grad.sparse_mask(a_s.coalesce()), b.grad.sparse_mask(b_s.coalesce())\n\n a, _, _ = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)\n b, _, _ = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)\n a.requires_grad_(True)\n b.requires_grad_(True)\n\n c = torch.sparse.mm(a, b)\n c2 = c.to_dense().detach()\n c2 = torch.rand_like(c2)\n g = c2.sparse_mask(c.coalesce())\n\n c.backward(g)\n\n a_grad, b_grad = test_grad_dense(a, b, g)\n self.assertEqual(a.grad, a_grad)\n self.assertEqual(b.grad, b_grad)\n\n def test_sparse_matmul(sparse_dims, nnz, shape_a, shape_b):\n a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)\n b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)\n\n # python implementation\n r1 = sparse_mm(a, b, 'scipy' if TEST_SCIPY else 'direct')\n\n self.assertEqual(r1.to_dense(), torch.mm(a.to_dense(), b.to_dense()))\n\n # cpp implementation\n r2 = torch.sparse.mm(a, b)\n self.assertEqual(r1, r2)\n\n a.requires_grad_(True)\n b.requires_grad_(True)\n\n # check autograd support on sparse matmul\n def fn(D1, D2):\n return torch.sparse.mm(D1, D2).to_dense()\n\n if a.is_cuda:\n # For cuda, `nondet_tol` is set with `1e-5`\n # This is because cuSparse sometimes 
returns approximate zero values like `~e-323`\n # TODO: Check this cuSparse issue.\n # This happens when you do chain multiplication `torch.sparse.mm` operations\n gradcheck(fn, (a, b), check_sparse_nnz=True, nondet_tol=1e-5)\n else:\n gradcheck(fn, (a, b), check_sparse_nnz=True)\n grad_with_custom_sparsity_pattern_test_helper(sparse_dims, nnz, shape_a, shape_b)\n\n def test_error_cases():\n def fn(sparse_dims, nnz, shape_a, shape_b):\n a, i_a, v_a = self._gen_sparse(sparse_dims, nnz, shape_a, dtype, device, coalesced)\n b, i_b, v_b = self._gen_sparse(sparse_dims, nnz, shape_b, dtype, device, coalesced)\n r2 = torch.sparse.mm(a, b)\n\n # This is not a matrix\n self.assertRaises(RuntimeError, lambda: fn(3, 4, [2, 2, 2], [2, 2, 2]))\n\n # Shapes does not\n self.assertRaisesRegex(RuntimeError,\n r\"mat1 and mat2 shapes cannot be multiplied \\(2x3 and 4x2\\)\",\n lambda: fn(2, 10, [2, 3], [4, 2]))\n\n def different_dtypes():\n a, i_a, v_a = self._gen_sparse(2, 10, [2, 2], dtype, device, coalesced)\n b, i_b, v_b = self._gen_sparse(2, 10, [2, 2], dtype, device, coalesced)\n r2 = torch.sparse.mm(a.to(torch.float64), a.to(torch.float32))\n\n self.assertRaisesRegex(RuntimeError, 'mat1 dtype Double does not match mat2 dtype Float', different_dtypes)\n\n for n in range(2, 5):\n for m in range(2, 8):\n for p in range(2, 8):\n test_sparse_matmul(2, 10, [n, m], [m, p])\n\n test_sparse_matmul(2, 0, [0, 0], [0, 0])\n test_sparse_matmul(2, 0, [0, 10], [10, 0])\n test_error_cases()",
"def __matmul__(self, other):\n return F.MatMul.apply(self, other)",
"def convert_matmul(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[0]), g.get_node(op.input(\"Y\")[0])]\n a_shape = infer_shape(inputs[0])\n b_shape = infer_shape(inputs[1])\n if op.has_attr(\"trans_x\"):\n # for matmul_v2\n trans_x = op.attr(\"trans_x\")\n trans_y = op.attr(\"trans_y\")\n else:\n # for matmul\n trans_x = op.attr(\"transpose_X\")\n trans_y = op.attr(\"transpose_Y\")\n if trans_x:\n perm = list(range(len(a_shape)))\n perm[-2] = len(a_shape) - 1\n perm[-1] = len(a_shape) - 2\n inputs[0] = _op.transpose(inputs[0], axes=perm)\n if trans_y:\n perm = list(range(len(b_shape)))\n perm[-2] = len(b_shape) - 1\n perm[-1] = len(b_shape) - 2\n inputs[1] = _op.transpose(inputs[1], axes=perm)\n\n # This implemention almost keeps same with ONNX\n # Need to check input shape as batch matmul must be supported.\n a_shape = shape_of(inputs[0], dtype=\"int32\")\n a_rank = infer_shape(a_shape)[0]\n b_shape = shape_of(inputs[1], dtype=\"int32\")\n b_rank = infer_shape(b_shape)[0]\n # When performing a batch matmul, we need to properly handle N-dim shapes.\n if a_rank > 2 or b_rank > 2:\n\n def flatten_to_nd(x, x_shape, nd=3):\n ndims = infer_shape(x_shape)[0]\n if ndims == nd:\n return x\n newshape = _op.concatenate(\n [\n _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),\n _op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),\n ],\n 0,\n )\n out = _op.reshape(x, fold_constant(newshape))\n return out\n\n b_type = infer_type(inputs[1])\n # Convert to dense if the second matrix is 2d and non-dynamic\n if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):\n a = flatten_to_nd(inputs[0], a_shape, 2)\n b = _op.transpose(inputs[1])\n output = _op.nn.dense(a, b)\n else:\n # Convert a and b into 3 dimensional tensors.\n a = flatten_to_nd(inputs[0], a_shape, 3)\n b = flatten_to_nd(inputs[1], b_shape, 3)\n # Transpose matrix dimensions of b.\n b = _op.transpose(b, [0, 2, 1])\n # Perform a batch matmul.\n output = _op.nn.batch_matmul(a, b)\n # Determine the output batch dimension.\n if a_rank > b_rank:\n out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])\n elif a_rank < b_rank:\n out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])\n # If its unclear how broadcasting should be applied, the output\n # shape is determined by choosing the maximum value from each input.\n else:\n out_batch = _op.concatenate(\n [\n _op.maximum(\n _op.strided_slice(a_shape, [i], [i + 1]),\n _op.strided_slice(b_shape, [i], [i + 1]),\n )\n for i in range(a_rank - 2)\n ],\n 0,\n )\n # Reshape output to original dimensions.\n final_shape = _op.concatenate(\n [\n out_batch,\n _op.strided_slice(\n a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]\n ),\n _op.strided_slice(\n b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]\n ),\n ],\n 0,\n )\n out = _op.reshape(output, fold_constant(final_shape))\n else:\n if b_rank == 1:\n inputs[1] = _op.expand_dims(inputs[1], 1, 1)\n # Otherwise a simple dense op will get the job done.\n input_1_t = _op.transpose(inputs[1], axes=(1, 0))\n out = _op.nn.dense(inputs[0], input_1_t)\n if b_rank == 1:\n out = _op.squeeze(out, axis=[-1])\n if op.has_attr(\"alpha\"):\n alpha = op.attr(\"alpha\")\n if not np.isclose(alpha, 1.0):\n out = out * _expr.const(alpha).astype(\"float32\")\n g.add_node(op.output(\"Out\")[0], out)",
"def _matmul_broadcast(x, y, name):\n with tf.variable_scope(name) as scope:\n return tf.reduce_sum(\n tf.nn.dropout(x[..., tf.newaxis] * y[..., tf.newaxis, :, :],1), axis=-2\n )",
"def np_matmul(mat1, mat2):\n return np.matmul(mat1, mat2)",
"def dense(x, size, name, weight_init=xavier):\n w = tf.get_variable(name + \"/w\", [x.get_shape()[1], size], initializer=weight_init)\n b = tf.get_variable(name + \"/b\", [size], initializer=tf.constant_initializer(0.0))\n return tf.matmul(x, w) + b",
"def nonsquare_matrix_mult(matrix):\n\n #Setup openCL\n dev, ctx, queue = setup_CL()\n\n #openCL Kernel\n #Naive approach\n kernel_code = \"\"\"\n #define MATRIX_ROW_SIZE %(matrix_row_size)s\n #define MATRIX_COL_SIZE %(matrix_col_size)s\n\n __kernel void func(__global float* a, __global float* b, __global float* transposed) {\n\n unsigned int i = get_local_id(0);\n __local float tmp[MATRIX_ROW_SIZE*MATRIX_COL_SIZE];\n\n //Initialize tmp to 0\n //Initialize output b to 0 for this thread\n for(int k=0; k<MATRIX_COL_SIZE*MATRIX_ROW_SIZE; k++){\n tmp[k] = 0;\n }\n\n for(int k=0; k<MATRIX_ROW_SIZE; k++){\n b[k + MATRIX_ROW_SIZE*get_group_id(0)] = 0;\n }\n\n //Transpose output\n transposed[i*MATRIX_ROW_SIZE+get_group_id(0)]=a[i+get_local_size(0)*get_group_id(0)];\n\n for(int j=0; j < MATRIX_ROW_SIZE; j++){\n tmp[j+MATRIX_ROW_SIZE*i] = a[i+get_local_size(0)*get_group_id(0)]*a[i+j*MATRIX_COL_SIZE];\n }\n\n // Store to output\n for(int j=0; j < MATRIX_ROW_SIZE; j++){\n for(int k=0; k < MATRIX_COL_SIZE; k++){\n if(i==0){\n b[j + MATRIX_ROW_SIZE*get_group_id(0)] += tmp[j+MATRIX_ROW_SIZE*k];\n }\n }\n }\n barrier(CLK_LOCAL_MEM_FENCE);\n }\n \"\"\"\n\n #Move data to device\n matrix_float = matrix.astype(np.float32)\n matrix_gpu = cl.array.to_device(queue, matrix_float)\n transposeMult_gpu = cl.array.empty(queue, (matrix.shape[0], matrix.shape[0]), np.float32)\n transposed_gpu = cl.array.empty(queue, (matrix.shape[1],matrix.shape[0]), np.float32)\n\n matrix_row_size = np.int32(matrix.shape[0])\n matrix_col_size = np.int32(matrix.shape[1])\n\n #Calculate workItems, workGroup size, workGroups for input\n matrix_val_count = matrix_float.shape[0]*matrix_float.shape[1]\n xWorkItems = min(int(matrix_row_size),1024)\n yWorkItems = min(int(matrix_col_size),1024)\n totalWorkItems = float(xWorkItems*yWorkItems)\n groups = np.int(max(np.ceil(matrix_val_count / xWorkItems),1))\n\n # print(\"workItems: %s, matrix_val_count: %s, groups: %s\" % (totalWorkItems, matrix_val_count, groups))\n\n # update template with current runtime requirements\n kernel = kernel_code % {\n 'matrix_row_size': matrix_row_size,\n 'matrix_col_size': matrix_col_size\n }\n\n #Launch kernel and time it\n #Set global ID, workItems, workGroups\n prg = cl.Program(ctx, kernel).build()\n start = time.time()\n event = prg.func(queue, (yWorkItems*xWorkItems,1),(groups,1), matrix_gpu.data, transposeMult_gpu.data, transposed_gpu.data)\n\n #event.wait()\n runtime = time.time()-start\n\n #Save output\n transposedMult = transposeMult_gpu.get()\n transposed = transposed_gpu.get()\n\n # print('openCL_opt0 %d x %d transpose-mult time: %.2E' % (matrix.shape[0], matrix.shape[1], runtime))\n # print('openCL_opt0 transposed==goldenTransposed: %s' % np.allclose(transposed, np.transpose(matrix)))\n # print('openCL_opt0 mult==goldenMult: %s' % np.allclose(transposedMult, matrix.dot(np.transpose(matrix))))\n if not(np.allclose(transposedMult, matrix.dot(np.transpose(matrix)))):\n # print('Original Matrix:\\n %s' % matrix)\n print('openCL_opt0 transposed val:\\n %s' % transposed)\n print('golden transpose-mult:\\n %s' % matrix.dot(np.transpose(matrix)))\n transposedMult[(transposedMult>0) & (transposedMult<1)] = -1\n print('openCL_opt0 mult val:\\n %s' % transposedMult)\n print('openCL_opt0 transpose-mult:\\n %s' % np.isclose(transposedMult,matrix.dot(np.transpose(matrix))))\n # print('--------------------')\n\n return [transposedMult, runtime]",
"def dot(x, y):\n if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):\n x_shape = []\n for i, s in zip(x.get_shape().as_list(), tf.unstack(tf.shape(x))):\n if i is not None:\n x_shape.append(i)\n else:\n x_shape.append(s)\n x_shape = tuple(x_shape)\n y_shape = []\n for i, s in zip(y.get_shape().as_list(), tf.unstack(tf.shape(y))):\n if i is not None:\n y_shape.append(i)\n else:\n y_shape.append(s)\n y_shape = tuple(y_shape)\n y_permute_dim = list(range(ndim(y)))\n y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim\n xt = tf.reshape(x, [-1, x_shape[-1]])\n yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])\n return tf.reshape(tf.matmul(xt, yt),\n x_shape[:-1] + y_shape[:-2] + y_shape[-1:])\n if isinstance(x, tf.SparseTensor):\n out = tf.sparse_tensor_dense_matmul(x, y)\n else:\n out = tf.matmul(x, y)\n return out",
"def __matmul__(\n self,\n other: Union[TensorWrappedPhiTensorPointer, MPCTensor, int, float, np.ndarray],\n ) -> Union[TensorWrappedPhiTensorPointer, MPCTensor]:\n return TensorWrappedPhiTensorPointer._apply_op(self, other, \"__matmul__\")",
"def np_matmul(mat1, mat2):\n return mat1.dot(mat2)",
"def calculate_matmul_n_times(n_components, mat_a, mat_b):\n res = np.zeros(mat_a.shape)\n mat_a = tf.cast(mat_a, tf.double)\n mat_b = tf.cast(mat_b, tf.double)\n for i in range(n_components):\n mat_a_i = tf.squeeze(mat_a[:, i, :, :], -2)\n mat_b_i = tf.squeeze(mat_b[0, i, :, :])\n res[:, i, :, :] = tf.expand_dims(tf.matmul(mat_a_i, mat_b_i), 1)\n\n return tf.convert_to_tensor(res)",
"def dot(x, y):\r\n if K.ndim(x) is not None and (K.ndim(x) > 2 or K.ndim(y) > 2):\r\n x_shape = []\r\n for i, s in zip(x.get_shape().as_list(), tf.unstack(tf.shape(x))):\r\n if i is not None:\r\n x_shape.append(i)\r\n else:\r\n x_shape.append(s)\r\n x_shape = tuple(x_shape)\r\n y_shape = []\r\n for i, s in zip(y.get_shape().as_list(), tf.unstack(tf.shape(y))):\r\n if i is not None:\r\n y_shape.append(i)\r\n else:\r\n y_shape.append(s)\r\n y_shape = tuple(y_shape)\r\n y_permute_dim = list(range(K.ndim(y)))\r\n y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim\r\n xt = tf.reshape(x, [-1, x_shape[-1]])\r\n yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])\r\n return tf.reshape(tf.matmul(xt, yt),\r\n x_shape[:-1] + y_shape[:-2] + y_shape[-1:])\r\n if isinstance(x, tf.SparseTensor):\r\n out = tf.sparse_tensor_dense_matmul(x, y)\r\n else:\r\n out = tf.matmul(x, y)\r\n return out",
"def nonsquare_matrix_mult_opt1(matrix):\n\n #Setup openCL\n dev, ctx, queue = setup_CL()\n\n #openCL Kernel\n #Naive approach with local/private memory\n kernel_code = \"\"\"\n #define MATRIX_ROW_SIZE %(matrix_row_size)s\n #define MATRIX_COL_SIZE %(matrix_col_size)s\n\n __kernel void func(__global float* a, __global float* b, __global float* transposed) {\n\n unsigned int i = get_local_id(0);\n __local float tmp[MATRIX_ROW_SIZE*MATRIX_COL_SIZE];\n\n //Initialize tmp to 0\n //Initialize output b to 0 for this thread\n for(int k=0; k<MATRIX_COL_SIZE*MATRIX_ROW_SIZE; k++){\n tmp[k] = 0;\n }\n\n for(int k=0; k<MATRIX_ROW_SIZE; k++){\n b[k + MATRIX_ROW_SIZE*get_group_id(0)] = 0;\n }\n\n float localMatrix[MATRIX_ROW_SIZE*MATRIX_COL_SIZE];\n //Copy matrix to local\n for(int j=0; j < MATRIX_COL_SIZE; j++){\n localMatrix[i+get_local_size(0)*get_group_id(0)]=a[i+get_local_size(0)*get_group_id(0)];\n }\n\n\n //Transpose output\n transposed[i*MATRIX_ROW_SIZE+get_group_id(0)]=localMatrix[i+get_local_size(0)*get_group_id(0)];\n\n for(int j=0; j < MATRIX_ROW_SIZE; j++){\n tmp[j+MATRIX_ROW_SIZE*i] = localMatrix[i+get_local_size(0)*get_group_id(0)]*a[i+j*MATRIX_COL_SIZE];\n }\n\n // Store to output\n for(int j=0; j < MATRIX_ROW_SIZE; j++){\n for(int k=0; k < MATRIX_COL_SIZE; k++){\n if(i==0){\n b[j + MATRIX_ROW_SIZE*get_group_id(0)] += tmp[j+MATRIX_ROW_SIZE*k];\n }\n }\n }\n barrier(CLK_LOCAL_MEM_FENCE);\n }\n \"\"\"\n\n #Move data to device\n matrix_float = matrix.astype(np.float32)\n matrix_gpu = cl.array.to_device(queue, matrix_float)\n transposeMult_gpu = cl.array.empty(queue, (matrix.shape[0], matrix.shape[0]), np.float32)\n transposed_gpu = cl.array.empty(queue, (matrix.shape[1],matrix.shape[0]), np.float32)\n\n matrix_row_size = np.int32(matrix.shape[0])\n matrix_col_size = np.int32(matrix.shape[1])\n\n #Calculate workItems, workGroup size, workGroups for input\n matrix_val_count = matrix_float.shape[0]*matrix_float.shape[1]\n xWorkItems = min(int(matrix_row_size),1024)\n yWorkItems = min(int(matrix_col_size),1024)\n totalWorkItems = float(xWorkItems*yWorkItems)\n groups = np.int(max(np.ceil(matrix_val_count / xWorkItems),1))\n\n # print(\"workItems: %s, matrix_val_count: %s, groups: %s\" % (totalWorkItems, matrix_val_count, groups))\n\n # update template with current runtime requirements\n kernel = kernel_code % {\n 'matrix_row_size': matrix_row_size,\n 'matrix_col_size': matrix_col_size\n }\n\n #Launch kernel and time it\n #Set global ID, workItems, workGroups\n prg = cl.Program(ctx, kernel).build()\n start = time.time()\n event = prg.func(queue, (xWorkItems*yWorkItems,1),(groups,1), matrix_gpu.data, transposeMult_gpu.data, transposed_gpu.data)\n\n #event.wait()\n runtime = time.time()-start\n\n #Save output\n transposedMult = transposeMult_gpu.get()\n transposed = transposed_gpu.get()\n\n # print('openCL_opt1 %d x %d transpose-mult time: %.2E' % (matrix.shape[0], matrix.shape[1], runtime))\n # print('openCL_opt1_transposed==goldenTransposed: %s' % np.allclose(transposed, np.transpose(matrix)))\n # print('openCL_opt1_mult==goldenMult: %s' % np.allclose(transposedMult, matrix.dot(np.transpose(matrix))))\n if not(np.allclose(transposedMult, matrix.dot(np.transpose(matrix)))):\n # print('Original Matrix:\\n %s' % matrix)\n print('openCL_opt1 transposed val:\\n %s' % transposed)\n print('golden transpose-mult:\\n %s' % matrix.dot(np.transpose(matrix)))\n transposedMult[(transposedMult>0) & (transposedMult<1)] = -1\n print('openCL_opt1 mult val:\\n %s' % transposedMult)\n print('openCL_opt1 
transpose-mult:\\n %s' % np.isclose(transposedMult,matrix.dot(np.transpose(matrix))))\n # print('--------------------')\n\n return [transposedMult, runtime]",
"def call(self, reshaped_input):\n \"\"\"\n In Keras, there are two way to do matrix multiplication (dot product)\n 1) K.dot : AxB -> when A has batchsize and B doesn't, use K.dot\n 2) tf.matmul: AxB -> when A and B both have batchsize, use tf.matmul\n \n Error example: Use tf.matmul when A has batchsize (3 dim) and B doesn't (2 dim)\n ValueError: Shape must be rank 2 but is rank 3 for 'net_vlad_1/MatMul' (op: 'MatMul') with input shapes: [?,21,64], [64,3]\n \n tf.matmul might still work when the dim of A is (?,64), but this is too confusing.\n Just follow the above rules.\n \"\"\"\n \n ''' Computation of N_v in Equation 3 of the paper '''\n activation = K.dot(reshaped_input, self.cluster_weights)\n \n activation += self.cluster_biases\n \n activation = tf.nn.softmax(activation)\n\n activation = tf.reshape(activation,\n [-1, self.max_samples, self.cluster_size])\n\n activation = tf.transpose(activation,perm=[0,2,1])\n \n reshaped_input = tf.reshape(reshaped_input,[-1,\n self.max_samples, self.feature_size])\n\n vlad = tf.matmul(activation,reshaped_input)\n vlad = tf.transpose(vlad,perm=[0,2,1])\n vlad = tf.nn.l2_normalize(vlad,1)\n vlad = tf.reshape(vlad,[-1, self.cluster_size*self.feature_size])\n Nv = tf.nn.l2_normalize(vlad,1)\n \n # Equation 3 in the paper\n # \\hat{y} = W_N N_v\n vlad = K.dot(Nv, self.Wn)\n\n return vlad",
"def _multi_matmul(arrays, order, i, j, constant=False) -> Tensor:\n if i == j:\n return arrays[i]\n else:\n return matmul(\n _multi_matmul(arrays, order, i, order[i, j], constant),\n _multi_matmul(arrays, order, order[i, j] + 1, j, constant),\n constant,\n )",
"def lazy_matrix_mul(m_a, m_b):\n return (np.matmul(m_a, m_b))",
"def lazy_matrix_mul(m_a, m_b):\n return np.matmul(np.array(m_a), np.array(m_b))"
] | [
"0.7717564",
"0.739388",
"0.7382497",
"0.71858466",
"0.71548396",
"0.7092191",
"0.7060006",
"0.69942117",
"0.69575405",
"0.6784286",
"0.6765532",
"0.67141557",
"0.66577524",
"0.6653065",
"0.6623352",
"0.6549582",
"0.65280193",
"0.6438002",
"0.6416873",
"0.64110273",
"0.64069945",
"0.6358362",
"0.6355897",
"0.6352835",
"0.6340505",
"0.6310087",
"0.6246013",
"0.62411267",
"0.6212169",
"0.6208466"
] | 0.77353585 | 0 |
Inserts rows into specified table. | def insert(self, table_name, rows, bulk=True):
table = self._create_table(table_name)
return self._perform_query(table.insert(), rows, bulk) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bq_insert_rows(bq_client, table, rows):\n _batch_insert(bq_client, table, rows)",
"def insert_row(self, tablename, fields):\n insert_params = \"(\" + \",\".join(['?' for x in fields]) + \")\"\n self.cursor.execute(\"insert into \" + tablename + \" values \" +\n insert_params, fields)",
"def insert(self,table,values):\n self.connect.execute(self.insert_disc[table],values)\n self.connect.commit()",
"def insert_into_tables(cur, conn):\n for query in insert_table_queries:\n cur.execute(query)\n conn.commit()",
"def insertall(self, rows):\n method = 'tabledata().insertAll().execute()'\n body = {}\n body['rows'] = [{'json': row} for row in rows]\n body[\"kind\"] = \"bigquery#tableDataInsertAllRequest\"\n return self._make_request(method, body)",
"def insert(conn, table_info, table_data):\n\n sql = ''' INSERT INTO ''' + table_info \n + ''' VALUES(''' + \"?,\" * (len(table_data)-1) + \"?)\"\n cursor = conn.cursor()\n cursor.execute(sql, table_data)\n conn.commit()",
"def populate_table(database, table, data):\n\n for row in data:\n database.session.add(table(row))\n database.session.commit()",
"def insert_records(self, insert_query, insert_query_columns, wiki_data, table_name):\n print(\"Inserting {} rows into {}\".format(len(wiki_data), table_name))\n for index, item in enumerate(wiki_data):\n values_to_insert = [item[column]['value'] for column in insert_query_columns]\n try:\n self.cur.execute(insert_query, values_to_insert)\n except ValueError as ve:\n print(\"Could not execute query : {} with values\".format(insert_query, values_to_insert))\n raise ve\n\n if index % 1000 == 0:\n print(\"Inserted {} rows\".format(index))\n print(\"Inserted {} rows\".format(len(wiki_data)))\n print(\"Finished inserting {}\".format(table_name))",
"def insert(self, table, *args, **kwargs):\n\n values = None\n query = \"INSERT INTO %s \" % table\n if kwargs:\n keys = kwargs.keys()\n values = kwargs.values()\n query += \"(\" + \",\".join([\"`%s`\"]*len(keys)) % tuple(keys) + \\\n \") VALUES(\" + \",\".join([\"%s\"]*len(values)) + \")\"\n elif args:\n values = args\n query += \" VALUES(\" + \",\".join([\"%s\"]*len(values)) + \")\"\n\n self.__open()\n self.__cursor.execute(query, values)\n self.__connection.commit()\n self.__close()\n return self.__cursor.lastrowid",
"def test_table_insert_rows(data):\n dataset_id = 'eu_cbec_bi_data'\n table_id = 'marketplaces'\n dataset = bigquery.Dataset(bigquery_client.dataset(dataset_id))\n \n table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)\n\n # [START table_insert_rows]\n rows_to_insert = [data]\n\n errors = bigquery_client.insert_rows(table, rows_to_insert) # API request\n\n assert errors == []\n\n # [END table_insert_rows]",
"def _batch_insert(bq_client, table, rows):\n total_rows = len(rows)\n inserted_rows = 0\n batch = 1\n logger.info(\"Inserting %d rows into table %s\", total_rows,\n table.full_table_id)\n while inserted_rows < total_rows:\n start = (batch - 1) * MAX_BQ_INSERT_SIZE\n end = batch * MAX_BQ_INSERT_SIZE\n batch_rows = rows[start:end]\n inserted_rows += len(batch_rows)\n errors = _insert_rows(bq_client, table, batch_rows)\n if errors:\n print_bq_insert_errors(batch_rows, errors)\n logger.error(\n \"The program has been terminated due to BigQuery insertion \"\n \"errors.\")\n exit(1)\n else:\n logger.info(\"Batch %d: inserted rows %d to %d\", batch, start + 1,\n min(end, len(rows)))\n batch += 1\n logger.info(\"All rows inserted.\")",
"def insert_row(self, table: str, row_data: dict):\r\n\r\n columns = \"\".join([f\"'{i}',\" for i in row_data]).rstrip(\",\")\r\n keys = \"\".join([f\"'{row_data[i]}',\" for i in row_data]).rstrip(\",\")\r\n sql_statement = f\"INSERT INTO {table} ({columns}) VALUES({keys});\"\r\n try:\r\n self.__cursor(sql_statement)\r\n self.__db_conn.commit()\r\n except sqlite3.Error as error:\r\n print(\"[!] Couldn't add record\")\r\n print(\"[!]\", str(error).capitalize())\r\n return\r\n print(\"[*] Record added successfully.\")",
"def insertmany(self, table, values):\n col_name = self.table_cols[table][1:]\n sql = 'INSERT INTO %s(%s) VALUES (%s)' % (table, ','.join(col_name), ','.join(['%s'] * len(values[0])))\n Log.debug('DB -> %s' % sql)\n self.execute(sql, values)",
"def insert_data(self, table_name, data):\n for data_point in data:\n query = \"INSERT INTO %s(%s) VALUES (%s)\"\n\n fields = \", \".join(data_point.keys())\n values = \", \".join([self.pack_data(value) for value in data_point.values()])\n self.cursor.execute(query % (table_name, fields, values))\n self.db_connection.commit()",
"def insert_bulk(self, frame, table_name, conn=None, index=False, index_label=None, \n schema=None, chunksize=None, copy=True, auto_adjust=True):\n\n table = SQLTable(table_name, self, frame=frame, table_setup=False, index=index,\n if_exists='append', index_label=index_label, schema=schema)\n \n table.insert(conn=conn, bulk=True, chunksize=chunksize, copy=copy, \n auto_adjust=auto_adjust)",
"def insert_many(self, frame, table_name, conn=None, index=False, index_label=None, \n schema=None, chunksize=None, copy=True, auto_adjust=True):\n \n table = SQLTable(table_name, self, frame=frame, table_setup=False, index=index,\n if_exists='append', index_label=index_label, schema=schema)\n \n table.insert(conn=conn, bulk=False, chunksize=chunksize, copy=copy, \n auto_adjust=auto_adjust)",
"def _insert_table_row(self, db: str, table: str, row: Dict[str, Any]):\n pass",
"def insert(db, table, name, row):\n\n # Build insert prepared statement\n columns = [name for name, _ in table.items()]\n insert = INSERT_ROW.format(table=name, columns=\", \".join(columns), values=(\"?, \" * len(columns))[:-2])\n\n try:\n db.execute(insert, values(table, row, columns))\n except Exception as ex:\n print(\"Error inserting row: {}\".format(row), ex)",
"def insert_into_table(self, conn, insert_into_table_sql):\n try:\n c = conn.cursor()\n c.execute(insert_into_table_sql)\n conn.commit()\n\n except Error as e:\n print(e)",
"def insert_data(stored_data, table_name):\r\n for item in stored_data:\r\n cursor.execute('''INSERT INTO {} VALUES(?, ?, ?, ?)'''.format(table_name), item)",
"def insert_to_table(self, tableName, dataRow, colNames):\n queryString = f\"INSERT INTO {tableName} VALUES \"\n # building the Values list\n valueString = \"\"\n # looping \n for value in dataRow:",
"def insert(self, table, value):\n col_name = self.table_cols[table][1:]\n sql = \"INSERT INTO %s(%s) VALUES (%s)\" % (table, str(','.join(col_name)), array_join(value, ','))\n Log.debug('DB -> %s' % sql)\n self.execute(sql)",
"def insertRows(self, p_int, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass",
"def insert(self, data, table, **kwargs):\n logging.info(f'Inserting into `{table}`')\n\n try:\n data.to_sql(table, self.engine, **kwargs)\n try:\n self.execute(f'ALTER TABLE `{table}` ADD PRIMARY KEY (`id`);')\n except:\n pass\n return True\n except:\n logging.exception('Something went wrong inserting. Check trace.')\n return False",
"def add_rows(self):\n for row in self.rows:\n self.table.add_row(row)",
"def insert_rows(cursor, table_name, row_dict_list):\n\tkeys = row_dict_list[0].keys()\n\tcolumns = \", \".join(keys)\n\tvalues_template = \", \".join([\"?\"] * len(keys))\n\n\tsql = \"INSERT INTO %s (%s) VALUES (%s)\" % \\\n\t\t(table_name, columns, values_template)\n\ttuples = [tuple(row_dict[key] for key in keys) for row_dict in row_dict_list]\n\tcursor.executemany(sql, tuples)",
"def insert_data(db, metadata, data):\n with Tx(db) as c:\n lock_tables(c)\n metadata['set_id'] = _insert_metadata(c, metadata)\n\n data_iterator = iter(data)\n first_row = next(data_iterator)\n headers = list(first_row.keys())\n for table in _tables_from_headers(headers):\n _insert_data_rows(c, table, metadata, chain([first_row], data_iterator))",
"def insert(self, table_name, fields):\n LOGGER.debug(\"%r: Inserting %r\" % (table_name, fields))\n return self.db.table(table_name).insert(fields)",
"def insert_tables(cur, conn):\n for query in insert_table_queries:\n cur.execute(query)\n conn.commit()",
"def insert_tables(cur, conn):\n for query in insert_table_queries:\n cur.execute(query)\n conn.commit()"
] | [
"0.76099175",
"0.7304086",
"0.72266775",
"0.70127386",
"0.70006967",
"0.69257027",
"0.6918206",
"0.6911293",
"0.6864757",
"0.68546313",
"0.6818461",
"0.6810073",
"0.678777",
"0.6763292",
"0.6754389",
"0.6733423",
"0.6727673",
"0.66795295",
"0.6675443",
"0.66739845",
"0.66597855",
"0.66545194",
"0.6650116",
"0.6635743",
"0.6609471",
"0.65986675",
"0.6583949",
"0.6568122",
"0.6549091",
"0.6549091"
] | 0.7594534 | 1 |
Update a row of the specified database table based on the ID. | def update(self, table_name, data, id_column_name='id'):
table = self._create_table(table_name)
for row in data:
try:
statement = table.update() \
.where(table.c[id_column_name] == row[id_column_name]) \
.values(**row)
self.cursor.execute(statement)
except Exception as e:
print (e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(table, id_):\n\n # your code\n\n return table",
"def sqlite3_update_record(data_base, table, param_column, param_value, id_column, record_id):\n con = sqlite3.connect(data_base)\n cur = con.cursor()\n try:\n query = 'UPDATE ' + table + ' SET ' + param_column + ' = \"' + param_value + '\" WHERE ' + id_column + \\\n \" = '\" + record_id + \"'\"\n cur.execute(query)\n except sqlite3.OperationalError:\n pass\n con.commit()\n cur.close()\n con.close()",
"def update_row(self, row_id, update_data):\n #Check to make sure all the column names given by user match the column names in the table.\n data = self.__scrub_data(update_data)\n path = self.__data_file_for_row_id(row_id)\n if data:\n #Create a temp data file with the updated row data.\n if self.__modify_data_file(path, {row_id: data}, 'update'):\n print('Row ' + str(row_id) + ' has been updated.') \n else:\n raise Exception('There was a problem updating row at ' + str(row_id) +'.')\n else:\n raise Exception('Sorry, the data you tried to insert is invalid.')",
"def update(self, table, id, **kwargs):\n pairs = [column + ' = %s' for column in kwargs.keys()]\n values = kwargs.values()\n if 'changed_by' in self.get_columns(table):\n pairs += ['changed_by = %s', 'date_changed = now()']\n values += [self.user_id]\n self.db.execute(\n 'update %s set %s where %s = %%s' %\n (table, ', '.join(pairs), table + '_id'), *(values + [id]))\n return id",
"def update(cls, row_id, **kwargs):\n cls.delete(row_id)\n # obj = cls.query.filter_by(id=row_id).first()\n # for k, v in kwargs.items():\n # obj[k] = v\n # obj = cls.query.filter_by(id=row_id).update(kwargs)\n kwargs[\"id\"] = row_id\n obj = cls(**kwargs)\n #print(\"the type of updated object is\", type(obj))\n return commit(obj)",
"def update(table, id_):\n id_storage = common.get_values_from_column(table, 0)\n if id_ in id_storage:\n table = manage_data_from_user(table, id_storage, id_, True)\n # Here u can make changes:\n\n else:\n ui.print_error_message('This option does not exist.')\n\n return table",
"def update_row(self, pk, row_dict):\n return self.execute(self.commands.update_row(\n self.name,\n col_val=self._join_equality(row_dict),\n pk_col=self.primary_key_column,\n pk=pk\n ))",
"def on_put(self, req, resp, table, id):\n user = req.context['user']\n pairs = req.context['doc']['values']\n keys = pairs.keys()\n set_clause = [\"`{}`=:{}\".format(k, k) for k in keys]\n set_clause = ','.join(set_clause)\n engine = user_db_engine(user)\n query = \"UPDATE {} SET {} WHERE id=:id\".format(table, set_clause)\n try:\n pairs['id'] = int(id)\n except ValueError:\n raise exceptions.HTTPBadRequestError(\"Invalid ID\")\n\n with engine.new_session() as conn:\n result = conn.execute(query, pairs)\n\n if config.use_cache():\n key = _make_key(engine, table, \"*\", id, -1)\n cache.invalidate_query_pattern(\"{}\".format(key))\n resp.context['result'] = {'result': 'ok'}\n resp.status = falcon.HTTP_200",
"def update(table, id_):\n # 4\n for row in table:\n if row[0] == id_:\n addnew = ui.get_inputs(\n ['month: ',\n 'day: ',\n 'year: ',\n 'type (in=income, out= outflow): ',\n 'amount (of transaction in USD): '],\n 'Updating item in Accounting table')\n addnew.insert(0, id_)\n row = addnew\n data_manager.write_table_to_file('accounting/items.csv', table)\n\n return table",
"def replace(self, table, _id, indata, fail_on_empty=True):\n try:\n with self.lock:\n for i, _ in self._find(table, self._format_filter({\"_id\": _id})):\n break\n else:\n if fail_on_empty:\n raise DbException(\"Not found entry with _id='{}'\".format(_id), HTTPStatus.NOT_FOUND)\n return None\n self.db[table][i] = deepcopy(indata)\n return {\"updated\": 1}\n except DbException:\n raise\n except Exception as e: # TODO refine\n raise DbException(str(e))",
"def update(table, id_):\n\n # your code\n key = common.check_for_key(id_,table)\n if key == None:\n ui.print_error_message('Key does not exist')\n else:\n return_inputs = ui.get_inputs(['Name', 'Age'], 'Enter New Values')\n modif_index = key\n\n table[modif_index][NAME] = return_inputs[FIRST_PROP]\n table[modif_index][AGE] = return_inputs[SECOND_PROP]\n data_manager.write_table_to_file('hr/persons.csv', table) \n\n return table",
"def update(table, id_):\n\n # your code\n\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n inventory_data = [\"Product: \", \"Manufacturer: \", \"Release date: \", \"Durability: \"]\n inputs = ui.get_inputs(inventory_data, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table",
"def update(self, table, element):\n\n if \"id\" not in element.keys():\n return False\n fields = []\n conditions = []\n values = []\n for key in element.keys():\n if key != \"id\":\n fields.append(key)\n values.append(element[key])\n conditions.append(\"id =\")\n values.append(element[\"id\"])\n result = self.__update(table, fields, conditions, values)\n\n return result",
"def update(self):\n sql = 'UPDATE {} SET {} where {}=%s'.format(\n self.TABLE_NAME,\n ', '.join(map(lambda f: '{}=%s'.format(f), self._dict)),\n self.PRIMARY_KEY\n )\n args = list(map(self._get_value_or_default, self._dict))\n args.append(self._get_value_or_default(self.PRIMARY_KEY))\n cursor = yield self._pool.execute(sql, args)\n count = cursor.rowcount\n result = True if count == 1 else False\n return result",
"def update(table, id_):\n\n record = common.find_id(table, id_[0])\n option, amount_data, data_info = data_to_change()\n\n if option in range(1, amount_data):\n new_data = ui.get_inputs([\"Type \" + data_info], 'Please write new data')\n is_date_number = new_data[0].isdigit() and len(new_data[0]) == 4\n is_durability_number = new_data[0].isdigit()\n\n if option == 1 or option == 2:\n common.insert_new_data(record, new_data[0], option)\n\n elif option == 3 and is_date_number is True:\n common.insert_new_data(record, new_data[0], option)\n\n elif option == 4 and is_durability_number is True:\n common.insert_new_data(record, new_data[0], option)\n\n else:\n ui.print_error_message(\"Wrong format! Record update failed!\")\n\n return table",
"def update(table, id_):\n return common.update_line(table, id_, [(\"Title: \", None),\n (\"Price: \", common.validate_int),\n (\"Month of sale: \", common.validate_month),\n (\"Day of sale: \", common.validate_day),\n (\"Year of sale: \", common.validate_byear),\n (\"Customer ID: \", common.validate_id_possible)])",
"def update(table,record_id='',message='',next='',\n readonly_fields='',hidden_fields='',default_fields=''):\n\n PluginWikiWidgets._set_field_attributes(table, readonly_fields,hidden_fields,default_fields)\n if not record_id: record_id=request.args(-1)\n if not record_id.isdigit(): record_id=None\n return crud.update(db[table],record_id,message=message,next=next)",
"def update(self, table_name: str, id: int, values: dict, not_many=False):\n sql = 'UPDATE ' + table_name + ' SET '\n for key, value in values.items():\n sql += key + ' = '\n if type(value) is str or type(value) is bytes:\n value = value.replace(\"'\", \"''\")\n sql += \"'\" + value + \"'\"\n elif value is None:\n sql += 'null'\n else:\n sql += str(value)\n if not key == list(values.keys())[-1]:\n sql += ', '\n sql += ' WHERE ID = ' + str(id)\n sql += ';'\n if self.__sql_many_state and not_many is False:\n self.__sql_buffer += sql\n else:\n self.cursor.execute(sql)\n self.connection.commit()",
"def update(table, id_):\n\n new_data = ui.get_inputs(\n [\"TITLE\", \"PRICE\", \"MONTH\", \"DAY\", \"YEAR\"],\n \"Please enter the new data to update\"\n )\n\n if common.confirm_option():\n\n ID = 0\n\n for game in table:\n if game[ID] == id_:\n for game_data_index in range(len(new_data)):\n game[game_data_index + 1] = new_data[game_data_index]\n\n return table",
"def update(table, id_):\n\n for i in table:\n if i[0] == id_:\n i[1] = ui.get_inputs([\"What should i update the titel to: \"],\"\")\n i[2] = ui.get_inputs([\"What should I update the manufacturer to? \"],\"\")\n i[3] = ui.get_inputs([\"What should I update the year of purchase to? \"],\"\")\n i[4] = ui.get_inputs([\"What should I update the durability time in year/s? \"],\"\")\n data_manager.write_table_to_file(\"inventory/inventory.csv\", table)\n\n return table",
"def update(table, id_):\n ID = 0\n ids = [item[ID] for item in table]\n if id_ not in ids:\n raise ValueError(\"The given ID not in the table.\")\n titles_sales = [\"Name: \", \"Birth Year: \"]\n inputs = ui.get_inputs(titles_sales, \"Specify new properties\")\n for index, item in enumerate(table):\n if id_ == item[ID]:\n table[index] = inputs\n table[index].insert(0, id_)\n return table",
"def update(self):\n if not self.id:\n raise DataValidationError(\"Update called with empty ID field\")\n db.session.commit()\n db.session.refresh(self)",
"def _update(self):\n with sqlite3.connect(self.dbpath) as connection:\n cursor = connection.cursor()\n UPDATESQL = \"\"\"UPDATE accounts\n SET first_name=:first_name, last_name=:last_name, \n username=:username, email_address=:email_address, \n password_hash=:password_hash, balance=:balance, \n account_number=:account_number, admin=:admin\n WHERE id=:id;\"\"\"\n values = {\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"username\": self.username,\n \"email_address\": self.email_address,\n \"password_hash\": self.password_hash, \n \"balance\": self.balance, \n \"account_number\": self.account_number,\n \"admin\": self.admin,\n \"id\": self.id\n }\n try:\n cursor.execute(UPDATESQL, values)\n except sqlite3.IntegrityError:\n raise ValueError(\"ID (id) does not set in datebase.\")",
"def update(table_name, record_id=None, filters=None, updates=None):\n if not filters:\n filters = {}\n if not updates:\n updates = {}\n\n with get_connection() as conn:\n if record_id:\n return rethink.table(table_name).get(record_id)\\\n .update(updates).run(conn)\n else:\n return rethink.table(table_name).filter(filters)\\\n .update(updates).run(conn)",
"def update(self, table, primaryKeyName, primaryKeyValue, options):\r\n keys = \"\"\r\n if not isinstance(options, dict):\r\n raise ValueError, \"Expected 'options' argument to be a dictionary, instead received: %s\" % type(options).__name__\r\n if options:\r\n for key, value in options.iteritems():\r\n if isinstance(key, str):\r\n key = key.replace(\"'\", \"''\")\r\n\r\n if isinstance(value, str):\r\n value = value.replace(\"'\", \"''\")\r\n keys += \"%s='%s',\" % (key, value)\r\n keys = keys[:-1]\r\n query = \"UPDATE \" + str(table) + \" SET \" + keys + \" WHERE \" + str(primaryKeyName) + \"='\" + str(primaryKeyValue) + \"'\"\r\n self.execute(query)",
"def update(self, tablename, values, condition):\n with self.engine.connect() as conn:\n try:\n update_stmt = update(tablename).values(values)\n if condition:\n update_stmt = update_stmt.where(condition)\n result = conn.execute(update_stmt)\n except SQLAlchemyError as e:\n print(\"DB update error: {}\".format(e))",
"def update(table, id_):\n os.system('clear')\n table_dict = common.creat_dict_from_table(table)\n\n if id_ in list(table_dict.keys()):\n list_labels = [\"Month: \", \"Day: \", \"Year: \", \"Type: \", \"Amount: \"]\n title = \"Please provide product information\"\n updated_record = ui.get_inputs(list_labels, title)\n updated_record.insert(0, table_dict[id_][0])\n table_dict[id_] = updated_record\n table = list(table_dict.values())\n data_manager.write_table_to_file(\"store/games.csv\", table)\n else:\n ui.print_error_message(\"There is no such element.\")\n return table",
"def update(self, table_name, fields, keys=None, any=False, eids=None):\n table = self.db.table(table_name)\n if eids is not None:\n LOGGER.debug(\"%r: update(%r, eids=%r)\" % (table_name, fields, eids))\n if isinstance(eids, list):\n return table.update(fields, eids=eids)\n else:\n return table.update(fields, eids=[eids])\n else:\n LOGGER.debug(\"%r: update(%r, keys=%r)\" % (table_name, fields, keys))\n return table.update(fields, self._getQuery(keys, any))",
"def update(self, id, id_col='name'):\n instance = self.get_one_instance(id_col, id)\n\n if type(instance) != self.Component:\n set_session_var('errors', str(instance))\n return None\n\n errors, data = self.format_and_control(request.form, obj=instance)\n\n if len(errors) > 0:\n set_session_var('errors', dict(errors))\n return None\n\n data = get_only_updated_values(instance, data)\n\n if len(data) == 0:\n return None\n\n res = update_in_db(instance, data)\n\n if res != 'updated':\n set_session_var('errors', str(res))\n return None\n else:\n set_session_var('success', res)\n\n if self.module_fn is not None:\n self.module_fn(instance, data)\n\n return instance",
"def do_update_data(self, *args):\n print(\"Provide data to update :\")\n id_field = dict()\n id_field['id'] = input(\"Provide id to update :\")\n values = {**id_field, **self.__class__.populate_data()}\n self.connection_obj.update_into_table(**values)\n print(\"Data Update Successful\")"
] | [
"0.7131184",
"0.6938645",
"0.6827818",
"0.68115515",
"0.67745304",
"0.66986614",
"0.6474268",
"0.6451171",
"0.63700366",
"0.6360486",
"0.63131547",
"0.62055993",
"0.61969274",
"0.61965156",
"0.6179578",
"0.6170901",
"0.6168173",
"0.6132337",
"0.6125871",
"0.61153567",
"0.6096839",
"0.6093501",
"0.60723966",
"0.60519725",
"0.6046306",
"0.60337675",
"0.60157764",
"0.6010953",
"0.598857",
"0.5985077"
] | 0.70219463 | 1 |
Complete name = "parent_name / name" | def get_complete_name(self):
if self.parent_id:
name = '%s / %s'%(self.parent_id.get_complete_name(), self.name)
else:
name = self.name
return name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _complete_name(self):\n for record in self:\n if record.parent_id:\n record.complete_name = record.parent_id.complete_name + ' / ' + record.name\n else:\n record.complete_name = record.name",
"def longname(self):\n if not self.parent:\n return self.name\n return '%s.%s' % (self.parent.longname, self.name)",
"def fullname(self):\n name = self.name or \"\"\n if self._parent and self._parent.fullname:\n name = path_splitter.join([self._parent.fullname, name])\n return name.replace(\":\", \"_\") # replace for graphviz escaping",
"def _complete_name(self, cr, uid, ids, name, args, context=None):\n res = {}\n#####added \n context=context or {}\n \n for m in self.browse(cr, uid, ids, context=context):\n if context.get('no_complete_name'):\n res[m.id] = m.name\n return res\n names = [m.name]\n parent = m.location_id\n while parent:\n names.append(parent.name)\n parent = parent.location_id\n res[m.id] = ' / '.join(reversed(names))\n return res",
"def parentname(self):\n return _coordsys.coordsys_parentname(self)",
"def get_id(self):\n p = self.get_parent()\n if p:\n parent_id = p.get_id()\n if parent_id:\n return '.'.join((parent_id,self.name))\n else:\n return self.name\n else:\n return self.name",
"def get_absname(self):\n if self.animal == None: # no parent animal\n return self.name\n else:\n return '.'.join((self.animal.name, self.name))",
"def test_get_parent_type_name(self):\n pass",
"def name(self):\n return 'n' + self._name\n # if self.children:\n # return 'fossil_' + self._name\n # else:\n # return 'society_' + self._name",
"def __str__(self):\n return str(self.__parent)",
"def full_name(self) -> str:\n # return self.separator.join(map(lambda x: x.name, self.path()))\n return self.separator.join(map(lambda x: x.tagged_name, self.path()))",
"def __str__(self):\n\t\treturn str(self.__parent)",
"def generate_name_suggestion(self):\n if not self._container.group.parent_node:\n return generate_unique_name(self.text().lstrip(\"NX\"), [])\n return generate_unique_name(\n self.text().lstrip(\"NX\"),\n [\n g\n for g in self._container.group.parent_node.children\n if isinstance(g, Group)\n ],\n )",
"def render(self, parent_name: str = \"\") -> str:\n if parent_name in self.replace_with_dict:\n return Type.DictStrAny.render()\n\n if self.is_stringified():\n return f'\"{self.name}\"'\n\n if parent_name and parent_name == self.name:\n return f'\"{self.name}\"'\n\n return self.name",
"def __path(self):\n if self.parent:\n return self.parent.__path() + os.sep + self.__sanitize(self.name)\n return self.__sanitize(self.name)",
"def hierarchy_name(self, adjust_for_printing=True):\n if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)\n else: adjust = lambda x: x\n if self.has_parent():\n return self._parent_.hierarchy_name() + \".\" + adjust(self.name)\n return adjust(self.name)",
"def get_name():",
"def rel_name(self, other: NamedNode) -> tp.Optional[str]:\n\n # The name relative to the \"void\" in the full name\n if other is None:\n return self.full_name\n\n path = list(takewhile(lambda x: other is not x, self.iter_path_reverse()))\n\n # This means that other is not an ancestor of self\n if not path or path[-1].parent is not other:\n return None\n\n # return self.separator.join(reversed(list(map(lambda x: x.name, path))))\n return self.separator.join(reversed(list(map(lambda x: x.tagged_name, path))))",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def GetParentNameForGetHistory(organization,\n project,\n attribute='root cloud asset'):\n VerifyParentForGetHistory(organization, project, attribute)\n if organization:\n return 'organizations/{0}'.format(organization)\n return 'projects/{0}'.format(project)",
"def visit_name(self, node, children):\n name = ''.join(children)\n return name",
"def _build_fullname(tree: dict) -> None:\n def _apply(item: dict) -> None:\n components = item.pop(\"components\")\n try:\n idx = components[::-1].index(None)\n except ValueError:\n pass\n else:\n components = components[len(components) - idx:]\n if components:\n item[\"fullname\"] = \".\".join(components)\n else:\n item[\"fullname\"] = None\n apply_tree(tree, _apply)",
"def get_name() -> str:",
"def get_name(self):\n return self.children[0]",
"def find_parent_loop_name(node_name, while_loop_name_set):\n ploop_name = \"\"\n name_prefix = node_name.rsplit(\"/\", 1)[0]\n if name_prefix.startswith(\"^\"):\n name_prefix = name_prefix[1:]\n for lname in while_loop_name_set:\n if name_prefix.startswith(lname) and len(ploop_name) < len(lname):\n ploop_name = lname\n\n if len(ploop_name) == 0:\n ploop_name = name_prefix\n\n return ploop_name",
"def get_name(self):"
] | [
"0.73629916",
"0.73120004",
"0.7069055",
"0.6927215",
"0.6807252",
"0.66381985",
"0.6618085",
"0.644751",
"0.6444999",
"0.6424132",
"0.6367957",
"0.63577133",
"0.6339139",
"0.6298488",
"0.62682086",
"0.62632906",
"0.62410146",
"0.62161547",
"0.6157869",
"0.6157869",
"0.6157869",
"0.6157869",
"0.6157869",
"0.6137817",
"0.61322874",
"0.6130363",
"0.6075083",
"0.60673505",
"0.6039524",
"0.5997704"
] | 0.76520973 | 0 |
name_search function of the gamme | def name_search(self, name, args=None, operator='ilike', limit=1000):
args = self.compute_domain_args(args)
recs = self.search([('name', operator, name)] + args, limit=limit)
return recs.name_get() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def name_search(self,cr,uid,name='',args=[],operator='ilike',context=None,limit=80):\n if context is None: \n context={}\n ids= []\n if len(name) >= 2:\n ids = self.search(cr, uid, [('vat',operator,name)] + args, limit=limit, context=context)\n if not ids:\n ids = self.search(cr,uid,[('name',operator,name)] + args, limit=limit, context=context)\n return self.name_get(cr,uid,ids,context=context)",
"def search_entity(self, name_filter):\n name_filter=name_filter.lower()\n model_reader=oc.delegator.getModelReader()\n names=model_reader.getEntityNames()\n # print(len(names))\n for name in names:\n if name_filter in name.lower():\n print(name)",
"def name_search(self, search):\n if isinstance(search, str):\n name_re = re.compile(search)\n else:\n name_re = search\n matches = [\n entry\n for entry in self\n if entry is not None and name_re.search(entry.name)\n ]\n return matches",
"def search(self, word):",
"def search():\n pass",
"def search_by_name(self, name):\r\n return self.__filter(self.get_all_persons(), lambda x: name.lower().strip() in x.name.lower().strip())",
"def test_name(self):\n\n self.check_search(\n dict(name=u'flamethrower'),\n [u'Flamethrower'],\n 'searching by name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'durp'),\n [],\n 'searching for a nonexistent name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'quICk AttACk'),\n [u'Quick Attack'],\n 'case is ignored',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'thunder'),\n [ u'Thunder', u'Thunderbolt', u'Thunder Wave',\n u'ThunderShock', u'ThunderPunch', u'Thunder Fang'],\n 'no wildcards is treated as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'*under'),\n [u'Thunder'], # not ThunderShock, etc.!\n 'splat wildcard works and is not used as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'b?te'),\n [u'Bite'], # not Bug Bite!\n 'question wildcard works and is not used as substring',\n exact=True,\n )",
"def search(self, term):",
"def __ui_search_persons_by_name(self):\n searched_name = input(\"Introduce the name: \").strip().lower()\n if searched_name == \"\":\n print(\"You cannot search persons by an empty name!\\n\")\n return\n\n searched_persons = self.__person_service.find_persons_by_name(searched_name)\n\n if len(searched_persons) == 0:\n print('There is no person whose name contains \"{}\"!\\n'.format(searched_name))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")",
"def test_search_checkname(self):\n self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')\n self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')",
"def search_tournament_by_name(tournaments_table, name):\r\n result = tournaments_table.search(Query().Nom == name)\r\n print(result)",
"def search_player_by_name(players_table, name):\r\n result = players_table.search(Query().Nom == name)\r\n print(result)",
"def search(self, name: str) -> \"Navaids\":\n return self.__class__(\n self.data.query(\n \"description == @name.upper() or name == @name.upper()\"\n )\n )",
"def filter_name(self, name):\n return self.form.set_value(\"generating station search\", name)",
"def test_name_search(self):\n # A name in the database\n search_string = \"Umut\"\n # Search For Umut\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string})\n search_result = json.loads(resp.content)\n # Check the name field of the result\n self.assertEqual(search_string,search_result[0]['name'],\"It doesn't return the user with the name {}\".format(search_string))",
"def name_search(self, name, args=None, operator='ilike', limit=100):\n args = args or []\n if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):\n tickets = []\n if name.isdigit():\n number = int(name)\n tickets = self.search([('number', '=', number)] + args,\n limit=limit)\n else:\n tickets = self.search([('name', operator, name)] + args,\n limit=limit)\n if len(tickets) > 0:\n return tickets.name_get()\n return super(Ticket, self.browse()).name_search()",
"def find_names(s):\n \"*** YOUR CODE HERE ***\"",
"def searchByName(database):\n firstname=str(input(\"What is his first name :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(usr)",
"def search_by_name(request):\n search_term = request.GET.get('q')\n minerals = Mineral.objects.filter(name__icontains=search_term)\n messages.success(request, \"Successfully found {} of the following minerals!\".format(len(minerals)))\n return render(request, 'index.html', {'minerals': minerals})",
"def search(self, name):\n\n name = name.lower().strip()\n exact_names = get_close_matches(name, self.possible_names, n=1)\n if not exact_names:\n return None\n else:\n exact_name = exact_names[0]\n id = self.df_possible_names[self.df_possible_names['name'] == exact_name].index[0] \n return self.df_possible_names.loc[id, 'id']",
"def contains(name):",
"def __ui_search_student_by_name(self, search):\n try:\n result = self.__student_controller.search_by_name(search)\n for student in result:\n print(str(student))\n\n except RepositoryException as re:\n print(re)\n return",
"def getindexu(self,name,searchby='name'):\n name = name.replace(':','_').lower()\n result = []\n\n for (i,elem) in enumerate(self.lat):\n if fnmatch.fnmatch(elem[searchby],name):\n result.append(i)\n return result",
"def find_by_name(command, name): # fine\r\n if command == 'FindByFName':\r\n for student in StudentRoster:\r\n if name == student.first:\r\n print(student_format(student))\r\n elif command == 'FindByLName':\r\n for student in StudentRoster:\r\n if name == student.last:\r\n print(student_format(student))",
"def searchByNameSubstring(self, substring):\n if substring.strip() == '':\n return None\n return self.filter(name__icontains=substring)",
"def search_for_meme(self, search):\n cursor = self.conn.cursor()\n cursor.execute(f\"select * from memes where lower(meme_name) like ?\", (f'%{search}%', ))\n results = cursor.fetchall()\n cursor.close()\n return results",
"def search(self, search):\n raise NotImplementedError",
"def search_by_name(self, request, **kwargs):\n self.method_check(request, allowed=['get'])\n self.throttle_check(request)\n\n keyword = request.GET['keyword']\n members = Member.objects.filter(Q(first_name__icontains=keyword) | Q(last_name__icontains=keyword))\n\n bundles = []\n\n for member in members:\n bundle = self.build_bundle(obj=member, request=request)\n bundles.append(self.full_dehydrate(bundle, for_list=True))\n\n return self.create_response(request, bundles)",
"def search(self, *args, **kwargs):",
"def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):\n if context is None:\n context = {}\n\n if 'default_spare_ok' in context:\n ids = []\n ids = self.search(cr, uid, [('name', operator, name)]+ args, limit=limit, context=context)\n ids += self.search(cr, uid, [('e_name', operator, name)]+ args, limit=limit, context=context)\n ids += self.search(cr, uid, [('t_number', operator, name)]+ args, limit=limit, context=context)\n ids += self.search(cr, uid, [('default_code', operator, name)]+ args, limit=limit, context=context)\n ids = list(set(ids))\n args.append(('id','in',ids))\n\n if 'spares_ids' in context:\n idss = []\n product_ids = resolve_o2m_operations(cr, uid, self.pool.get('maintenance.spare'),\n context.get('spares_ids'), [\"product_id\"], context)\n args.append(('id', 'not in', [isinstance(\n d['product_id'], tuple) and d['product_id'][0] or d['product_id'] for d in product_ids]))\n\n '''if 'vehicle_id' in context and context['vehicle_id']:\n vehicle_category = self.pool.get('fleet.vehicle').browse(cr, uid, context['vehicle_id'], context).type.id\n if vehicle_category:\n #idss = self.search(cr, uid, [('vehicle_category','=',vehicle_category)])\n idss = self.search(cr, uid, [('vehicle_category_ids','in',[vehicle_category])])'''\n #args.append(('id','in',idss))\n if ids :\n result = self.name_get(cr, uid, ids, context=context)\n return result\n else:\n return []\n else:\n return super(product_product, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)"
] | [
"0.7332873",
"0.7273161",
"0.71859074",
"0.70795864",
"0.7058341",
"0.6973131",
"0.69059366",
"0.686474",
"0.6849113",
"0.684415",
"0.683499",
"0.6810947",
"0.6781055",
"0.6720904",
"0.669474",
"0.664896",
"0.66237813",
"0.6620524",
"0.658569",
"0.657765",
"0.6539284",
"0.65287286",
"0.6490071",
"0.64863664",
"0.64721",
"0.64484745",
"0.641135",
"0.6402963",
"0.63968956",
"0.63952816"
] | 0.7374638 | 0 |
Sets the tab selection | def select_tab(self, selected_tab):
for tab in self.tabs:
if tab == selected_tab:
tab.selected = True
else:
tab.selected = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SetCurrentTab(self, index):\n if index == self.GetCurSel(): return\n self.OnSelChanging(None) #simulate\n self.SetCurSel(index) #does not cause sel changing and sel change events\n self.OnSelChange(None) #simulate",
"def tabSelected(self):",
"def tabSelected(self):",
"def tabSelected(self):\r\n self.transactionMenuWidget.tabSelected()",
"def set_selection(self, selection):\n self._selection = selection",
"def set_tab(self, tab_name: Optional[str], index: int = 0):\n self.tab_ctrl.set_tab(tab_name, index)",
"def OnTabClicked(self, event):\r\n\r\n if self._textCtrl is not None:\r\n self._textCtrl.StopEditing()\r\n \r\n ctrl = event.GetEventObject()\r\n assert ctrl != None\r\n\r\n wnd = ctrl.GetWindowFromIdx(event.GetSelection())\r\n assert wnd != None\r\n\r\n self.SetSelectionToWindow(wnd)",
"def SetSelectionToWindow(self, win):\r\n\r\n idx = self._tabs.GetIdxFromWindow(win)\r\n \r\n if idx == wx.NOT_FOUND:\r\n raise Exception(\"invalid notebook page\")\r\n\r\n if not self.GetEnabled(idx):\r\n return\r\n \r\n # since a tab was clicked, let the parent know that we received\r\n # the focus, even if we will assign that focus immediately\r\n # to the child tab in the SetSelection call below\r\n # (the child focus event will also let AuiManager, if any,\r\n # know that the notebook control has been activated)\r\n\r\n parent = self.GetParent()\r\n if parent:\r\n eventFocus = wx.ChildFocusEvent(self)\r\n parent.GetEventHandler().ProcessEvent(eventFocus)\r\n\r\n self.SetSelection(idx)",
"def SetSelectionToPage(self, page):\r\n \r\n self.SetSelectionToWindow(page.window)",
"def set_selected(self, selected):\n self.selected = selected",
"def handle_tab(self, index):\n self.current_tab = index\n self.views[index].activate()",
"def setDataSelection(self, selection):\n pass",
"def changeSelection(self, value):\n self.layer.selected_label = value\n self.selectionSpinBox.clearFocus()\n self.setFocus()",
"def update_tab(self):\r\n self.current_tab = self.TabWidget.currentIndex()\r\n# print(\"Tab = \" + str(self.current_tab))\r\n self.update_source_range_type()\r\n self.update_variables_switch[self.current_tab]()\r\n self.update_header_string()\r\n self.update_filter_on()",
"def switch_tab(self, tab):\n\n self.driver.switch_to.window(self.driver.window_handles[tab])",
"def tab_id(self, tab_id):\n\n self._tab_id = tab_id",
"def _select_tab_item(element, name):\n\n try:\n element.SelectTabItem(name)\n except CSharpException as exception:\n raise FlaUiError(FlaUiError.GenericError.format(exception.Message)) from None",
"def SetSelection(self, new_page, force=False):\r\n wnd = self._tabs.GetWindowFromIdx(new_page)\r\n \r\n #Update page access time\r\n self._tabs.GetPages()[new_page].access_time = datetime.datetime.now()\r\n \r\n if not wnd or not self.GetEnabled(new_page):\r\n return self._curpage\r\n\r\n # don't change the page unless necessary\r\n # however, clicking again on a tab should give it the focus.\r\n if new_page == self._curpage and not force:\r\n \r\n ctrl, ctrl_idx = self.FindTab(wnd)\r\n if wx.Window.FindFocus() != ctrl:\r\n ctrl.SetFocus()\r\n \r\n return self._curpage\r\n \r\n evt = AuiNotebookEvent(wxEVT_COMMAND_AUINOTEBOOK_PAGE_CHANGING, self.GetId())\r\n evt.SetSelection(new_page)\r\n evt.SetOldSelection(self._curpage)\r\n evt.SetEventObject(self)\r\n\r\n if not self.GetEventHandler().ProcessEvent(evt) or evt.IsAllowed():\r\n \r\n old_curpage = self._curpage\r\n self._curpage = new_page\r\n\r\n # program allows the page change\r\n evt.SetEventType(wxEVT_COMMAND_AUINOTEBOOK_PAGE_CHANGED)\r\n self.GetEventHandler().ProcessEvent(evt)\r\n\r\n if not evt.IsAllowed(): # event is no longer allowed after handler\r\n return self._curpage\r\n \r\n ctrl, ctrl_idx = self.FindTab(wnd)\r\n \r\n if ctrl:\r\n self._tabs.SetActivePage(wnd)\r\n ctrl.SetActivePage(ctrl_idx)\r\n self.DoSizing()\r\n ctrl.DoShowHide()\r\n ctrl.MakeTabVisible(ctrl_idx, ctrl)\r\n\r\n # set fonts\r\n all_panes = self._mgr.GetAllPanes()\r\n for pane in all_panes:\r\n if pane.name == \"dummy\":\r\n continue\r\n \r\n tabctrl = pane.window._tabs\r\n if tabctrl != ctrl:\r\n tabctrl.SetSelectedFont(self._normal_font)\r\n else:\r\n tabctrl.SetSelectedFont(self._selected_font)\r\n \r\n tabctrl.Refresh()\r\n tabctrl.Update()\r\n \r\n # Set the focus to the page if we're not currently focused on the tab.\r\n # This is Firefox-like behaviour.\r\n if wnd.IsShownOnScreen() and wx.Window.FindFocus() != ctrl:\r\n wnd.SetFocus()\r\n\r\n return old_curpage\r\n \r\n return self._curpage",
"def setModeSelect(self):\n self.scene().mode = fsScene.MODE_SELECT",
"def OnCtrlTabCommand(self, event):\r\n event.Skip()\r\n sel = self.mainToolbook.GetSelection()\r\n pages = self.mainToolbook.GetPageCount()\r\n self.mainToolbook.ChangeSelection((sel+1)%pages)",
"def setSelectionColorScheme(self, focused=None, unfocused=None):\n if focused is None:\n focused = self.selectionColor\n if unfocused is None:\n unfocused = self.unfocusedRegionColor\n self.selection.setColorScheme(focused, unfocused)\n beg = self.selection.getBeginSeconds()\n dur = self.selection.getWidthSeconds()\n wform = self.selection.getSelectedWaveform()\n self.selection.select(beg, dur, wform)",
"def select_dial_digits_tab(self):\n self.click_element(self.dial_digits_tab_locator, True, True)",
"def set_selection(self, index, value):\n if not self._has_cbox[index]:\n return\n i = self._widgets[index][\"values\"].index( str(value) )\n self._widgets[index].current(i)",
"def tab_color(self, tab_color):\n\n self.container['tab_color'] = tab_color",
"def SetSelection(self, s):\r\n\r\n self.selection = s\r\n self._commandInt = s",
"def select_disputes_tab(self):\n self.click_element(self.disputes_tab_locator)",
"def BaseSetSelection(self, start, end):\n super(EditraBaseStc, self).SetSelection(start, end)",
"def select_transactions_tab(self):\n self.click_element(self.transactions_tab_locator)",
"def setSelection(self, current: QModelIndex, old: QModelIndex):\n node = current.internalPointer()\n if node is not None:\n typeInfo = node.typeInfo()\n self.showEditor(typeInfo)\n for type, editor in self._editor_dict.items():\n editor.setSelection(current)",
"def renameTab(self):\n tabname = self.baseUI.tabnameBox.text() #new name text\n tabIndex = self.currentIndex() #index of current tab\n self.setTabText(tabIndex, tabname) #rename tab"
] | [
"0.7228304",
"0.7061206",
"0.7061206",
"0.7045288",
"0.6945325",
"0.6928036",
"0.68255275",
"0.67694014",
"0.6713398",
"0.6493335",
"0.6293864",
"0.62175435",
"0.6192053",
"0.6135601",
"0.6122223",
"0.6078583",
"0.6056166",
"0.5967249",
"0.5932563",
"0.5923162",
"0.58946025",
"0.5849154",
"0.5833917",
"0.5823434",
"0.5821465",
"0.58121604",
"0.58024025",
"0.57809514",
"0.5768119",
"0.5764687"
] | 0.7906762 | 0 |
Ask all GUI elements to check event | def check_event(self, event):
for tab in self.tabs:
tab.check_event(event) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_events(self):\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n self.ai_game.quit()\r\n elif event.type == pg.KEYDOWN:\r\n self._check_keydown_events(event)\r\n elif event.type == pg.KEYUP:\r\n self._check_keyup_events(event)\r\n elif event.type == pg.MOUSEBUTTONDOWN:\r\n mouse_pos = pg.mouse.get_pos()\r\n self._check_button(mouse_pos)",
"def _check_events(self):\t\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tself._check_keydown_events(event)\n\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tself._check_keyup_events(event)\n\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tself._check_retry_button(pygame.mouse.get_pos())",
"def check_events(self):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n if self._stats.get_status() == \"Start_game\":\r\n self.check_game_mode_button(mouse_x, mouse_y)\r\n elif self._stats.get_status() == \"replay\":\r\n self.check_replay_button(mouse_x, mouse_y)\r\n else:\r\n self.check_click(mouse_x, mouse_y)",
"def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n # if the exit button on screen is clicked close the program\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)",
"def check_event(self, event):\r\n self.tabbed_pane.check_event(event)",
"def _check_events(self):\n for event in pygame.event.get():\n # quit stuff\n if event.type == pygame.QUIT:\n sys.exit()\n # mouse click for 'PLAY' button\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)\n\n # checks for key down/up events and sends it to appropriate method\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)",
"def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_event(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_event(event)",
"def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keyDown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyUP_events(event)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)",
"def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)",
"def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)",
"def _check_events(self):\n\t\t# Watch for keyboard and mouse events.\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tself._check_keydown_events(event)\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tself._check_keyup_events(event)\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tmouse_pos = pygame.mouse.get_pos()\n\t\t\t\tself._check_play_button(mouse_pos)",
"def _check_event(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self.waiting = not self.waiting\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if self.waiting:\n x,y = pygame.mouse.get_pos()\n cell_addr_y = int(y/self.cell_width)\n cell_addr_x = int(x/self.cell_width)\n self.cells[cell_addr_y][cell_addr_x].update()",
"def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)",
"def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit() \n if event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)",
"def check_events(rk_settings, screen, rock, bullets):\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\tsys.exit()\r\n\t\t\r\n\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\tcheck_keydown_events(event, rk_settings, screen, rock, bullets)\r\n\t\t\t\t\r\n\t\telif event.type == pygame.KEYUP:\r\n\t\t\tcheck_keyup_events(event, rock)",
"def check_events(ai_settings,screen,stats,play_button,ship,aliens,bullets,sb):\n\t# Observe eventos de teclado e de mouse\n\tfor event in pygame.event.get():\n\t\t\n\t\tif event.type == pygame.QUIT:\n\t\t\t\n\t\t\tstats.file.stored_high_score(stats.high_score)\n\n\t\t\tsys.exit()\n\n\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tmouse_x,mouse_y = pygame.mouse.get_pos()\n\t\t\tcheck_play_button(ai_settings,screen,stats,play_button,ship\n\t\t\t\t,aliens,bullets,mouse_x,mouse_y,sb)\n\n\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\n\t\t\tcheck_keydown_events(event,ai_settings,screen,ship,bullets,stats,\n\t\t\t\taliens,sb)\n\n\t\telif event.type == pygame.KEYUP:\n\n\t\t\tcheck_keyup_events(event,ship)",
"def preliminary_check_controls(self):\n\n # is the program still in a binding state?\n if self.is_binding:\n self.error_msg['text'] = 'You are still binding'\n self.display_object_on_canvas(\n self.error_msg,\n 50,\n self.controller.GAME_HEIGHT - self.error_msg.winfo_reqheight() - 15\n )\n\n # are the controls set all unique?\n elif len({\n self.controller.slide_up_control,\n self.controller.slide_down_control,\n self.controller.slide_left_control,\n self.controller.slide_right_control\n }) != 4:\n self.error_msg['text'] = 'All controls must be unique'\n self.display_object_on_canvas(\n self.error_msg,\n 50,\n self.controller.GAME_HEIGHT - self.error_msg.winfo_reqheight() - 15\n )\n\n # all tests passed?\n else:\n # save to file - do this\n\n # move to main menu frame\n self.controller.show_frame(MainMenu)",
"def check(self):\r\n for action in self._actions:\r\n action.check()",
"def check_event(self, event):\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if self.selected:\r\n for item in self.buttons:\r\n item.handleMouseDown(event.pos[0], event.pos[1])\r\n else:\r\n self.tab.handleMouseDown(event.pos[0], event.pos[1])",
"def Check(self):\n\t\tself.ids.lblText.text = \"Check des servos\"\n\t\ttime.sleep(0.5)\n\t\tself.tabServo = axControl.checkAllServo(14) # Check tous les servos\n\t\t#time.sleep(2) # Attends 2 secondes",
"def handle_mousedown(self, button, name):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mousedown(button, name):\r\n return True\r\n return False",
"def checkallinputs(self):\n activeinputs=self.getactiveinputs() \n self.checksignature(activeinputs)\n self.showsignatureM()\n self.showactiveinputs(self.inputs,activeinputs)\n #self.showerrors(self.inputs)",
"def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)",
"def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)",
"def check(self):\n if self.widget:\n self.widget.update()\n\n self.check_virtual_display()\n\n return self.runner.check()",
"def onClickCheckbutton(self):\r\n self.app.unbind()\r\n mask = []\r\n for val in self.intvars:\r\n mask.append(val.get())\r\n # Recreate fNIRS Channels with channel mask\r\n self.app.reconfigureChannels(self.app.dataPath,mask)\r\n self.app.bindHotkeys()",
"def handle_uncaught_event(self, event):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_uncaught_event(event):\r\n return True\r\n return False",
"def poll(self):\n\tself.met = self.button.poll()",
"def check_events(si_settings, screen,stats,sb,play_button, ship,aliens, bullets):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, si_settings, screen, ship, bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x,mouse_y = pygame.mouse.get_pos()\n check_play_button(si_settings,screen,stats,sb,play_button,ship,aliens,bullets,mouse_x,mouse_y)",
"def handle_mouseup(self, button, name):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_mouseup(button, name):\r\n return True\r\n return False"
] | [
"0.716912",
"0.6734102",
"0.6625884",
"0.655134",
"0.6521154",
"0.64759755",
"0.64363146",
"0.64115113",
"0.63923794",
"0.63923794",
"0.6372732",
"0.6327478",
"0.63157505",
"0.62871295",
"0.61911",
"0.6177678",
"0.60905755",
"0.6080838",
"0.6063604",
"0.60579574",
"0.6054394",
"0.6045213",
"0.60142374",
"0.60142374",
"0.6001982",
"0.59940773",
"0.599232",
"0.5962204",
"0.5957336",
"0.59538776"
] | 0.6774125 | 1 |
Get the IMAP host. | def get_imap_host():
return get_account().Get(GOA_ACCOUNT_MAIL, 'ImapHost',
dbus_interface=PROPERTIES) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mail_host():\n portal = getSite()\n if portal is None:\n return None\n request = portal.REQUEST\n ctrlOverview = getMultiAdapter((portal, request), name='overview-controlpanel')\n mail_settings_correct = not ctrlOverview.mailhost_warning()\n if mail_settings_correct:\n mail_host = getToolByName(portal, 'MailHost', None)\n return mail_host",
"def get_host(self) -> str:\n return self.socket.getsockname()[0]",
"def gethost(self):\n return self.__host",
"def get_host(self):\n return self.host",
"def get_host(self):\r\n return self.host",
"def getHost(self):\n return self._host",
"def getHost(self):\n return self._host",
"def getHost(self):\n return self.conn.transport.transport.getHost()",
"def getHost(self):\n\n\t\treturn HOST",
"def getHost(self):\n host = self.url[self.host_head:self.host_tail]\n return host",
"def host(self) :\n\t\ttry :\n\t\t\treturn self._host\n\t\texcept Exception as e:\n\t\t\traise e",
"def getHost(self, ip):\n try:\n data = socket.gethostbyaddr(ip)\n host = repr(data[0])\n return host\n except Exception:\n # fail gracefully\n return ip",
"def host(self):\n return self._host",
"def host(self):\n return self._host",
"def host(self):\n return self._host",
"def host(self):\n return self._host",
"def host(self):\n return self._host",
"def host(self):\n return self._host",
"def host(self):\n return self._host",
"def host(self):\n if self.url.startswith(\"dns:\"):\n return self.url[4:]\n else:\n return urlparse(self.url).hostname",
"def get_host_name():\n return socket.gethostname()",
"def get_host(self):\n self.server_name = subprocess.check_output(['hostname', '-s']).split('\\n')[0]\n self.ip_addr = subprocess.check_output(['hostname', '-i']).split('\\n')[0]\n return self.server_name, self.ip_addr",
"def host(self) -> str:\n return pulumi.get(self, \"host\")",
"def host(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"host\")",
"def host(self):\n\n return self._host",
"def host(self):\n return self._host[CONF_HOST]",
"def host(self) -> str:\n return self.proto.host",
"def getHost():",
"def getHost():",
"def host(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host\")"
] | [
"0.7323792",
"0.7314019",
"0.728461",
"0.719561",
"0.71634734",
"0.7103244",
"0.7092401",
"0.70497376",
"0.7040019",
"0.70081824",
"0.69928026",
"0.69074327",
"0.6778371",
"0.6778371",
"0.6778371",
"0.6778371",
"0.6778371",
"0.6778371",
"0.6778371",
"0.67529756",
"0.67471695",
"0.67438126",
"0.66878587",
"0.66838133",
"0.6676914",
"0.6647528",
"0.66136855",
"0.66127896",
"0.66127896",
"0.66057867"
] | 0.85919154 | 0 |
Get the IMAP user name. | def get_imap_user_name():
return str(get_account().Get(GOA_ACCOUNT_MAIL, 'ImapUserName',
dbus_interface=PROPERTIES)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_name(self):\n return self._stub.List(self._message).user_name",
"def user_name(self) -> str:\n return pulumi.get(self, \"user_name\")",
"def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")",
"def getUserName(self):\n userType = self.env['res.users']\n \n uiUser = userType.browse(self._uid)\n return uiUser.name",
"def user_name(self):\n return self._user_name",
"def get_name(self):\n return self.user.username if self.user.username else self.user.email",
"def user_name(self):\n\n return self._user_name",
"def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")",
"def getUserName(self):\n user = User.by_id(self.user_id)\n return user.name",
"def get_username(self):\n return str(getattr(self, self.USERNAME_FIELD))",
"def get_username(self) -> str:\n return self._username",
"def username(self) -> str:\n return pulumi.get(self, \"username\")",
"def username(self) -> str:\n return pulumi.get(self, \"username\")",
"def username(self) -> str:\n return pulumi.get(self, \"username\")",
"def user_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_name\")",
"def get_user_name(self):\n\t\treturn call_sdk_function('PrlLic_GetUserName', self.handle)",
"def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")",
"def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")",
"def user_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_name\")",
"def get_uname(self):\n return Server.t_usernames.get(threading.get_ident())",
"def get_username(self):\n return self.browser.find_element(*locators.USER_NAME_TEXT).text",
"def _get_user_name():\n return pwd.getpwuid(os.getuid())[0]",
"def user_name(self):\n return utils.to_unicode(lib.sp_session_user_name(self._sp_session))",
"def _get_username_from_api(self):\n result = self.api_query(action=\"query\", meta=\"userinfo\")\n return result[\"query\"][\"userinfo\"][\"name\"]",
"def get_username(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip()[0:1])\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()",
"def get_user_info(self) -> str:\n return self._searcher.get_user_info()",
"def get_user_name(self, uid):\n uid = str(uid)\n name = self._username_cache.get(uid)\n if name is None:\n name = self.fbchat_client.fetchUserInfo(uid)[uid].name\n self._username_cache[uid] = name\n return name",
"def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name",
"def _get_user_name(self):\n if self.runtime.get_real_user is None:\n return 'staff'\n else:\n return self.runtime.get_real_user(self.runtime.anonymous_student_id).username",
"def get_username(self, tg_user_id):\n\n data = {\n 'user_id': tg_user_id\n }\n result = self._send_data('getUser', data)\n if result.update:\n return result.update.get('username','')"
] | [
"0.7674771",
"0.7609183",
"0.7536241",
"0.739787",
"0.7250912",
"0.71732175",
"0.7153436",
"0.7127807",
"0.7097307",
"0.708607",
"0.705256",
"0.70507437",
"0.70507437",
"0.70507437",
"0.7041432",
"0.70206535",
"0.7014133",
"0.7014133",
"0.7014133",
"0.6957826",
"0.69508284",
"0.68654984",
"0.6862261",
"0.68587744",
"0.68151754",
"0.6813959",
"0.6803739",
"0.6801531",
"0.6799316",
"0.67990136"
] | 0.8884453 | 0 |
Get the client secret from the online account. | def get_client_secret():
return str(get_account().Get(GOA_ACCOUNT_OAUTH2, 'ClientSecret',
dbus_interface=PROPERTIES)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def client_secret(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> str:",
"def client_secret(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> str:\n return self.get_env_var(self.client_secret_var)",
"def client_secret(self) -> str:\n return self.get_env_var(self.client_secret_var)",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def client_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_secret\")",
"def _get_enc_secret():\n return current_app.config.get('ACCOUNT_SECRET_KEY')",
"def client_basic_secret(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"client_basic_secret\")",
"def get_secret_key():\n return get_config_handler().get_secret_key()",
"def secret(self) -> str:\n return pulumi.get(self, \"secret\")",
"def secret(self) -> str:\n return pulumi.get(self, \"secret\")",
"def get_secret(project_name, secret_name):\n secrets = secretmanager.SecretManagerServiceClient()\n secret_value = (\n secrets.access_secret_version(\n \"projects/\" + project_name + \"/secrets/\" + secret_name + \"/versions/latest\"\n )\n .payload.data.decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n )\n return secret_value",
"def client_basic_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_basic_secret\")",
"def client_basic_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_basic_secret\")",
"def GetSecretKey():\n _LOG.info('Getting webapp2_secret_key.')\n return (Webapp2SecretKey.get_by_id('current_secret_key')\n .secret_key.encode('ascii', 'ignore'))",
"def secret(self):\n return self._secret",
"def getSecret(self):\n\n with open(self._secret_file) as f:\n secret=f.readline().rstrip()\n \n return secret",
"def get_secret():\n\n secret_name = \"Jido-Active-Directory-Service-Account\"\n\n # Create a Secrets Manager client\n session = boto3.session.Session()\n client = session.client(\n service_name='secretsmanager',\n region_name= os.environ.get(\"AWS_DEFAULT_REGION\")\n )\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId= secret_name\n )\n except ClientError as e:\n print(\"Error getting secret key!: \" + str(e))\n return None\n else:\n # Decrypts secret using the associated KMS CMK.\n if 'SecretString' in get_secret_value_response:\n return get_secret_value_response['SecretString']\n\n return None"
] | [
"0.7722696",
"0.7722696",
"0.7474888",
"0.7474458",
"0.7474458",
"0.7474458",
"0.7474458",
"0.7474458",
"0.7392615",
"0.7392615",
"0.7344622",
"0.7344622",
"0.7344622",
"0.7344622",
"0.7344622",
"0.7344622",
"0.7344622",
"0.7344622",
"0.732468",
"0.71429634",
"0.70245343",
"0.69338983",
"0.69338983",
"0.6915746",
"0.6851041",
"0.6851041",
"0.6847519",
"0.6800664",
"0.6736439",
"0.670794"
] | 0.78848386 | 0 |
Get the access token from the online account. | def get_access_token():
account = get_account()
account.EnsureCredentials(dbus_interface=GOA_ACCOUNT)
access_token, _ = account.GetAccessToken(dbus_interface=GOA_ACCOUNT_OAUTH2)
return str(access_token) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_access_token(self):\n access_token = self._auth_provider._get_auth_value()\n return access_token",
"def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token",
"def get_access_token(self):\n if self.token.is_expired():\n logging.debug('Requesting a new access token')\n self.token.load_from_json(json=self.__get_token_data__())\n else:\n logging.debug('Access token still valid')\n\n return self.token.access_token",
"def access_token(self):\n if self.has_expired():\n self.update()\n\n return self.token['access_token']",
"async def async_get_access_token(self):\n if not self._oauth_session.valid_token:\n await self._oauth_session.async_ensure_token_valid()\n\n return self._oauth_session.token[\"access_token\"]",
"def _request_access_token(self):\n resp = requests.get(self.TOKEN_URL_FORMAT.format(\n self.creds().consumer_key(), self.creds().app_secret())\n )\n status = resp.status_code\n\n # If the token request fails, try to use the configured app id\n # and secret. This probably won't work, but the docs say that it\n # should. for more info, see:\n # https://developers.facebook.com/docs/facebook-login/access-tokens\n token = \"%s|%s\" % (self.creds().consumer_key(),\n self.creds().app_secret())\n if status == 200:\n token = resp.text.split('access_token=')[1]\n else:\n self.logger.error(\n \"Facebook token request failed with status %d\" % status\n )\n return token",
"def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token",
"async def get_access_token(self):\n async with self._access_token_lock:\n if (not self._access_token\n or (not self._access_token_checked\n and not await self.check_access_token(\n self._access_token))):\n await self.receive_new_access_token()\n return self._access_token",
"def getAccessToken(self):\r\n\r\n #lets see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r",
"def _GetAccessToken(self):\n\n # Encoding client authorization \n pair = \"{client_key}:{client_secret}\".format(client_key=self.client_key, client_secret=self.client_secret)\n authorization = 'MUthRmpVa1JUaVlxbDVUTElUYVFnOlRENmpYMTdGbmhPSzNodWdqWUZqVDU0YzVjWGNQeko3'\n\n # Getting the access token\n access_token_headers = { \"Authorization\": \"Basic {authorization}\".format(authorization=authorization) }\n request_endpoint = \"/oauth/token?grant_type=authorization_code&code={code}&redirect_uri=https://80a3bb863001.ngrok.io\".format(code=self.code)\n print(request_endpoint)\n self.conn.request(\"POST\", request_endpoint, headers=access_token_headers)\n res = self.conn.getresponse()\n response = json.loads(res.read().decode(\"utf-8\"))\n\n try:\n return response[\"access_token\"]\n except KeyError:\n print(\"Request for access token failed for the following reason: {reason}\".format(reason=response[\"reason\"]))",
"def get_access_token(*args, **kwargs):\n return get_access_token_async(*args, **kwargs).get_result()",
"def get_access_token(self):\n payload = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'resource': self.resource\n }\n res = requests.post(self.auth_url, data=payload)\n data = res.json()\n if res.status_code == 200:\n return data['access_token'], res\n\n return False, res",
"def access_token(self):\n return self._authentication.access_token",
"def access_token(self):\n return self._authentication.access_token",
"def get_access_token():\n\n scopes = [\n 'https://www.googleapis.com/auth/cloud-platform', 'email', 'profile'\n ]\n\n credentials, _ = default()\n credentials = auth.delegated_credentials(credentials, scopes=scopes)\n\n request = req.Request()\n credentials.refresh(request)\n access_token = credentials.token\n\n return access_token",
"def get_token(self):\n message = {\n \"request\": \"access_token\",\n \"account\": self.account,\n \"min_valid_period\": self.validity,\n \"application_hint\": \"orpy\",\n }\n try:\n self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self._sock.connect(self.socket_path)\n self._sock.sendall(json.dumps(message).encode())\n\n data = \"\"\n while True:\n recv = self._sock.recv(16).decode()\n if recv:\n data += recv\n else:\n break\n except socket.error as err:\n raise exceptions.AuthExceptiob(\n err=\"Cannot communicate with the \" \"oidc-agent: %s\" % err\n )\n finally:\n self._sock.close()\n\n token = json.loads(data)\n if token.get(\"status\") == \"failure\":\n raise exceptions.AuthError(err=token.get(\"error\"))\n return token",
"def get_access_token(self, refresh=False):\n return self._token_man.get_access_token(refresh)",
"def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")",
"def get_token(self, code):\n\n # live need post a form to get token\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\n data = {\n 'client_id': get_config('login.live.client_id'),\n 'client_secret': get_config('login.live.client_secret'),\n 'redirect_uri': get_config('login.live.redirect_uri'),\n 'grant_type': 'authorization_code',\n 'code': code\n }\n # Following is use urllib to post request\n url = get_config('login.live.access_token_url')\n r = requests.post(url, data=data, headers=headers)\n resp = r.json()\n\n if resp.get(\"error\") is not None:\n raise Exception(resp)\n\n return resp[\"access_token\"]",
"def get_token(self):\n oauth_provider = UserSocialAuth.objects.get(provider='drchrono')\n access_token = oauth_provider.extra_data['access_token']\n return access_token",
"def _get_access_token(self):\n if self._service_token:\n logger.info('Use service token: %s',\n 5 * '*' + self._service_token[50:])\n return self._service_token\n\n if not all([self.app_id, self._login, self._password]):\n raise ValueError(\n 'app_id=%s, login=%s password=%s (masked) must be given'\n % (self.app_id, self._login,\n '*' * len(self._password) if self._password else 'None'))\n\n logger.info(\"Getting access token for user '%s'\" % self._login)\n with self.http_session as s:\n if self._client_secret:\n url_query_params = self.do_direct_authorization(session=s)\n else:\n self.do_login(http_session=s)\n url_query_params = self.do_implicit_flow_authorization(session=s)\n logger.debug('url_query_params: %s', url_query_params)\n\n if 'access_token' in url_query_params:\n logger.info('Access token has been gotten')\n return url_query_params['access_token']\n else:\n raise VkAuthError('OAuth2 authorization error. Url params: %s'\n % url_query_params)",
"def access_token(self):\n access_token = self.session.get('component_access_token')\n if access_token:\n if not self.expires_at:\n # user provided access_token, just return it\n return access_token\n\n timestamp = time.time()\n if self.expires_at - timestamp > 60:\n return access_token\n\n self.fetch_access_token()\n return self.session.get('component_access_token')",
"def get_facebook_access_token(self):\n try:\n req = requests.get(\n self.BASE_URL\n + self.VERSION\n + \"/oauth/access_token?client_id=\"\n + self.KEY\n + \"&client_secret=\"\n + self.TOKEN\n + \"&grant_type=client_credentials\",\n timeout=120,\n )\n req.raise_for_status()\n\n except requests.exceptions.ConnectionError:\n self._logger.error(\"Connection Error while obtaining access token\")\n exit(1)\n except requests.exceptions.HTTPError:\n self._logger.error(\"HTTP Error while obtaining access token\")\n exit(1)\n except requests.exceptions.RequestException as err:\n self._logger.error(\"Request exception while obtaining access token\")\n self._logger.error(str(err))\n exit(1)\n\n if req.status_code != 200:\n self._logger.error(\"Error while obtaining access token\")\n exit(1)\n\n response = json.loads(req.text)\n\n return response[\"access_token\"]",
"def get_access_token(self, code):\n url = get_config(\"login.wechat.access_token_url\") % code\n r = self._access_wxapi_or_raise(url)\n\n return (r[\"access_token\"], r[\"openid\"])",
"def accessToken(self):\n if self.isExpired:\n self.refresh()\n\n return self._accessToken",
"def get_app_access_token(self, app_id, app_secret, offline=False):\n if offline:\n return \"{0}|{1}\".format(app_id, app_secret)\n else:\n args = {\n \"grant_type\": \"client_credentials\",\n \"client_id\": app_id,\n \"client_secret\": app_secret,\n }\n\n return self.request(\n \"{0}/oauth/access_token\".format(self.version), args=args\n )[\"access_token\"]",
"def get_access_token(self):\n signed_jwt = self.generate_jwt(os.path.join(FILE_DIR, KEYFILE))\n if signed_jwt is None:\n return False\n url = HOMEGRAPH_TOKEN_URL\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n data = 'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&assertion=' + signed_jwt.decode(\n 'utf-8')\n\n r = requests.post(url, headers=headers, data=data)\n\n if r.status_code == requests.codes.ok:\n token_data = json.loads(r.text)\n self._access_token = token_data['access_token']\n return token_data['access_token']\n\n r.raise_for_status()\n return",
"def get_access_token(self, token_url):\n # type: (str) -> str\n\n payload = {\n \"grant_type\" : \"client_credentials\",\n \"client_id\" : self.client_id,\n \"client_secret\" : self.client_secret,\n \"scope\" : self.client_scope,\n }\n headers = {\n \"accept\" : \"application/json\",\n }\n resp = requests.post(f\"{self.base_url}/{token_url}\", data=payload, headers=headers)\n try:\n if (resp.ok):\n return resp.json().get('access_token')\n except (ValueError):\n self.__log.error (\"Error obtaining access token with credentials\")",
"def getToken(self):\n \n data = '''\n {\n \"auth\": \n {\n \"username\" : \"%s\",\n \"password\" : \"%s\"\n }\n }\n ''' % (self.username, self.password)\n \n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'api.appnexus.com'\n }\n r = requests.post(self.auth_url, data=data, \n headers=headers)\n ac_data = r.json()\n \n if ac_data['response']['status'] != 'OK':\n self.stream_logger.error('Error while retrieving access token')\n self.stream_logger.error('Status code {0}'\\\n .format(ac_data['response']['status']))\n return False\n\n return ac_data['response']['token']",
"def get_token(client):\n # Begin by looking in token cache, first arg is for scopes,\n # because token is for app rather than user, second arg is None.\n result = client.acquire_token_silent(\n [\"https://graph.microsoft.com/.default\"], account=None\n )\n\n if not result:\n logger.info(\"No suitable token exists in cache. Get new one from Azure AD\")\n result = client.acquire_token_for_client(\n scopes=[\"https://graph.microsoft.com/.default\"]\n )\n\n # If we can't get access token, see what went wrong, otherwise return it.\n if \"access_token\" not in result:\n logger.exception(f'{result[\"error_description\"]} - {result[\"correlation_id\"]}')\n else:\n return result[\"access_token\"]"
] | [
"0.7698096",
"0.7672184",
"0.7662937",
"0.76265717",
"0.75849855",
"0.7543128",
"0.7496568",
"0.7485268",
"0.7455015",
"0.7395089",
"0.7383475",
"0.7366155",
"0.73583156",
"0.73583156",
"0.73305404",
"0.7288126",
"0.72761136",
"0.7240915",
"0.72369367",
"0.7220162",
"0.72062266",
"0.7204026",
"0.71669096",
"0.71637243",
"0.71208745",
"0.711663",
"0.7081361",
"0.70694095",
"0.70635515",
"0.7061383"
] | 0.7807534 | 0 |
Add a Vlan object to the net config object. | def add_vlan(self, vlan):
logger.info('adding vlan: %s' % vlan.name)
data = self._add_common(vlan)
logger.debug('vlan data: %s' % data)
self.interface_data[vlan.name] = data
if vlan.routes:
self._add_routes(vlan.name, vlan.routes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(\n self,\n Enabled=None,\n InternalRootPathCost=None,\n Mac=None,\n PortPriority=None,\n Priority=None,\n UpdateRequired=None,\n VlanId=None,\n ):\n # type: (bool, int, str, int, str, bool, int) -> Vlan\n return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))",
"def add_vovnet_config(cfg):\n _C = cfg\n\n _C.MODEL.VOVNET = CN()\n\n _C.MODEL.VOVNET.CONV_BODY = \"V-39-eSE\"\n _C.MODEL.VOVNET.OUT_FEATURES = [\"stage2\", \"stage3\", \"stage4\", \"stage5\"]\n\n # Options: FrozenBN, GN, \"SyncBN\", \"BN\"\n _C.MODEL.VOVNET.NORM = \"FrozenBN\"\n\n _C.MODEL.VOVNET.OUT_CHANNELS = 256\n\n _C.MODEL.VOVNET.BACKBONE_OUT_CHANNELS = 256",
"def add_network_adapter(self, network_obj):\n\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n nic_spec.device = vim.vm.device.VirtualVmxnet3()\n nic_spec.device.wakeOnLanEnabled = True\n nic_spec.device.addressType = \"assigned\"\n nic_spec.device.deviceInfo = vim.Description()\n nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()\n nic_spec.device.backing.network = network_obj\n nic_spec.device.backing.deviceName = network_obj.name\n nic_spec.device.backing.useAutoDetect = False\n nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()\n nic_spec.device.connectable.startConnected = True\n nic_spec.device.connectable.connected = True\n nic_spec.device.connectable.allowGuestControl = True\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [nic_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)",
"def add_vlan_acl(self, vlan, acl):\n raise NotImplementedError # pragma: no cover",
"def _add_intf_to_vlan(self, conn, vlan_id, interface):\n\n obj = self.VLAN_IFACE_REST_OBJ + quote(interface, safe='')\n\n resp = conn.get(obj)\n intf_info = self._check_process_resp(resp, expected_fields=['vlans', 'pvid'])\n\n crt_vlist = self._get_vlist(intf_info['vlans'])\n if vlan_id in crt_vlist:\n return\n\n new_vlist = crt_vlist[ : ]\n new_vlist.append(vlan_id)\n\n pvid = intf_info['pvid']\n mode = 'trunk'\n\n resp = self._conf_intf(conn, interface, mode, pvid, new_vlist)\n self._check_process_resp(resp)",
"def add(self, host, **kwargs):\n self.configs_[0][1].add(host, **kwargs)",
"def add_network(self, name_of_vm, port_group):\n adapter_type = 'e1000'\n vds = \"yes\"\n try:\n # import sys,pdb;pdb.Pdb(stdout=sys.__stdout__).set_trace()\n vmachine = self.vcenter.get_dc_object([vim.VirtualMachine], name_of_vm)\n\n if vds == 'yes':\n network = self.vcenter.get_dc_object([vim.dvs.DistributedVirtualPortgroup], port_group)\n else:\n network = self.get_network(port_group)\n\n new_nic = vim.vm.ConfigSpec()\n nic_changes = []\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n if adapter_type == 'e1000':\n nic_spec.device = vim.vm.device.VirtualE1000()\n elif adapter_type == 'vmxnet2':\n nic_spec.device = vim.vm.device.VirtualVmxnet2()\n else:\n nic_spec.device = vim.vm.device.VirtualVmxnet3()\n nic_spec.device.deviceInfo = vim.Description()\n if vds == 'yes':\n vir_port = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()\n nic_spec.device.backing = vir_port\n dvs_port_connection = vim.dvs.PortConnection()\n dvs_port_connection.portgroupKey = network.key\n dvs_port_connection.switchUuid = network.config.distributedVirtualSwitch.uuid\n nic_spec.device.backing.port = dvs_port_connection\n else:\n nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()\n nic_spec.device.backing.useAutoDetect = False\n nic_spec.device.backing.network = network\n nic_spec.device.backing.deviceName = port_group\n nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()\n nic_spec.device.connectable.startConnected = True\n nic_spec.device.connectable.connected = True\n nic_spec.device.connectable.allowGuestControl = True\n nic_spec.device.connectable.status = 'untried'\n nic_spec.device.wakeOnLanEnabled = True\n nic_spec.device.addressType = 'assigned'\n nic_changes.append(nic_spec)\n new_nic.deviceChange = nic_changes\n add_nic = vmachine.ReconfigVM_Task(spec=new_nic)\n log.info('Adding Network adapter to the VM...')\n while add_nic.info.state not in ['success', 'error']:\n time.sleep(1)\n status = add_nic.info.state\n if status == 'success':\n log.info('Nic added successfully: {}'.format(name_of_vm))\n if status == 'error':\n log.info('Could not add Network adapter {}'.format(name_of_vm))\n return status\n\n except Exception as error:\n log.info(\"Caught exception: {} \\n {}\".format(error, error.message))",
"def add_vlan(self, vlan_number, vlan_pool_name):\n class_query = ClassQuery('fvnsVlanInstP')\n class_query.propFilter = 'eq(fvnsVlanInstP.name, \"' + VLAN_POOL_PREFIX + vlan_pool_name + '\")'\n vp_list = self.moDir.query(class_query)\n # If the vlan pool does not exists, create it with the physical domain and the attachable entity profile\n if len(vp_list) == 0:\n VlanInstP_mo = self.create_vlan_pool(VLAN_POOL_PREFIX + vlan_pool_name, 'static')\n DomP_mo = self.create_physical_domain(PD_PREFIX + vlan_pool_name, str(VlanInstP_mo.dn))\n self.create_attachable_entity_profile(AEP_PREFIX + vlan_pool_name, str(DomP_mo.dn))\n else:\n VlanInstP_mo = vp_list[0]\n encap_mo = EncapBlk(str(VlanInstP_mo.dn), VLAN_PREFIX + str(vlan_number),\n VLAN_PREFIX + str(vlan_number), allocMode='static')\n self.commit(encap_mo)",
"def add_config_object(config_object: \"BaseConfig\") -> None:\n assert (\n len(G_CONFIG_OBJECT) == 0\n ), \"Looks like previous quatize object is alive. Did you call clear() on the object?\"\n G_CONFIG_OBJECT.append(config_object)",
"def add_vlan(self, vlan_name, vlan_id):\n self._vlans[vlan_name] = vlan_id",
"def add(self, config):\n self.__idx(config)",
"def add(self, team):\n ### INVARIANT: team is a Team class object.\n if team not in self._conf_teams:\n self._conf_teams.append(team)",
"def add_link(self, src, dst, src_port, dst_port, weight = 1):\n\t\tif src not in self.switches_adj:\n\t\t\tself.switches_adj[src] = []\n\t\tself.switches_adj[src].append(dst)\t\n\n\n\t\t#add link and it's attributes\n\t\tif src not in self.links:\n\t\t\tself.links[src] = {}\n\t\tself.links[src][dst] = {}\n\t\tself.links[src][dst]['src_port'] = src_port\n\t\tself.links[src][dst]['dst_port'] = dst_port\n\t\tself.links[src][dst]['weight'] = weight",
"def addWlan(self, station): \n phyInt.phy[station] = phyInt.totalPhy[self.currentPhy][3:]\n os.system(\"iw phy phy%s set netns %s\" % (phyInt.phy[station], station.pid)) \n wif = station.cmd(\"iwconfig 2>&1 | grep IEEE | awk '{print $1}'\").split(\"\\n\")\n wif.pop()\n for iface in wif:\n if iface[:4]==\"wlan\":\n try:\n self.nextWlan[str(station)] += 1\n except:\n self.nextWlan[str(station)] = 0\n netxWlan = self.nextWlan[str(station)] \n self.renameIface(station, netxWlan, iface)\n self.currentPhy+=1",
"def add_link_type_vlan(enode, portlbl, name, vlan_id, shell=None):\n assert name\n if name in enode.ports:\n raise ValueError('Port {name} already exists'.format(name=name))\n\n assert portlbl\n assert vlan_id\n port = enode.ports[portlbl]\n\n cmd = 'ip link add link {dev} name {name} type vlan id {vlan_id}'.format(\n dev=port, name=name, vlan_id=vlan_id)\n\n response = enode(cmd, shell=shell)\n assert not response, 'Cannot add virtual link {name}'.format(name=name)\n\n enode.ports[name] = name",
"def add_pvrdma(self, dvs_obj, network_obj, label=\"pvRDMA Network Adapter\"):\n\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n nic_spec.device = vim.vm.device.VirtualVmxnet3Vrdma()\n nic_spec.device.deviceInfo = vim.Description(label=label)\n nic_spec.device.addressType = \"generated\"\n nic_spec.device.backing = (\n vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()\n )\n nic_spec.device.backing.port = vim.dvs.PortConnection(\n switchUuid=dvs_obj.summary.uuid, portgroupKey=network_obj.config.key\n )\n nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()\n nic_spec.device.connectable.startConnected = True\n nic_spec.device.connectable.connected = True\n nic_spec.device.connectable.allowGuestControl = True\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [nic_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)",
"def addBridge(self, bridge):\n self.bridges.append(bridge)",
"def _add_netif(self, instance, netif_number=0,\n host_if=False,\n bridge=FLAGS.ovz_bridge_device):\n # TODO(imsplitbit): fix this to be nova-ish i.e. async\n try:\n # Command necessary to create a bridge networking setup.\n # right now this is the only supported networking model\n # in the openvz connector.\n if not host_if:\n host_if = 'veth%s.%s' % (instance['id'], netif_number)\n\n out, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--netif_add',\n 'eth%s,,%s,,%s' % (netif_number,\n host_if, bridge))\n\n LOG.debug(out)\n\n if err:\n LOG.error(err)\n\n except ProcessExecutionError:\n raise exception.Error(\n 'Error adding network device to container %s' %\n instance['id'])",
"def add_network_interface(self, iface: 'NetworkInterface',\n is_gateway: bool = False):\n self.ifaces.append(iface)\n if is_gateway:\n self._gateway = iface",
"def _add_ip(self, instance, netif='eth0',\n if_file='etc/network/interfaces'):\n ctxt = context.get_admin_context()\n ip = db.instance_get_fixed_address(ctxt, instance['id'])\n network = db.fixed_ip_get_network(ctxt, ip)\n net_path = '%s/%s' % (FLAGS.ovz_ve_private_dir, instance['id'])\n if_file_path = net_path + '/' + if_file\n \n try:\n os.chdir(net_path)\n with open(FLAGS.ovz_network_template) as fh:\n network_file = fh.read() % {'gateway_dev': netif,\n 'address': ip,\n 'netmask': network['netmask'],\n 'gateway': network['gateway']}\n\n # TODO(imsplitbit): Find a way to write to this file without\n # mangling the perms.\n utils.execute('sudo', 'chmod', '666', if_file_path)\n fh = open(if_file_path, 'a')\n fh.write(network_file)\n fh.close()\n utils.execute('sudo', 'chmod', '644', if_file_path)\n\n except Exception as err:\n LOG.error(err)\n raise exception.Error('Error adding IP')\n\n\n # This is how to add ips with venet. At some point we should make this\n # work.\n #try:\n # _, err = utils.execute('sudo vzctl set %s --save --ipadd %s' %\n # (instance['id'], ip))\n # if err:\n # LOG.error(err)\n #except ProcessExecutionError:\n # raise exception.Error('Error adding ip %s to %s' %\n # (ip, instance['id']))",
"def add_neighbor(self, neighbor):\r\n self.neighbors.append(neighbor)",
"def add_node(self, obj, typ_sofi, layer):\n\n n = Node(obj)\n n.layer = layer\n\n self.nodes.add(n)",
"def set_vlan_interface(self, interface, vlan, vdc=None):\n\n assert isinstance(vlan, str)\n assert isinstance(interface, str)\n assert isinstance(vdc, list)\n\n self.logger.debug(\"Adding vlan {} on interface {} on {}\".format(vlan, interface, self.host))\n interface = interface.title()\n vlan_created = None\n\n if len(vdc) != 1:\n raise ValueError(\"Interface {} cannot exist in multiple vdcs {}\".format(interface, self.host))\n vdc = vdc[0]\n if not self.vdcs[vdc].check_interface(interface):\n raise ValueError(\n \"Interface {} does not exist in vdc {} on {}\".format(interface, vdc, self.host))\n if not self.vdcs[vdc].check_vlan(vlan):\n self.set_vlan(vlan)\n vlan_created = [vlan]\n\n self.switchto_vdc(vdc)\n\n commands = ['config t ; interface {}'.format(interface)]\n configured = False\n\n if not self.vdcs[vdc].check_interface_vlan(interface, vlan):\n if self.vdcs[vdc].interfaces[interface].switchport == 'access':\n commands.append('switchport access vlan {}'.format(vlan))\n elif self.vdcs[vdc].interfaces[interface].switchport == 'trunk':\n commands.append('switchport trunk allowed vlan add {}'.format(vlan))\n else:\n raise ValueError(\n \"Interface {} in vdc {} on {} is not access or trunk\".format(interface, self.current_vdc,\n self.host))\n else:\n configured = True\n\n if not configured:\n try:\n self._send_xml_cli(commands)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n stacktrace = traceback.extract_tb(exc_traceback)\n self.logger.error(\"VLAN {} configuration for interface {} on {} failed\".format(vlan, interface, self.host))\n self.logger.debug(sys.exc_info())\n self.logger.debug(stacktrace)\n else:\n self.get_interfaces(vdc=vdc)\n\n return vlan_created",
"def set_new_configuration(self):\r\n with open('new_config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = json.loads(configuration)\r\n ip = IPRoute()\r\n index = ip.link_lookup(ifname='eth0')[0]\r\n ip.link('set', index=index, state='up')\r\n ip.addr('add', index, address=configuration_data[0][0], mask=24)\r\n ip.close()",
"def add(ctx, option, value):\n properties = option.split(\".\")\n section = properties[0]\n option = properties[1]\n cfg = ctx.obj['cfg']\n if not cfg.has_section(section):\n cfg.add_section(section)\n cfg.set(section, option, value)\n with open(config_path(), 'w') as fp:\n cfg.write(fp)",
"def add(self, name: str):\n print(\"Adding new connection object '{}'...\\n\".format(name))\n\n drivername = input(\"Drivername: \")\n\n connector = self._load(drivername)\n try:\n config = connector.setup()\n except KeyboardInterrupt:\n return print(\"Setup cancelled.\")\n\n # Add the driver name to the config for this viewer to determine its class\n config['drivername'] = drivername\n self.config[name] = config\n print(\"Successfully added new connection\")",
"def add_virtual_network(self, hVirtNet, nFlags = 0):\n\t\treturn Job(SDK.PrlSrv_AddVirtualNetwork(self.handle, conv_handle_arg(hVirtNet), nFlags)[0])",
"def addNeighbor(self, neighbor):",
"def add(self, obj):\n raise NotImplementedError",
"def add_network(self, addr, netmask):\n\n if len(addr) == 4:\n return ipset.ipset_ipv4_add_network(self.set, addr, netmask)\n\n elif len(addr) == 16:\n return ipset.ipset_ipv6_add_network(self.set, addr, netmask)\n\n else:\n raise ValueError(\"Invalid address\")"
] | [
"0.6077769",
"0.59645206",
"0.5894081",
"0.57518435",
"0.57477623",
"0.5655887",
"0.56225246",
"0.55709255",
"0.5518417",
"0.54682285",
"0.5455874",
"0.5418201",
"0.539219",
"0.5344074",
"0.5312479",
"0.5307091",
"0.5276449",
"0.52353144",
"0.52163297",
"0.52099174",
"0.5207104",
"0.51863253",
"0.51651573",
"0.5145196",
"0.5128561",
"0.51241344",
"0.5113156",
"0.50973684",
"0.5079722",
"0.50622296"
] | 0.603703 | 1 |
Add an OvsBridge object to the net config object. | def add_bridge(self, bridge):
logger.info('adding bridge: %s' % bridge.name)
data = self._add_common(bridge)
logger.debug('bridge data: %s' % data)
self.bridge_data[bridge.name] = data
if bridge.routes:
self._add_routes(bridge.name, bridge.routes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addBridge(self, bridge):\n self.bridges.append(bridge)",
"def remote_createBridge(self, name):\r\n if name in self._bridges:\r\n raise InternalError('Bridge already exists.')\r\n\r\n self._bridges.add(name)\r\n return execute(('/usr/bin/ovs-vsctl', '--', '--may-exist', 'add-br',\r\n 'br-{0}'.format(name)), reactor=self._reactor)",
"def add_br(bridge, external_id=None):\n cmd = ['ovs-vsctl', 'add-br', bridge, '--', 'set', 'bridge', bridge,\n 'protocols=OpenFlow13']\n if external_id:\n cmd.extend(('--', 'br-set-external-id', bridge))\n cmd.extend(external_id)\n _run(*cmd)",
"def add_config_object(config_object: \"BaseConfig\") -> None:\n assert (\n len(G_CONFIG_OBJECT) == 0\n ), \"Looks like previous quatize object is alive. Did you call clear() on the object?\"\n G_CONFIG_OBJECT.append(config_object)",
"def add_vovnet_config(cfg):\n _C = cfg\n\n _C.MODEL.VOVNET = CN()\n\n _C.MODEL.VOVNET.CONV_BODY = \"V-39-eSE\"\n _C.MODEL.VOVNET.OUT_FEATURES = [\"stage2\", \"stage3\", \"stage4\", \"stage5\"]\n\n # Options: FrozenBN, GN, \"SyncBN\", \"BN\"\n _C.MODEL.VOVNET.NORM = \"FrozenBN\"\n\n _C.MODEL.VOVNET.OUT_CHANNELS = 256\n\n _C.MODEL.VOVNET.BACKBONE_OUT_CHANNELS = 256",
"def appendBropts(self, key, value):\n # type: (str, tp.Any)->None\n new_value = value\n if key in self._ifAttributes['bridge-opts']:\n new_value = self._ifAttributes['bridge-opts'][key] + value\n self.replaceBropt(key, new_value)",
"def _add_netif(self, instance, netif_number=0,\n host_if=False,\n bridge=FLAGS.ovz_bridge_device):\n # TODO(imsplitbit): fix this to be nova-ish i.e. async\n try:\n # Command necessary to create a bridge networking setup.\n # right now this is the only supported networking model\n # in the openvz connector.\n if not host_if:\n host_if = 'veth%s.%s' % (instance['id'], netif_number)\n\n out, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--netif_add',\n 'eth%s,,%s,,%s' % (netif_number,\n host_if, bridge))\n\n LOG.debug(out)\n\n if err:\n LOG.error(err)\n\n except ProcessExecutionError:\n raise exception.Error(\n 'Error adding network device to container %s' %\n instance['id'])",
"def create_bridge_for_int_in_namespace(\n node, namespace, bridge_name, *interfaces):\n cmd = f\"ip netns exec {namespace} brctl addbr {bridge_name}\"\n exec_cmd_no_error(node, cmd, sudo=True)\n\n for interface in interfaces:\n cmd = f\"ip netns exec {namespace} brctl addif {bridge_name} \" \\\n f\"{interface}\"\n exec_cmd_no_error(node, cmd, sudo=True)\n\n cmd = f\"ip netns exec {namespace} ip link set dev {bridge_name} up\"\n exec_cmd_no_error(node, cmd, sudo=True)",
"def createBridge(self, name):\r\n return self._ref.callRemote('createBridge', name)",
"def add_port(bridge, port, external_id=None):\n _run('ip', 'link', 'set', port, 'up')\n _run('ovs-vsctl', 'add-port', bridge, port)\n if external_id:\n ports = SimpleOVSDB('ovs-vsctl', 'port')\n for port in ports.find('name={}'.format(port)):\n ports.set(port['_uuid'],\n 'external_ids:{}'.format(external_id[0]),\n external_id[1])",
"def register_oslo_configs(conf):\n conf.register_opts(_get_oslo_configs())",
"def setBropts(self, opts):\n # type: (tp.Dict[str, tp.Any])->None\n\n self._validator.validate_one(\n 'bridge-opts', VALID_OPTS['bridge-opts'], opts)\n self._ifAttributes['bridge-opts'] = opts",
"def add_bond(self, bond):\n logger.info('adding bond: %s' % bond.name)\n data = self._add_common(bond)\n logger.debug('bond data: %s' % data)\n self.interface_data[bond.name] = data\n if bond.routes:\n self._add_routes(bond.name, bond.routes)",
"def register_config(self, config):\n self.config = config",
"async def _setup_bridge(self, websession: ClientSession) -> None:\n self._bridge = aiohue.Bridge(\n self.config.hue.ip,\n websession,\n username=self.config.hue.username,\n )\n LOGGER.info(f\"Connecting to Hue Bridge at {self.config.hue.ip}\")\n await self._bridge.initialize()",
"def setup_unused_bridge_network(self):\n out = utils.run_script('conjure-up.lxc network show conjureup0')\n if out.returncode == 0:\n return # already configured\n\n out = utils.run_script('conjure-up.lxc network create conjureup0 '\n 'ipv4.address=auto '\n 'ipv4.nat=true '\n 'ipv6.address=none '\n 'ipv6.nat=false')\n\n if out.returncode != 0:\n raise Exception(\n \"Failed to create conjureup0 network bridge: \"\n \"{}\".format(out.stderr.decode()))",
"def add_to_subnet(self, subnet_properties: 'SubnetAffiliation'):\n self.subnets.append(subnet_properties)",
"def add(self, host, **kwargs):\n self.configs_[0][1].add(host, **kwargs)",
"def add(self, name: str):\n print(\"Adding new connection object '{}'...\\n\".format(name))\n\n drivername = input(\"Drivername: \")\n\n connector = self._load(drivername)\n try:\n config = connector.setup()\n except KeyboardInterrupt:\n return print(\"Setup cancelled.\")\n\n # Add the driver name to the config for this viewer to determine its class\n config['drivername'] = drivername\n self.config[name] = config\n print(\"Successfully added new connection\")",
"def _add_connection(self, con):\n # get connectors by the above specified labels\n start = self.connector_by_label(con[0])\n end = self.connector_by_label(con[1])\n if start.parent_type == 'box' and end.parent_type == 'box':\n # make sure, that not two inputs or two outputs are connected\n if start.connector_type == end.connector_type:\n raise ConnectorError(f\"Connection {con} connects \"\n f\"input to input or output to output.\")\n # make sure, that inputs are always first\n # and outputs are always second\n elif (start.connector_type == 'output'\n or end.connector_type == 'input'):\n start, end = end, start\n # make sure, that a switch does not connect to itself\n elif start.parent_type == 'switch' and end.parent_type == 'switch':\n if start.switch == end.switch:\n raise ConnectorError(f\"Connection {con} connects \"\n f\"a switch to itself.\")\n\n # create connection\n connection = ArduinoSwitchControlConnection(start, end)\n\n # add connection to attributes\n self.connections.append(connection)",
"def create_and_bridge_virtual_interfaces(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current bridge and interface configuration\n print(\"\\nThis the current bridge configuration:\")\n VPPUtil.show_bridge(node)\n question = \"\\nWould you like to keep this configuration [Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n continue\n\n # Create a script that builds a bridge configuration with\n # physical interfaces and virtual interfaces\n ints_with_vints = self._create_vints_questions(node)\n content = \"\"\n for intf in ints_with_vints:\n vhoststr = \"\\n\".join(\n [\n \"comment { The following command creates the socket }\",\n \"comment { and returns a virtual interface }\",\n \"comment {{ create vhost-user socket \"\n \"/var/run/vpp/sock{}.sock server }}\\n\".format(intf[\"bridge\"]),\n ]\n )\n\n setintdnstr = \"set interface state {} down\\n\".format(intf[\"name\"])\n\n setintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"name\"], intf[\"bridge\"]\n )\n setvintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"virtualinterface\"], intf[\"bridge\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 up\n setintvststr = \"set interface state {} up\\n\".format(\n intf[\"virtualinterface\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 down\n setintupstr = \"set interface state {} up\\n\".format(intf[\"name\"])\n\n content += (\n vhoststr\n + setintdnstr\n + setintbrstr\n + setvintbrstr\n + setintvststr\n + setintupstr\n )\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/create_vms_and_connect_to_vpp\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))",
"def add_connection(self, switch_name, port1, port2, bidir=False):\n raise NotImplementedError()",
"def add_n_uo_sensors_to_config(uo_sensors: gpd.GeoDataFrame, config: dict):\n n_uo_oa = uo_sensors[\"oa11cd\"].nunique()\n if n_uo_oa not in config[\"optimisation\"][\"n_sensors\"][\"generate\"]:\n print(\n f\"Urban Observatory sensors found in {n_uo_oa} OA. \"\n \"Adding this to network sizes to generate.\"\n )\n n_sensors = config[\"optimisation\"][\"n_sensors\"][\"generate\"]\n n_sensors.append(n_uo_oa)\n config[\"optimisation\"][\"n_sensors\"][\"generate\"] = n_sensors\n with open(\"config.yml\", \"w\") as f:\n yaml.dump(config, f)",
"def add_conn(self, a1, a2):\n if self.use_pconn:\n raise ValueError(\"Can not add bonds to systems with pconn - well, we can fix this ;) \")\n self.conn[a1].append(a2)\n self.conn[a2].append(a1)\n d,v,imgi = self.get_distvec(a1,a2)\n self.pconn[a1].append(images[imgi])\n d,v,imgi = self.get_distvec(a2,a1)\n self.pconn[a2].append(images[imgi])\n logger.warning('pconn may not be properly updated!!!')\n return",
"def set_ovs_config_with_controller(ports, switch, controller):\n switch.ui.create_ovs_bridge(bridge_name=\"spp0\")\n controller_ip = \"tcp:%s:%s\" % (controller.ipaddr, controller.cport, )\n switch.ui.create_ovs_bridge_controller(\"spp0\", controller_ip)\n for port in ports:\n if port[0] != \"tg1\":\n for link_id in ports[port]:\n try:\n switch.ui.get_table_ovs_ports()[ports[port][link_id]]\n except Exception:\n switch.ui.create_ovs_port(int(ports[port][link_id]), bridge_name=\"spp0\")\n # Wait until configuration is applied and connection to Controller is established\n time.sleep(6)",
"async def async_step_use_bridge_settings(self, user_input=None):\n if \"host\" not in self.bridge_config:\n return self.async_abort(\n reason=\"abort_by_error\",\n description_placeholders={\n \"error_info\": f\"Error code: {self.bridge_config_answer_status}. Response: {self.bridge_config}\"\n },\n )\n\n # save mqtt connection info\n ais_global.save_ais_mqtt_connection_settings(\n ais_global.G_AIS_SUPLA_MQTT_CONFIG_FILE_NAME, self.bridge_config\n )\n # restart mqtt broker\n await self.hass.services.async_call(\n \"ais_shell_command\", \"restart_pm2_service\", {\"service\": \"mqtt\"}\n )\n # finish the config flow\n return self.async_create_entry(\n title=\"SUPLA MQTT BRIDGE\", data=self.bridge_config\n )",
"def create_and_bridge_iperf_virtual_interface(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current bridge and interface configuration\n print(\"\\nThis the current bridge configuration:\")\n ifaces = VPPUtil.show_bridge(node)\n question = \"\\nWould you like to keep this configuration [Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n self._sockfilename = \"/var/run/vpp/{}.sock\".format(\n ifaces[0][\"name\"].replace(\"/\", \"_\")\n )\n if os.path.exists(self._sockfilename):\n continue\n\n # Create a script that builds a bridge configuration with\n # physical interfaces and virtual interfaces\n ints_with_vints = self._iperf_vm_questions(node)\n content = \"\"\n for intf in ints_with_vints:\n vhoststr = \"\\n\".join(\n [\n \"comment { The following command creates the socket }\",\n \"comment { and returns a virtual interface }\",\n \"comment {{ create vhost-user socket \"\n \"/var/run/vpp/sock{}.sock server }}\\n\".format(intf[\"bridge\"]),\n ]\n )\n\n setintdnstr = \"set interface state {} down\\n\".format(intf[\"name\"])\n\n setintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"name\"], intf[\"bridge\"]\n )\n setvintbrstr = \"set interface l2 bridge {} {}\\n\".format(\n intf[\"virtualinterface\"], intf[\"bridge\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 up\n setintvststr = \"set interface state {} up\\n\".format(\n intf[\"virtualinterface\"]\n )\n\n # set interface state VirtualEthernet/0/0/0 down\n setintupstr = \"set interface state {} up\\n\".format(intf[\"name\"])\n\n content += (\n vhoststr\n + setintdnstr\n + setintbrstr\n + setvintbrstr\n + setintvststr\n + setintupstr\n )\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/create_iperf_vm\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))",
"def setup_bridge_network(self, iface):\n out = utils.run_script('conjure-up.lxc network show conjureup1')\n if out.returncode == 0:\n return # already configured\n\n out = utils.run_script('conjure-up.lxc network create conjureup1 '\n 'ipv4.address=auto '\n 'ipv4.nat=true '\n 'ipv6.address=none '\n 'ipv6.nat=false')\n if out.returncode != 0:\n raise Exception(\"Failed to create LXD conjureup1 network bridge: \"\n \"{}\".format(out.stderr.decode()))",
"def add_network_adapter(self, network_obj):\n\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n nic_spec.device = vim.vm.device.VirtualVmxnet3()\n nic_spec.device.wakeOnLanEnabled = True\n nic_spec.device.addressType = \"assigned\"\n nic_spec.device.deviceInfo = vim.Description()\n nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()\n nic_spec.device.backing.network = network_obj\n nic_spec.device.backing.deviceName = network_obj.name\n nic_spec.device.backing.useAutoDetect = False\n nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()\n nic_spec.device.connectable.startConnected = True\n nic_spec.device.connectable.connected = True\n nic_spec.device.connectable.allowGuestControl = True\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [nic_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)",
"def push_config(self, config):\n raise NotImplementedError # pragma: no cover"
] | [
"0.7036302",
"0.636155",
"0.62205625",
"0.58536595",
"0.5717984",
"0.5396469",
"0.5350926",
"0.533486",
"0.5209206",
"0.5184907",
"0.5178858",
"0.51081914",
"0.5087172",
"0.5024791",
"0.5011627",
"0.49891123",
"0.49801546",
"0.49567738",
"0.492117",
"0.49055576",
"0.4870483",
"0.4869171",
"0.48564765",
"0.4830473",
"0.4816",
"0.47980925",
"0.47962442",
"0.479194",
"0.47916496",
"0.47871375"
] | 0.70225567 | 1 |
Add an OvsBond object to the net config object. | def add_bond(self, bond):
logger.info('adding bond: %s' % bond.name)
data = self._add_common(bond)
logger.debug('bond data: %s' % data)
self.interface_data[bond.name] = data
if bond.routes:
self._add_routes(bond.name, bond.routes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_bond(self,bond):\n if bond.label==None:\n bond.label = bond_label_dir[str(self.Nbonds)]\n if bond.color==None:\n bond.color = 'black'\n self.bonds.append(bond)\n self.Nbonds +=1",
"def addBond(self, **kwds):\n print kwds\n self.MagCell.addBond(**kwds)",
"def add_bond(molecule, atom1_index, atom2_index, bond_order=1):\n molecule.AppendBond(atom1_index, atom2_index, bond_order)",
"def add_bond(self, bond):\n self._graph.connect(bond.atom1, bond.atom2, bond)",
"def set_bond_network_details(self):\n self.clear_screen()\n default = 'bond0'\n name = input('enter bond name\\n'\n 'default [bond0]: ')\n name = set_values(name, default)\n interfaces = input('enter bond interfaces seperated by \\',\\'\\n'\n 'default [ens2f0,ens2f1]: ')\n default = 'ens2f0,ens2f1'\n interfaces = set_values(interfaces, default)\n default = 'mode=active-backup,miimon=100,primary=ens2f0'\n options = input('enter bond options \\n'\n 'default [mode=active-backup,miimon=100,primary=ens2f0]: ')\n options = set_values(options, default)\n logging.info('adding bond_name: {} interfaces: {} bond_options: {}'.format(name, interfaces, options))\n self.inventory_dict['csah']['vars']['bond_name'] = name\n self.inventory_dict['csah']['vars']['bond_interfaces'] = interfaces\n self.inventory_dict['csah']['vars']['bond_options'] = options",
"def add_vovnet_config(cfg):\n _C = cfg\n\n _C.MODEL.VOVNET = CN()\n\n _C.MODEL.VOVNET.CONV_BODY = \"V-39-eSE\"\n _C.MODEL.VOVNET.OUT_FEATURES = [\"stage2\", \"stage3\", \"stage4\", \"stage5\"]\n\n # Options: FrozenBN, GN, \"SyncBN\", \"BN\"\n _C.MODEL.VOVNET.NORM = \"FrozenBN\"\n\n _C.MODEL.VOVNET.OUT_CHANNELS = 256\n\n _C.MODEL.VOVNET.BACKBONE_OUT_CHANNELS = 256",
"def on_bond(self):\n pass",
"def add_config_object(config_object: \"BaseConfig\") -> None:\n assert (\n len(G_CONFIG_OBJECT) == 0\n ), \"Looks like previous quatize object is alive. Did you call clear() on the object?\"\n G_CONFIG_OBJECT.append(config_object)",
"def create_bond(self,\n atom = None,\n bond_type = None,\n atom1_symop = None,\n atom2_symop = None,\n standard_res_bond = False):\n assert isinstance(atom, Atom)\n assert ((self.alt_loc == atom.alt_loc) or\n (self.alt_loc == \"\" and atom.alt_loc != \"\") or\n (self.alt_loc != \"\" and atom.alt_loc == \"\"))\n\n bond = Bond(atom1 = self,\n atom2 = atom,\n bond_type = bond_type,\n atom1_symop = atom1_symop,\n atom2_symop = atom2_symop,\n standard_res_bond = standard_res_bond)\n\n self.bond_list.append(bond)\n atom.bond_list.append(bond)",
"def add(self, config):\n self.__idx(config)",
"def addBridge(self, bridge):\n self.bridges.append(bridge)",
"def add(self, bento_name, bento_version):",
"def create_bond(self, address, type):\n logging.info(\"Cert: Creating bond to '%s' from '%s'\" % (str(address), str(self._device.address)))\n # TODO(optedoblivion): Trigger connection to Send AuthenticationRequested",
"def set_bond(self, locant1, locant2, bond_symbol):\n atom1 = self.get_atom_by_locant(locant1)\n atom2 = self.get_atom_by_locant(locant2)\n bond = Bond.create(bond_symbol, atom1, atom2)\n self._graph.connect(atom1, atom2, bond)",
"def add_bridge(self, bridge):\n logger.info('adding bridge: %s' % bridge.name)\n data = self._add_common(bridge)\n logger.debug('bridge data: %s' % data)\n self.bridge_data[bridge.name] = data\n if bridge.routes:\n self._add_routes(bridge.name, bridge.routes)",
"def add_br(bridge, external_id=None):\n cmd = ['ovs-vsctl', 'add-br', bridge, '--', 'set', 'bridge', bridge,\n 'protocols=OpenFlow13']\n if external_id:\n cmd.extend(('--', 'br-set-external-id', bridge))\n cmd.extend(external_id)\n _run(*cmd)",
"def add_neighbor(self, neigh_id: str) -> None:\n if neigh_id in self.nodes_rib_out.keys():\n raise KeyError(\"{} neighbor is already present\".format(neigh_id))\n self.nodes_rib_out[neigh_id] = ADJ_RIB_out(self.node_id)",
"def add_pvrdma(self, dvs_obj, network_obj, label=\"pvRDMA Network Adapter\"):\n\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n nic_spec.device = vim.vm.device.VirtualVmxnet3Vrdma()\n nic_spec.device.deviceInfo = vim.Description(label=label)\n nic_spec.device.addressType = \"generated\"\n nic_spec.device.backing = (\n vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()\n )\n nic_spec.device.backing.port = vim.dvs.PortConnection(\n switchUuid=dvs_obj.summary.uuid, portgroupKey=network_obj.config.key\n )\n nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()\n nic_spec.device.connectable.startConnected = True\n nic_spec.device.connectable.connected = True\n nic_spec.device.connectable.allowGuestControl = True\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [nic_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)",
"def _add_bond_units(self, bond):\n # TODO: Shouldn't we be making a deep copy?\n\n # If already promoted to unit-bearing quantities, return the object\n if type(bond.type.k)==unit.Quantity:\n return bond\n # Add parmed units\n # TODO: Get rid of this, and just operate on the OpenMM System instead\n bond.type.req = unit.Quantity(bond.type.req, unit=unit.angstrom)\n bond.type.k = unit.Quantity(2.0*bond.type.k, unit=unit.kilocalorie_per_mole/unit.angstrom**2)\n return bond",
"def add_config(self, config):\n clean=lambda n: n.strip().strip('\"').lower()\n for line in config.split('\\n'):\n items=line.strip().split()\n if items and len(items) >= 3:\n cmd, evt, hnd=items[:3]\n \"\"\" NOTE\n - just 'bind' command expected right now\n - '+' prepended ti the handler means REPEAT (make sense just for keyboard keys actually)\n \"\"\"\n cmd=clean(cmd)\n if cmd in ['bind']:\n evt,hnd=(clean(evt), clean(hnd))\n if not cmd in self.config: self.config[cmd]={}\n repeat=hnd.startswith('+')\n if repeat: hnd=hnd[1:]\n self.config[cmd].update([[evt, [hnd, repeat]]])",
"def setup_lacp_bonds(self):\n if not self.host_options:\n return\n bond_index = 1\n for host_id, options in self.host_options.items():\n if 'lacp' in options:\n host = self.host_information[host_id]['host']\n # LACP must be configured with host ports down\n for dpid, ports in self.host_information[host_id]['ports'].items():\n for port in ports:\n self.set_port_down(port, dpid)\n orig_ip = host.IP()\n lacp_switches = [self.net.switches[i] for i in self.host_links[host_id]]\n bond_members = [\n pair[0].name for switch in lacp_switches for pair in host.connectionsTo(switch)]\n bond_name = 'bond%u' % (bond_index)\n self.host_information[host_id]['bond'] = bond_name\n for bond_member in bond_members:\n # Deconfigure bond members\n self.quiet_commands(host, (\n 'ip link set %s down' % bond_member,\n 'ip address flush dev %s' % bond_member))\n # Configure bond interface\n self.quiet_commands(host, (\n ('ip link add %s address 0e:00:00:00:00:99 '\n 'type bond mode 802.3ad lacp_rate fast miimon 100 '\n 'xmit_hash_policy layer2+3') % (bond_name),\n 'ip add add %s/%s dev %s' % (orig_ip, self.NETPREFIX, bond_name),\n 'ip link set %s up' % bond_name))\n # Add bond members\n for bond_member in bond_members:\n self.quiet_commands(host, (\n 'ip link set dev %s master %s' % (bond_member, bond_name),))\n bond_index += 1\n # Return the ports to UP\n for dpid, ports in self.host_information[host_id]['ports'].items():\n for port in ports:\n self.set_port_up(port, dpid)",
"def add_conn(self, a1, a2):\n if self.use_pconn:\n raise ValueError(\"Can not add bonds to systems with pconn - well, we can fix this ;) \")\n self.conn[a1].append(a2)\n self.conn[a2].append(a1)\n d,v,imgi = self.get_distvec(a1,a2)\n self.pconn[a1].append(images[imgi])\n d,v,imgi = self.get_distvec(a2,a1)\n self.pconn[a2].append(images[imgi])\n logger.warning('pconn may not be properly updated!!!')\n return",
"def bondMetaAtoms(self):\r\n self.bonded = True",
"def add_obstacle(self, *points: Tuple[float, float]):\n self.obstacles.append(o.Obstacle(*points))",
"def __init__(self, bond: Bond, device: BondDevice):\n self._bond = bond\n self._device = device",
"def add_config(self, config):\n\n if config.identifier in self.configs:\n raise DuplicateConfigException(\n \"Builder already has config with identifier : {}\".format(\n config.identifier\n )\n )\n\n self.configs[config.identifier] = config",
"def add_neighbor(self, neighbor):\r\n self.neighbors.append(neighbor)",
"def appendBropts(self, key, value):\n # type: (str, tp.Any)->None\n new_value = value\n if key in self._ifAttributes['bridge-opts']:\n new_value = self._ifAttributes['bridge-opts'][key] + value\n self.replaceBropt(key, new_value)",
"def add_sriov_adapter(self, network_obj, pf_obj, dvs_obj):\n\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n nic_spec.device = vim.vm.device.VirtualSriovEthernetCard()\n nic_spec.device.wakeOnLanEnabled = True\n nic_spec.device.addressType = \"assigned\"\n nic_spec.device.deviceInfo = vim.Description()\n if dvs_obj:\n nic_spec.device.backing = (\n vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()\n )\n nic_spec.device.backing.port = vim.dvs.PortConnection(\n switchUuid=dvs_obj.summary.uuid, portgroupKey=network_obj.config.key\n )\n else:\n nic_spec.device.backing = (\n vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()\n )\n nic_spec.device.backing.network = network_obj\n nic_spec.device.backing.deviceName = network_obj.name\n nic_spec.device.backing.useAutoDetect = False\n nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()\n nic_spec.device.connectable.startConnected = True\n nic_spec.device.connectable.connected = True\n nic_spec.device.connectable.allowGuestControl = True\n nic_spec.device.sriovBacking = (\n vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()\n )\n nic_spec.device.allowGuestOSMtuChange = False\n # convert decimal to hex for the device ID of physical adapter\n device_id = hex(pf_obj.deviceId % 2 ** 16).lstrip(\"0x\")\n sys_id = GetVM(self.vm_obj).pci_id_sys_id_sriov()\n backing = vim.VirtualPCIPassthroughDeviceBackingInfo(\n deviceId=device_id,\n id=pf_obj.id,\n systemId=sys_id[pf_obj.id],\n vendorId=pf_obj.vendorId,\n deviceName=pf_obj.deviceName,\n )\n nic_spec.device.sriovBacking.physicalFunctionBacking = backing\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [nic_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)",
"def append(self, bts_node: BTSNode):\n pass"
] | [
"0.7042654",
"0.6725331",
"0.66269624",
"0.6428748",
"0.58589774",
"0.5577654",
"0.54904604",
"0.5479174",
"0.5383009",
"0.5240194",
"0.5154615",
"0.5078846",
"0.5077104",
"0.5046048",
"0.5018264",
"0.49932727",
"0.49637228",
"0.49340352",
"0.48739976",
"0.48453963",
"0.48439473",
"0.48153692",
"0.4811977",
"0.4796292",
"0.4771347",
"0.47581273",
"0.4743295",
"0.4720254",
"0.4717695",
"0.4709562"
] | 0.6750536 | 1 |
Apply the network configuration. | def apply(self, cleanup=False, activate=True):
logger.info('applying network configs...')
restart_interfaces = []
restart_bridges = []
update_files = {}
all_file_names = []
for interface_name, iface_data in self.interface_data.iteritems():
route_data = self.route_data.get(interface_name, '')
interface_path = ifcfg_config_path(interface_name)
route_path = route_config_path(interface_name)
all_file_names.append(interface_path)
all_file_names.append(route_path)
if (utils.diff(interface_path, iface_data) or
utils.diff(route_path, route_data)):
restart_interfaces.append(interface_name)
restart_interfaces.extend(self.child_members(interface_name))
update_files[interface_path] = iface_data
update_files[route_path] = route_data
logger.info('No changes required for interface: %s' %
interface_name)
for bridge_name, bridge_data in self.bridge_data.iteritems():
route_data = self.route_data.get(bridge_name, '')
bridge_path = bridge_config_path(bridge_name)
bridge_route_path = route_config_path(bridge_name)
all_file_names.append(bridge_path)
all_file_names.append(bridge_route_path)
if (utils.diff(bridge_path, bridge_data) or
utils.diff(bridge_route_path, route_data)):
restart_bridges.append(bridge_name)
restart_interfaces.extend(self.child_members(bridge_name))
update_files[bridge_path] = bridge_data
update_files[bridge_route_path] = route_data
logger.info('No changes required for bridge: %s' % bridge_name)
if cleanup:
for ifcfg_file in glob.iglob(cleanup_pattern()):
if ifcfg_file not in all_file_names:
interface_name = ifcfg_file[len(cleanup_pattern()) - 1:]
if interface_name != 'lo':
logger.info('cleaning up interface: %s'
% interface_name)
self.ifdown(interface_name)
self.remove_config(ifcfg_file)
if activate:
for interface in restart_interfaces:
self.ifdown(interface)
for bridge in restart_bridges:
self.ifdown(bridge, iftype='bridge')
for oldname, newname in self.renamed_interfaces.iteritems():
self.ifrename(oldname, newname)
for location, data in update_files.iteritems():
self.write_config(location, data)
if activate:
for bridge in restart_bridges:
self.ifup(bridge, iftype='bridge')
for interface in restart_interfaces:
self.ifup(interface)
return update_files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_network_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if self.ext_net:\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'public_network_id', self.ext_net.id)\n rconfig.set('network', 'floating_network_name', self.ext_net.name)\n rconfig.set('network-feature-enabled', 'floating_ips', True)\n else:\n if not rconfig.has_section('network-feature-enabled'):\n rconfig.add_section('network-feature-enabled')\n rconfig.set('network-feature-enabled', 'floating_ips', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def network_config(self):\n\n if self._network_config:\n return self._network_config\n\n interfaces = self.metadata.get('interfaces')\n\n if not interfaces:\n raise Exception(\"Unable to get meta-data from server....\")\n\n # Convert Vultr network configuration to cloudinit.net format\n\n # Example JSON:\n # [\n # {\n # \"ipv4\": {\n # \"additional\": [\n # {\n # \"address\": \"192.0.2.3\",\n # \"netmask\": \"255.255.255.0\"\n # }\n # ],\n # \"address\": \"192.0.2.2\",\n # \"gateway\": \"192.0.2.1\",\n # \"netmask\": \"255.255.255.0\"\n # },\n # \"ipv6\": {\n # \"additional\": [\n # {\n # \"network\": \"2001:0db8:0:2::\",\n # \"prefix\": \"64\"\n # }\n # ],\n # \"address\": \"2001:0db8:0:1:5428:d5ff:fe28:1910\",\n # \"network\": \"2001:0db8:0:1::\",\n # \"prefix\": \"64\"\n # },\n # \"mac\": \"00:00:00:00:00:00\",\n # \"network-type\": \"public\"\n # },\n # ......\n # ]\n\n nic_configs = []\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n LOG.debug(\"nic mapping: %s\", macs_to_nics)\n\n config = []\n for vultr_ip_dict in interfaces:\n mac = vultr_ip_dict[\"mac\"]\n\n if mac not in macs_to_nics:\n raise ValueError(\"Did not find network interface on system \"\n \"with mac '%s'. Cannot apply configuration: %s\"\n % (mac_address, nic))\n if_name = macs_to_nics[mac] # if_name = string 'eth0', ...\n if_config= {\n 'type': 'physical',\n 'mac_address': mac,\n 'name': if_name,\n 'subnets': [{\n 'type': 'dhcp',\n 'control': 'auto',\n }\n ]\n }\n config.append(if_config)\n\n LOG.debug(\"nic '%s' configuration: %s\", if_name, if_config)\n\n LOG.debug(\"added dns servers: %s\", self.dns_servers)\n config.append({'type': 'nameserver', 'address': self.dns_servers})\n\n return {'version': 1, 'config': config}",
"def apply(self) -> None:\n _ba.apply_config()",
"def apply_network_settings(**settings):\n if 'require_reboot' not in settings:\n settings['require_reboot'] = False\n if 'apply_hostname' not in settings:\n settings['apply_hostname'] = False\n\n hostname_res = True\n if settings['apply_hostname'] in _CONFIG_TRUE:\n if 'hostname' in settings:\n hostname_res = __salt__['network.mod_hostname'](settings['hostname'])\n else:\n log.warning(\n 'The network state sls is trying to apply hostname '\n 'changes but no hostname is defined.'\n )\n hostname_res = False\n\n res = True\n if settings['require_reboot'] in _CONFIG_TRUE:\n log.warning(\n 'The network state sls is requiring a reboot of the system to '\n 'properly apply network configuration.'\n )\n res = True\n else:\n res = __salt__['cmd.run']('/etc/netstart restart')\n\n return hostname_res and res",
"def setup_networks(self, configs):\n self.__networks = self.setup_components(configs, 'scale_client.networks')",
"def setup_net(self):\n pass",
"def _update_network_config(port_config, allow_multiple=False):\n # Get network id from port config\n network_id = port_config.get('network_id')\n\n # Get the network id from relationship if any\n rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type(\n ctx, NETWORK_OPENSTACK_TYPE)\n\n rel_network_id = rel_network_ids[0] if rel_network_ids else None\n # Check if network config comes from two sources or not\n if network_id and rel_network_id and not allow_multiple:\n raise NonRecoverableError('Port can\\'t both have the '\n '\"network_id\" property and be '\n 'connected to a network via a '\n 'relationship at the same time')\n\n port_config['network_id'] = network_id or rel_network_id",
"def compile(self):\n logger.info('Define network with dnnet of version : %s'\\\n % dnnet.__version__)\n if self.layers.size == 0:\n msg = 'NeuralNetwork has no layer.\\n Add layers before compiling.'\n raise DNNetRuntimeError(msg)\n\n parent = self.layers[0]\n self.add(OutputLayer())\n\n for i, layer in enumerate(self.layers, 1):\n logger.debug('Add %s layer.' % layer.get_type())\n layer.set_parent(parent)\n parent = layer\n\n logger.debug('Defined network.')",
"def create_config(self):\n\n #FIXME: Try to do it over loops ie. self[attr].set_config()\n for attr, value in self.__dict__.items():\n if attr == \"connection\":\n self.connection.set_config(self.cfg)\n if attr == \"ipv4\":\n self.ipv4.set_config(self.cfg)\n if attr == \"ipv6\":\n self.ipv6.set_config(self.cfg)\n if attr == \"_802_3_ethernet\" and not value == \"none\":\n self._802_3_ethernet.set_config(self.cfg)\n if attr == \"_802_11_wireless\" and not value == \"none\":\n self._802_11_wireless.set_config(self.cfg)\n if attr == \"_802_11_wireless_security\" and not value == \"none\":\n self._802_11_wireless_security.set_config(self.cfg)",
"def _update_target_net(self):\n self.target_net.load_state_dict(self.policy_net.state_dict())\n self.target_net.eval()",
"def _build_network(self):\n pass",
"def configure_net(self):\n try:\n transport_type = Conf.get(self._index,\n f'cluster>{self._server_id}')['network']['data']['transport_type']\n except:\n raise MotrError(errno.EINVAL, \"transport_type not found\")\n check_type(transport_type, str, \"transport_type\")\n\n if transport_type == \"lnet\":\n configure_lnet(self)\n elif transport_type == \"libfabric\":\n configure_libfabric(self)\n else:\n raise MotrError(errno.EINVAL, \"Unknown data transport type\\n\")",
"def multiple_apply_config(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_compute = computes[0]\n config = utils.get_config_template('nova_disk')\n structured_config_old = get_structured_config_dict(config)\n\n config['nova_config'][\n 'DEFAULT/default_ephemeral_format']['value'] = 'ext3'\n structured_config_new = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n node_id=target_compute['id'])\n\n self.show_step(3)\n service_name = 'nova-compute'\n uptimes = self.get_service_uptime([target_compute], service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_compute['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted([target_compute],\n uptimes, service_name)\n\n self.show_step(6)\n for compute in computes:\n if compute == target_compute:\n self.check_config_on_remote([compute], structured_config_new)\n target_hypervisor_name = compute['fqdn']\n else:\n hypervisor_name = compute['fqdn']\n self.check_config_on_remote([compute], structured_config_old)\n\n self.show_step(7)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=target_hypervisor_name,\n fs_type='ext3')\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.env.make_snapshot(\"multiple_apply_config\")",
"def apply_net_config(self, service_config):\n\n LOGGER.debug(\"apply_net_config start\")\n start_time = time()\n\n # Validate the service configuration.\n self._config_validator.validate(service_config)\n\n # Determine the default route domain for the partition\n default_route_domain = self._bigip.get_default_route_domain()\n\n # Read in the configuration\n desired_config = self._config_reader.read_net_config(\n service_config, default_route_domain)\n # Deploy the service desired configuration.\n retval = self._service_deployer.deploy_net(desired_config)\n\n LOGGER.debug(\n \"apply_net_config took %.5f seconds.\", (time() - start_time))\n\n return retval",
"def config(self):\n self._resource_manager = self._api._ixnetwork.ResourceManager\n self._ixn_vport = self._api._vport\n self._delete_vports()\n self._create_vports()\n self._create_capture()\n self._set_location()\n self._set_layer1()",
"def update_target_network(self):\n self.target.set_weights(self.policy.get_weights()) # Update weights of target network with weights of policy network",
"def initialise_network(self):\n raise NotImplementedError",
"def apply_configs(task):\n\n if \"3750X\" in task.host[\"sw_model\"]:\n # run 3750X function\n aaa_3750x(task)\n\n # apply global config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_dot1x_global.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: dot1x global configuration applied ***\")\n # apply snmp config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_snmp.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: SNMP configuration applied ***\")\n # apply interface config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_dot1x_intf.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: dot1x interface configuration applied ***\")",
"def _network_conf(conf_tuples=None, **kwargs):\n nic = kwargs.get(\"network_profile\", None)\n ret = []\n nic_opts = kwargs.get(\"nic_opts\", {})\n if nic_opts is None:\n # coming from elsewhere\n nic_opts = {}\n if not conf_tuples:\n conf_tuples = []\n old = _get_veths(conf_tuples)\n if not old:\n old = {}\n\n # if we have a profile name, get the profile and load the network settings\n # this will obviously by default look for a profile called \"eth0\"\n # or by what is defined in nic_opts\n # and complete each nic settings by sane defaults\n if nic and isinstance(nic, ((str,), dict)):\n nicp = get_network_profile(nic)\n else:\n nicp = {}\n if DEFAULT_NIC not in nicp:\n nicp[DEFAULT_NIC] = {}\n\n kwargs = copy.deepcopy(kwargs)\n gateway = kwargs.pop(\"gateway\", None)\n bridge = kwargs.get(\"bridge\", None)\n if nic_opts:\n for dev, args in nic_opts.items():\n ethx = nicp.setdefault(dev, {})\n try:\n ethx = salt.utils.dictupdate.update(ethx, args)\n except AttributeError:\n raise SaltInvocationError(\"Invalid nic_opts configuration\")\n ifs = [a for a in nicp]\n ifs += [a for a in old if a not in nicp]\n ifs.sort()\n gateway_set = False\n for dev in ifs:\n args = nicp.get(dev, {})\n opts = nic_opts.get(dev, {}) if nic_opts else {}\n old_if = old.get(dev, {})\n disable = opts.get(\"disable\", args.get(\"disable\", False))\n if disable:\n continue\n mac = opts.get(\n \"mac\", opts.get(\"hwaddr\", args.get(\"mac\", args.get(\"hwaddr\", \"\")))\n )\n type_ = opts.get(\"type\", args.get(\"type\", \"\"))\n flags = opts.get(\"flags\", args.get(\"flags\", \"\"))\n link = opts.get(\"link\", args.get(\"link\", \"\"))\n ipv4 = opts.get(\"ipv4\", args.get(\"ipv4\", \"\"))\n ipv6 = opts.get(\"ipv6\", args.get(\"ipv6\", \"\"))\n infos = salt.utils.odict.OrderedDict(\n [\n (\n \"lxc.network.type\",\n {\n \"test\": not type_,\n \"value\": type_,\n \"old\": old_if.get(\"lxc.network.type\"),\n \"default\": \"veth\",\n },\n ),\n (\n \"lxc.network.name\",\n {\"test\": False, \"value\": dev, \"old\": dev, \"default\": dev},\n ),\n (\n \"lxc.network.flags\",\n {\n \"test\": not flags,\n \"value\": flags,\n \"old\": old_if.get(\"lxc.network.flags\"),\n \"default\": \"up\",\n },\n ),\n (\n \"lxc.network.link\",\n {\n \"test\": not link,\n \"value\": link,\n \"old\": old_if.get(\"lxc.network.link\"),\n \"default\": search_lxc_bridge(),\n },\n ),\n (\n \"lxc.network.hwaddr\",\n {\n \"test\": not mac,\n \"value\": mac,\n \"old\": old_if.get(\"lxc.network.hwaddr\"),\n \"default\": salt.utils.network.gen_mac(),\n },\n ),\n (\n \"lxc.network.ipv4\",\n {\n \"test\": not ipv4,\n \"value\": ipv4,\n \"old\": old_if.get(\"lxc.network.ipv4\", \"\"),\n \"default\": None,\n },\n ),\n (\n \"lxc.network.ipv6\",\n {\n \"test\": not ipv6,\n \"value\": ipv6,\n \"old\": old_if.get(\"lxc.network.ipv6\", \"\"),\n \"default\": None,\n },\n ),\n ]\n )\n # for each parameter, if not explicitly set, the\n # config value present in the LXC configuration should\n # take precedence over the profile configuration\n for info in list(infos.keys()):\n bundle = infos[info]\n if bundle[\"test\"]:\n if bundle[\"old\"]:\n bundle[\"value\"] = bundle[\"old\"]\n elif bundle[\"default\"]:\n bundle[\"value\"] = bundle[\"default\"]\n for info, data in infos.items():\n if data[\"value\"]:\n ret.append({info: data[\"value\"]})\n for key, val in args.items():\n if key == \"link\" and bridge:\n val = bridge\n val = opts.get(key, val)\n if key in [\n \"type\",\n \"flags\",\n \"name\",\n \"gateway\",\n \"mac\",\n \"link\",\n \"ipv4\",\n \"ipv6\",\n ]:\n 
continue\n ret.append({f\"lxc.network.{key}\": val})\n # gateway (in automode) must be appended following network conf !\n if not gateway:\n gateway = args.get(\"gateway\", None)\n if gateway is not None and not gateway_set:\n ret.append({\"lxc.network.ipv4.gateway\": gateway})\n # only one network gateway ;)\n gateway_set = True\n # normally, this won't happen\n # set the gateway if specified even if we did\n # not managed the network underlying\n if gateway is not None and not gateway_set:\n ret.append({\"lxc.network.ipv4.gateway\": gateway})\n # only one network gateway ;)\n gateway_set = True\n\n new = _get_veths(ret)\n # verify that we did not loose the mac settings\n for iface in [a for a in new]:\n ndata = new[iface]\n nmac = ndata.get(\"lxc.network.hwaddr\", \"\")\n ntype = ndata.get(\"lxc.network.type\", \"\")\n omac, otype = \"\", \"\"\n if iface in old:\n odata = old[iface]\n omac = odata.get(\"lxc.network.hwaddr\", \"\")\n otype = odata.get(\"lxc.network.type\", \"\")\n # default for network type is setted here\n # attention not to change the network type\n # without a good and explicit reason to.\n if otype and not ntype:\n ntype = otype\n if not ntype:\n ntype = \"veth\"\n new[iface][\"lxc.network.type\"] = ntype\n if omac and not nmac:\n new[iface][\"lxc.network.hwaddr\"] = omac\n\n ret = []\n for val in new.values():\n for row in val:\n ret.append(salt.utils.odict.OrderedDict([(row, val[row])]))\n # on old versions of lxc, still support the gateway auto mode\n # if we didn't explicitly say no to\n # (lxc.network.ipv4.gateway: auto)\n if (\n Version(version()) <= Version(\"1.0.7\")\n and True not in [\"lxc.network.ipv4.gateway\" in a for a in ret]\n and True in [\"lxc.network.ipv4\" in a for a in ret]\n ):\n ret.append({\"lxc.network.ipv4.gateway\": \"auto\"})\n return ret",
"def update_compute_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section('compute'):\n rconfig.add_section('compute')\n rconfig.set(\n 'compute', 'fixed_network_name',\n self.network.name if self.network else env.get(\"EXTERNAL_NETWORK\"))\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def config_networking(\n self, network_obj, ip, netmask, gateway, domain, dns, guest_hostname\n ):\n\n global_ip = vim.vm.customization.GlobalIPSettings()\n adapter_map = vim.vm.customization.AdapterMapping()\n adapter_map.adapter = vim.vm.customization.IPSettings()\n adapter_map.macAddress = network_obj.macAddress\n if ip:\n adapter_map.adapter.ip = vim.vm.customization.FixedIp()\n adapter_map.adapter.ip.ipAddress = ip\n else:\n adapter_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()\n adapter_map.adapter.subnetMask = netmask\n adapter_map.adapter.gateway = gateway\n global_ip.dnsServerList = dns\n adapter_map.adapter.dnsDomain = domain\n ident = vim.vm.customization.LinuxPrep()\n ident.hostName = vim.vm.customization.FixedName()\n if guest_hostname:\n ident.hostName.name = guest_hostname\n else:\n ident.hostName.name = self.vm_obj.name\n custom_spec = vim.vm.customization.Specification()\n custom_spec.nicSettingMap = [adapter_map]\n custom_spec.identity = ident\n custom_spec.globalIPSettings = global_ip\n return self.vm_obj.Customize(spec=custom_spec)",
"def _set_rules_mgmt(self, gnp_config, network, host):\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"],\n f\"{addr_pool.network}/{addr_pool.prefix}\")\n if (ip_version == 6):\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"], LINK_LOCAL)\n if (ip_version == 4):\n # add rule to allow DHCP requests (dhcp-offer have src addr == 0.0.0.0)\n # worker/storage nodes request IP dynamically\n rule = self._get_dhcp_rule(host.personality, \"UDP\", ip_version)\n gnp_config[\"spec\"][\"ingress\"].append(rule)\n\n # copy the TCP rule and do the same for IGMP\n igmp_proto = 2\n igmp_egr_rule = copy.deepcopy(gnp_config[\"spec\"][\"egress\"][0])\n igmp_egr_rule[\"protocol\"] = igmp_proto\n igmp_egr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-egr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"egress\"].append(igmp_egr_rule)\n igmp_ingr_rule = copy.deepcopy(gnp_config[\"spec\"][\"ingress\"][0])\n igmp_ingr_rule[\"protocol\"] = igmp_proto\n igmp_ingr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-ingr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"ingress\"].append(igmp_ingr_rule)",
"def configure(self, config=None):\r\n\r\n # FIXME: this is wrong, it should be a single dict per node (or not?)\r\n # List of attributes:\r\n # * can reflect a form for configuring whole stream\r\n # * can have attribute order regardless of their node ownership\r\n # List of nodes:\r\n # * bundled attributes in single dictioary\r\n # FIXME: this is inconsistent with node configuration! node.config()\r\n if config is None:\r\n config = {}\r\n configurations = {}\r\n\r\n # Collect configurations for each node\r\n\r\n for attribute in config:\r\n node_name = attribute[\"node\"]\r\n attribute_name = attribute[\"attribute\"]\r\n value = attribute.get(\"value\")\r\n\r\n if not node_name in configurations:\r\n config = {}\r\n configurations[node_name] = config\r\n else:\r\n config = configurations[node_name]\r\n\r\n config[attribute_name] = value\r\n\r\n # Configure nodes\r\n\r\n for (node_name, config) in configurations.items():\r\n node = self.coalesce_node(node_name)\r\n node.configure(config)",
"def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Update the critic network\n for target_param, param in zip(self.critic_target_network.parameters(), self.critic_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)",
"def run(self, network_update_args, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.update_network(network[\"id\"], **network_update_args)",
"def _set_rules_admin(self, gnp_config, network, host):\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"],\n f\"{addr_pool.network}/{addr_pool.prefix}\")\n if (ip_version == 6):\n self._add_source_net_filter(gnp_config[\"spec\"][\"ingress\"], LINK_LOCAL)\n if (ip_version == 4):\n # copy the TCP rule and do the same for IGMP\n igmp_proto = 2\n igmp_egr_rule = copy.deepcopy(gnp_config[\"spec\"][\"egress\"][0])\n igmp_egr_rule[\"protocol\"] = igmp_proto\n igmp_egr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-egr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"egress\"].append(igmp_egr_rule)\n igmp_ingr_rule = copy.deepcopy(gnp_config[\"spec\"][\"ingress\"][0])\n igmp_ingr_rule[\"protocol\"] = igmp_proto\n igmp_ingr_rule[\"metadata\"][\"annotations\"][\"name\"] = \\\n f\"stx-ingr-{host.personality}-{network.type}-igmp{ip_version}\"\n gnp_config[\"spec\"][\"ingress\"].append(igmp_ingr_rule)",
"def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()",
"def adapt_to_config(self, neb_config: config.NEBConfig):\n if neb_config.optim_config.eval_config is not None:\n self.model.adapt_to_config(neb_config.optim_config.eval_config)\n self.spring_constant = neb_config.spring_constant\n self.weight_decay = neb_config.weight_decay",
"def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")",
"def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")"
] | [
"0.66229737",
"0.64015234",
"0.6399242",
"0.63743067",
"0.62465507",
"0.6235132",
"0.6220781",
"0.60972697",
"0.6086234",
"0.60562795",
"0.6052708",
"0.60332054",
"0.59511423",
"0.5916945",
"0.5869258",
"0.5860552",
"0.5835882",
"0.58355385",
"0.58274704",
"0.5805548",
"0.58027786",
"0.57975477",
"0.57725716",
"0.5771135",
"0.5735175",
"0.57252145",
"0.56963164",
"0.56858915",
"0.5680376",
"0.5680376"
] | 0.65709144 | 1 |
Load configuration text from a file | def load_from_file(self, file_path):
with open(file_path) as f:
config_text = f.read()
self.load_from_string(config_text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(file):\n _config.load(file)",
"def load( self ):\n ini = codecs.open(self.filename,\"r\",\"utf-8\",errors=\"replace\",buffering=0)\n for l in ini:\n l = l.strip()\n if l:\n (name,value) = l.split(\"=\",1)\n self.conf[name.strip()] = value.strip()\n ini.close()",
"def load(self, file, config={}):\n if not os.path.exists(file):\n raise SystemExit('ERROR: config file at \"{f}\" does not exist'.format(f=file))\n config = config.copy()\n cp = ConfigParser.ConfigParser()\n cp.read(file)\n for sec in cp.sections():\n name = sec.lower()\n for opt in cp.options(sec):\n config[name + \".\" + opt.lower()] = cp.get(sec, opt).strip()\n return config",
"def read_config(self, config_filename):",
"def read_file(self, filename):\n # The config file is Python code -- makes life easy.\n config_vars = {}\n try:\n execfile(filename, config_vars)\n except IOError, exc:\n if exc.filename is None: # arg! execfile() loses filename\n exc.filename = filename\n raise exc\n self.set_from_dict(config_vars)",
"def load(self, file, config={}):\n if not os.path.exists(file):\n err = 'ERROR: config file at \"{f}\" does not exist'\n err = err.format(f=file)\n raise SettingsError(err)\n config = config.copy()\n cp = GoulashConfigParser()\n cp.read(file)\n return cp._sections",
"def load(filename):\n conf = CommonConfig.get()\n conf.update(toml.load(filename))\n return conf",
"def loadConfig():\n lines = []\n config = {}\n here = path.dirname(__file__)\n fn = path.join(here,'manatee.conf')\n try:\n with codecs.open(fn,'rU','utf-8') as conf:\n lines = conf.readlines()\n conf.close()\n except IOError as e:\n print \" Could not open configuration file: %s\" % e\n\n for line in lines:\n try:\n line = line.strip()\n if line:\n values = [x.strip() for x in line.split('=')]\n config[values[0]] = values[1]\n except Exception as e:\n print \"There was an error in the configuration file: %s\" % e\n # TODO: Any strings from the config file that might be displayed or passed into the SQL server need to be validated here.\n# config = validateConfig(config)\n return config",
"def from_file(file_path: str) -> \"Configuration\":\n\n with open(file_path, encoding=\"utf-8\") as config_file:\n return Configuration(cast(Dict[str, Any], toml.load(config_file)))",
"def load_config(self, config_file):\n self.config = ConfigParser.ConfigParser()\n self.config.read(config_file)",
"def load(self, file):\n self.namespace['workflow'].configfile(file)\n self.updateNamespace()",
"def load_config(self, filename):\n # read entire file for metadata\n fh = open(filename, 'r')\n self.file_contents = fh.read()\n\n # replace !include directives with content\n config_dir = os.path.split(filename)[0]\n include_re = re.compile('^!include\\s+(.*)$', re.MULTILINE)\n def include_repl(matchobj):\n fname = os.path.join(config_dir, matchobj.group(1))\n with open(fname) as f:\n return f.read()\n while re.search(include_re, self.file_contents): # for recursive !include\n self.file_contents = re.sub(include_re, include_repl, self.file_contents)\n\n # read in dictionary\n self.config = self.__ordered_load(self.file_contents)\n\n # convert functions of other params to true expressions\n for k in self.config.keys():\n self.config[k] = ExperimentConfig.__convert_key(self.config[k])\n\n # load core configuration\n return self.config",
"def load_config(filename):\n AS[\"config\"] = load_yaml_file(filename)",
"def loadConf(self):\n\n with open(self.configFile) as f:\n self.config = json.load(f)",
"def load_config(configfile=\"../data/test.cfg\"):\n\n config = configparser.ConfigParser()\n config.read([configfile])\n return config",
"def _get_config_from_file(self, filename):\n\n with open(filename, 'r') as f:\n config = load(f)\n return config",
"def load(self):\n config_dict = {}\n with open(\n os.path.join(\n os.path.dirname(\n os.path.abspath(\n inspect.stack()[0][1]\n )\n ),\n \"config.txt\"), 'r') as config_file:\n for line in config_file:\n if not line.startswith('#'):\n line = line.strip().split('=', 1)\n if len(line) == 2:\n config_dict[line[0]] = line[1]\n return config_dict",
"def _cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n cfg = yaml.load(f)\n return cfg",
"def _load_config(file):\n try:\n return bb.parse.handle(os.path.join('conf', file), bb.data.init() )\n except IOError, e:\n return None",
"def load_conf(self, filename):\n\n path = \"./source/_0_time_series_class/configuration/\"\n filename = path + filename\n \n with open(filename) as file:\n self.conf = json.loads(file.read())",
"def load_config(self, filename, fileconfout=None):\n self._filename = filename\n self._init_config = open(filename).read().splitlines()\n metaconfig = [l for l in self._init_config\n if not (l.startswith(\"#\") or l.startswith(\"\\t\") or l.startswith(\" \")) and len(l)>0]\n\n for k in metaconfig:\n key, *value = k.split()\n if len(value)==1:\n self.set_value(key, value[0], None)\n \n elif len(value)>1:\n if value[1]==\"#\":\n self.set_value(key, value[0], \" \".join(value[2:]))\n else:\n raise IOError(\"Cannot parse the line %s\"%k)\n else:\n raise IOError(\"cannot parse the line %s\"%k)\n if fileconfout is not None:\n self.set_value(\"PARA_OUT\", fileconfout)",
"def load_config(self):\r\n with open('config.json', 'r') as f:\r\n self.config = json.load(f)",
"def read_config_file(filename):\n\n # Read the config file\n toml_data = open(filename).read()\n\n # Load the definitions in the config file\n data = toml.loads(toml_data)\n\n return data",
"def load_config_file(filename):\n test_data_path = get_config_file_path(filename)\n with open(test_data_path) as f:\n test_data = f.read()\n return test_data",
"def load_config(f):\n config = ConfigParser.RawConfigParser()\n config.readfp(f)\n # Mininum required data. Raises exception if non-existent.\n config.get('memrise', 'username')\n config.get('beeminder', 'username')\n config.get('beeminder', 'auth_token')\n config.get('beeminder', 'goal_slug')\n return config",
"def load_from_file(self, path):\n schema = self.schema\n \n # Set up the default values.\n if schema is not None:\n for sect, sect_obj in schema.items():\n for opt, val in sect_obj.items():\n # This call is to convert the value to\n # the type specified. We do this to\n # prevent the programmer from specifying\n # inconsistent type with the value in the \n # schema.\n self.set(*_convert(schema, sect, opt, val[1]))\n\n # Parse the INI file.\n parser = RawConfigParser()\n parser.read(path)\n \n sections = parser.sections()\n for section in sections:\n \n # If application has supplied a schema,\n # and it does not has such a section, we skip\n # it. No error raised.\n if schema is not None and \\\n not schema.has_key(section):\n continue\n\n options = parser.options(section)\n \n for option in options:\n \n # If application has supplied a schema,\n # we know the section is valid since it pass the\n # previus test, but if the option is not included\n # in the section, we skip it. No error raised.\n if schema is not None and \\\n (option not in schema[section]):\n continue \n \n # If there is a schema, then we convert the \n # option to its type stated in the schema,\n # otherwise we just leave it as string.\n if schema is not None:\n self.set(*_convert(schema, section, option,\n parser.get(section, option)))\n else:\n self.set(section, option,\n parser.get(section, option))",
"def load(filepath):\n with open(filepath) as f:\n return Config(json.load(f))",
"def load_configuration(config_file):\n filename = config_file\n config = configparser.ConfigParser()\n config.read(filename)\n\n return config",
"def load_file(path):\n ext = None\n if '.' in path:\n ext = path.split('.')[-1]\n with open(path) as f:\n contents = f.read()\n if ext in ('yaml', 'yml'):\n return yaml.safe_load(contents)\n elif ext == 'json':\n return json.loads(contents)\n elif ext == 'conf':\n result = dict()\n # Here we just extract key-value pairs while ignoring comments.\n for line in contents.strip().split('\\n'):\n if line.strip().startswith('#'):\n continue \n try:\n key, value = line.split('=', 1)\n except ValueError:\n # Ignore lines that are not formatted correctly as\n # some downstream kernels may have weird lines and\n # the needed fields are probably formatted correctly.\n pass \n # Make sure to catch inline comments, too\n if '#' in value:\n value = value.split('#')[0].strip()\n result[key] = value\n return result\n elif ext is None:\n raise NotImplementedError",
"def parse(self):\n try:\n with open(self.path, 'r') as ymlfile:\n self.__cfg = yaml.load(ymlfile)\n except IOError:\n self.log(\"File {0} not found -- aborting\".format(self.path))\n raise ConfigFileException"
] | [
"0.75854945",
"0.75312644",
"0.7097647",
"0.70612305",
"0.6969976",
"0.6944861",
"0.6935008",
"0.6870552",
"0.68562406",
"0.68411684",
"0.68382555",
"0.683738",
"0.6824307",
"0.67287827",
"0.6713675",
"0.67064744",
"0.6700246",
"0.66864276",
"0.66733307",
"0.66413516",
"0.6617898",
"0.660999",
"0.65697753",
"0.6567984",
"0.65661037",
"0.655593",
"0.6526951",
"0.6510983",
"0.65081084",
"0.6477953"
] | 0.8322267 | 0 |
Add ACL sequence numbers for use on configurations with a style of 'ios' | def _add_acl_sequence_numbers(self):
ipv4_acl_sw = 'ip access-list'
# ipv6_acl_sw = ('ipv6 access-list')
if self.host.os in ['ios']:
acl_line_sw = ('permit', 'deny')
else:
acl_line_sw = ('permit', 'deny', 'remark')
for child in self.children:
if child.text.startswith(ipv4_acl_sw):
sn = 10
for sub_child in child.children:
if sub_child.text.startswith(acl_line_sw):
sub_child.text = "{} {}".format(sn, sub_child.text)
sn += 10
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _GenerateACLLine(self, app_id, term, unit, precedence=None):\n target = []\n target.append('application')\n target.append(str(app_id))\n target.append(term.term.name)\n target.append('protocol')\n if term.term.protocol:\n target.append('/'.join(sorted(term.term.protocol)))\n else:\n target.append(self._DEFAULT_PROTOCOL)\n target.append('src-ip %s src-port %s' % (unit[0], unit[1]))\n target.append('dst-ip %s dst-port %s' % (unit[2], unit[3]))\n if precedence:\n target.append('dscp %s\\n\\n' %\n self.precedence_value_map[precedence])\n else:\n target.append('dscp any\\n\\n')\n return ' '.join(target)",
"def at_config_ids(seq, value):\n at(\"CONFIG_IDS\", seq, value)",
"def _get_id_ac_string(accession: str, gene: str, sequence_len: int) -> str:\n id_str = \"ID {GENE:<24}{REVIEW:<18}{AA_COUNT} AA.\\n\".format(\n GENE=gene,\n REVIEW=\"Unreviewed;\",\n AA_COUNT=sequence_len\n )\n acc_str = \"AC {};\".format(accession)\n return id_str + acc_str",
"def reformat(self, seq_name, *, prefix=\"s\"):\n\t\treturn \"%s_%012u\" % (prefix, self.get_sid(seq_name))",
"def acl_id(self) -> str:\n return pulumi.get(self, \"acl_id\")",
"def acl_id(self) -> str:\n return pulumi.get(self, \"acl_id\")",
"def acl_id(self) -> str:\n return pulumi.get(self, \"acl_id\")",
"def add_sequence_numbers(contents):\n output = ''\n lines = contents.split(os.linesep)\n\n i = 1\n for line in lines:\n if timestamp_line(line):\n output += str(i) + os.linesep\n i += 1\n output += line + os.linesep\n return output",
"def format_fasta_entry(otu_name, isolate_name, sequence_id, sequence):\n return f\">{otu_name}|{isolate_name}|{sequence_id}|{len(sequence)}\\n{sequence}\"",
"def format_acl(version=1, **kwargs):\n if version == 1:\n return format_acl_v1(\n groups=kwargs.get('groups'), referrers=kwargs.get('referrers'),\n header_name=kwargs.get('header_name'))\n elif version == 2:\n return format_acl_v2(kwargs.get('acl_dict'))\n raise ValueError(\"Invalid ACL version: %r\" % version)",
"def _rm_ipv6_acl_sequence_numbers(self):\n\n for acl in self.get_children('startswith', 'ipv6 access-list '):\n for entry in acl.children:\n if entry.text.startswith('sequence'):\n entry.text = ' '.join(entry.text.split()[2:])\n return self",
"def unique_id(self):\n return self.config_entry.entry_id + \"lsa\"",
"def acl_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"acl_id\")",
"def acl_id(self) -> Optional[str]:\n return pulumi.get(self, \"acl_id\")",
"def pad_sequence(seq):\n seq_split = seq.strip().split(\"1\")\n last = seq_split[0]\n new_seq = last + \"1\"\n inc_added = 0\n out_added = 0\n for i in range(1, len(seq_split)-1):\n current = seq_split[i]\n\n # break up the intial sequences that leak information by adding padding\n if current == last:\n if last == \"-\":\n new_seq += \"+1\"\n inc_added += 1\n last = \"+\"\n else:\n new_seq += \"-1\"\n out_added += 1\n last = \"-\"\n else:\n new_seq += current + \"1\"\n last = current\n\n # 30% chance to inject randomness\n coin = random.randint(1, 101)\n if coin <= 30:\n if coin % 2 == 0:\n new_seq += \"+1\"\n else:\n new_seq += \"-1\"\n \n # return padded sequence, original number of cells, \n # number of incoming padding cells, and number of outgoing padding cells\n return new_seq, len(seq_split), inc_added, out_added",
"def config_mgmt_acl(zdcli, **kwargs):\n option = {}\n if kwargs: option.update(kwargs)\n \n logging.info(\"Create acl %s\" % option)\n cmd_block = _define_mgmt_ip_acl_cmd_block(option)\n zdcli.do_cfg(cmd_block)",
"def test_acl_configuration(self, env):\n # Create ACL Expression\n self.suite_logger.debug(\"Create and Verify ACL Expression\")\n expressions = [(1, 'DstMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:01:01:01'), ]\n env.switch[1].ui.create_acl(expressions=expressions)\n # Verify ACL Expression\n expression = env.switch[1].ui.get_table_acl(\"ACLExpressions\")[0]\n assert expression['data'] == '00:00:00:01:01:01'\n assert expression['mask'] == 'FF:FF:FF:FF:FF:FF'\n assert expression['expressionId'] == 1\n assert expression['field'] == 'DstMac'\n\n # Create ACL Actions\n self.suite_logger.debug(\"Create and Verify ACL Action\")\n actions = [(1, 'Drop', ''), ]\n env.switch[1].ui.create_acl(actions=actions)\n # Verify ACL Action\n action = env.switch[1].ui.get_table_acl(\"ACLActions\")[0]\n assert action['action'] == 'Drop'\n assert action['param'] == ''\n assert action['actionId'] == 1\n\n # Create ACL Rule\n self.suite_logger.debug(\"Create and Verify ACL Rule\")\n rules = [(1, 1, 1, 'Ingress', 'Enabled', 0), ]\n # Note: ACL Rule should be assigned to ports\n env.switch[1].ui.create_acl(ports=[1, ], rules=rules)\n # Verify ACL Rule\n rule = env.switch[1].ui.get_table_acl(\"ACLRules\")[0]\n assert rule['ruleId'] == 1\n assert rule['expressionId'] == 1\n assert rule['actionId'] == 1\n assert rule['stage'] == 'Ingress'\n assert rule['enabled'] == 'Enabled'\n assert rule['priority'] == 0",
"def format_aws_account_id(value):\n return str(value).zfill(12)",
"def generate_aa_sequence(chain):\n\n chain.strip()\n chain_list = chain.split(' ')\n # TODO: What if aa is not in the lookup\n seq = [IUPAC_AA_codes[aa] for aa in chain_list]\n return ''.join(seq)",
"def Ab_seq(RNs):\n seq = []\n for res in range(cf.nkey):\n randi = RNs.getR()\n for i in range(20):\n if randi < cf.cumprob20[i]:\n seq.append(i + 1) # want amino acids between 1 and 20\n break\n return seq",
"def pad_to_max_length(self, sequence):\n sequence = sequence[:self.max_seq_length]\n n = len(sequence)\n #return sequence + ['[PAD]'] * (self.max_seq_length - n)\n return sequence + [0] *(self.max_seq_length - n)",
"def acl_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"acl_id\")",
"def increase_seq(sequence_number):\n\n sequence_number += 1\n if sequence_number > 0xFFFF:\n sequence_number = 1\n\n return sequence_number",
"def format_acl_v2(acl_dict):\n return json.dumps(acl_dict, ensure_ascii=True, separators=(',', ':'),\n sort_keys=True)",
"def configure_aaa_session_id(device, type):\n try:\n device.configure([\n f\"aaa session-id {type}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not configure AAA session ID'\n )",
"def Ag_seq(RNs):\n seq = []\n for res in range(cf.lAg):\n randi = RNs.getR()\n for i in range(20):\n if randi < cf.cumprob20[i]:\n seq.append(i + 1) # want amino acids between 1 and 20\n break\n return seq",
"def _format_id(self, index):\n if index in self.VALID_ID_RANGE:\n return '!ID {}\\n'.format(str(index).zfill(self.MAX_DIGITS_QTD))\n raise IndexError(\"IDFile._format_id: {} is out of range\".format(index))",
"def rule_id(self, action):\n ext_id = \"{0}_{1}_acl_{2}\".format(\n self.model.short_name.replace('.', '_'),\n self.group.name.replace('.', '_'),\n action,\n )\n return ext_id[-64:]",
"def fix_indents(self):\n indent_map = list(map(self._get_indent, self.config_lines_str))\n fixed_indent_map = []\n for i in range(len(indent_map)):\n if i == 0:\n ### Assume the first line is not indented\n fixed_indent_map.append(0)\n continue\n if indent_map[i] == 0:\n fixed_indent_map.append(0)\n continue\n # If indent is same preceding line, copy its indent\n if indent_map[i] == indent_map[i-1]:\n fixed_indent_map.append(fixed_indent_map[-1])\n # If indent is higher that preceding line, increase by one\n elif indent_map[i] > indent_map[i-1]:\n fixed_indent_map.append(fixed_indent_map[-1]+1)\n # If indent is lower that preceding l\n elif indent_map[i] < indent_map[i-1]:\n fixed_indent_map.append(fixed_indent_map[-1]-1)\n for i, val in enumerate(fixed_indent_map):\n self.config_lines_str[i] = \" \"*val + self.config_lines_str[i].strip()\n #print(val, \"'{}'\".format(self.config_lines_str[i]))",
"def getAcNum(self):\n\n # stores the integer account number as a formatted 3-digit string (in which 0's occupy unused digits)\n strAcNum = str(\"{self.acNum:03d}\".format(self=self))\n return strAcNum"
] | [
"0.568859",
"0.5519338",
"0.5210421",
"0.5170846",
"0.51472306",
"0.51472306",
"0.51472306",
"0.504872",
"0.49429473",
"0.49280027",
"0.49235392",
"0.48615628",
"0.48489556",
"0.47969285",
"0.47635686",
"0.47626066",
"0.4755888",
"0.4732398",
"0.47136363",
"0.47089124",
"0.47014162",
"0.46952215",
"0.46812704",
"0.46547294",
"0.46427009",
"0.4624955",
"0.46149802",
"0.45986557",
"0.45610553",
"0.45574772"
] | 0.75554097 | 0 |